/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static atomic_t ib_mad_client_id = ATOMIC_INIT(0);

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: Invalid port %d\n",
			   port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
	if (ret2) {
		ret = ERR_PTR(ret2);
		goto error4;
	}

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error5;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error5;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error5;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;
error5:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
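
/*
 * Illustrative sketch only, not part of the driver: roughly how a kernel
 * client might register a GSI MAD agent on port 1 for the SA class and
 * later tear it down.  The my_send_handler/my_recv_handler callbacks and
 * my_context are assumed names for this example; the registration fields
 * themselves (mgmt_class, mgmt_class_version, method_mask) are the ones
 * validated by ib_register_mad_agent() above.
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, 1, IB_QPT_GSI, &reg_req,
 *				      IB_MGMT_RMPP_VERSION, my_send_handler,
 *				      my_recv_handler, my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);
 */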

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		  IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;
	int err;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);

	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
	if (err) {
		ret = ERR_PTR(err);
		goto error2;
	}

	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error3;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;
error3:
	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
		u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) ==
		    IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		ret = ib_mad_enforce_security(mad_agent_priv,
					      mad_send_wr->send_wr.pkey_index);
		if (ret)
			goto error;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
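
/*
 * Illustrative sketch only, not part of the driver: a minimal send path
 * for an agent obtained from ib_register_mad_agent().  The ah, remote_qpn
 * and pkey_index variables are assumed to be supplied by the caller, and
 * the timeout/retry values are arbitrary for the example; a real client
 * fills in the MAD header and payload in msg->mad before posting.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 3;
 *	... fill in msg->mad here, then post ...
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 */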

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	return (*method) ? 0 : (-ENOMEM);
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor)
			goto error1;

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class)
			goto error2;

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    ARRAY_SIZE(class->method_table))
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %d\n",
				   &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc )
{
	struct rdma_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;
	bool has_grh;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (rdma_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
	if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!has_grh) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			const struct ib_global_route *grh =
					rdma_ah_read_grh(&attr);

			if (ib_get_cached_gid(device, port_num,
					      grh->sgid_index, &sgid, NULL))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!has_grh)
		return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
	else
		return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
			       rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	ret = ib_mad_enforce_security(mad_agent_priv,
				      mad_recv_wc->wc->pkey_index);
	if (ret) {
		ib_free_recv_mad(mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
		return;
	}

	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent, NULL,
						mad_recv_wc);
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_agent_priv->agent.recv_handler(
					&mad_agent_priv->agent,
					&mad_send_wr->send_buf,
					mad_recv_wc);
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}

	return;
}

static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
rdma_cap_ib_switch(port_priv->device), 2068 port_num) == IB_SMI_DISCARD) 2069 return IB_SMI_DISCARD; 2070 2071 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) 2072 return IB_SMI_DISCARD; 2073 } else if (rdma_cap_ib_switch(port_priv->device)) { 2074 /* forward case for switches */ 2075 memcpy(response, recv, mad_priv_size(response)); 2076 response->header.recv_wc.wc = &response->header.wc; 2077 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2078 response->header.recv_wc.recv_buf.grh = &response->grh; 2079 2080 agent_send_response((const struct ib_mad_hdr *)response->mad, 2081 &response->grh, wc, 2082 port_priv->device, 2083 smi_get_fwd_port(smp), 2084 qp_info->qp->qp_num, 2085 response->mad_size, 2086 false); 2087 2088 return IB_SMI_DISCARD; 2089 } 2090 return IB_SMI_HANDLE; 2091 } 2092 2093 static bool generate_unmatched_resp(const struct ib_mad_private *recv, 2094 struct ib_mad_private *response, 2095 size_t *resp_len, bool opa) 2096 { 2097 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; 2098 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; 2099 2100 if (recv_hdr->method == IB_MGMT_METHOD_GET || 2101 recv_hdr->method == IB_MGMT_METHOD_SET) { 2102 memcpy(response, recv, mad_priv_size(response)); 2103 response->header.recv_wc.wc = &response->header.wc; 2104 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2105 response->header.recv_wc.recv_buf.grh = &response->grh; 2106 resp_hdr->method = IB_MGMT_METHOD_GET_RESP; 2107 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); 2108 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2109 resp_hdr->status |= IB_SMP_DIRECTION; 2110 2111 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { 2112 if (recv_hdr->mgmt_class == 2113 IB_MGMT_CLASS_SUBN_LID_ROUTED || 2114 recv_hdr->mgmt_class == 2115 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2116 *resp_len = opa_get_smp_header_size( 2117 (struct opa_smp *)recv->mad); 2118 else 2119 *resp_len = sizeof(struct ib_mad_hdr); 2120 } 2121 2122 return true; 2123 } else { 2124 return false; 2125 } 2126 } 2127 2128 static enum smi_action 2129 handle_opa_smi(struct ib_mad_port_private *port_priv, 2130 struct ib_mad_qp_info *qp_info, 2131 struct ib_wc *wc, 2132 int port_num, 2133 struct ib_mad_private *recv, 2134 struct ib_mad_private *response) 2135 { 2136 enum smi_forward_action retsmi; 2137 struct opa_smp *smp = (struct opa_smp *)recv->mad; 2138 2139 if (opa_smi_handle_dr_smp_recv(smp, 2140 rdma_cap_ib_switch(port_priv->device), 2141 port_num, 2142 port_priv->device->phys_port_cnt) == 2143 IB_SMI_DISCARD) 2144 return IB_SMI_DISCARD; 2145 2146 retsmi = opa_smi_check_forward_dr_smp(smp); 2147 if (retsmi == IB_SMI_LOCAL) 2148 return IB_SMI_HANDLE; 2149 2150 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2151 if (opa_smi_handle_dr_smp_send(smp, 2152 rdma_cap_ib_switch(port_priv->device), 2153 port_num) == IB_SMI_DISCARD) 2154 return IB_SMI_DISCARD; 2155 2156 if (opa_smi_check_local_smp(smp, port_priv->device) == 2157 IB_SMI_DISCARD) 2158 return IB_SMI_DISCARD; 2159 2160 } else if (rdma_cap_ib_switch(port_priv->device)) { 2161 /* forward case for switches */ 2162 memcpy(response, recv, mad_priv_size(response)); 2163 response->header.recv_wc.wc = &response->header.wc; 2164 response->header.recv_wc.recv_buf.opa_mad = 2165 (struct opa_mad *)response->mad; 2166 response->header.recv_wc.recv_buf.grh = &response->grh; 2167 2168 agent_send_response((const struct ib_mad_hdr 
*)response->mad, 2169 &response->grh, wc, 2170 port_priv->device, 2171 opa_smi_get_fwd_port(smp), 2172 qp_info->qp->qp_num, 2173 recv->header.wc.byte_len, 2174 true); 2175 2176 return IB_SMI_DISCARD; 2177 } 2178 2179 return IB_SMI_HANDLE; 2180 } 2181 2182 static enum smi_action 2183 handle_smi(struct ib_mad_port_private *port_priv, 2184 struct ib_mad_qp_info *qp_info, 2185 struct ib_wc *wc, 2186 int port_num, 2187 struct ib_mad_private *recv, 2188 struct ib_mad_private *response, 2189 bool opa) 2190 { 2191 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; 2192 2193 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && 2194 mad_hdr->class_version == OPA_SM_CLASS_VERSION) 2195 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, 2196 response); 2197 2198 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); 2199 } 2200 2201 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) 2202 { 2203 struct ib_mad_port_private *port_priv = cq->cq_context; 2204 struct ib_mad_list_head *mad_list = 2205 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2206 struct ib_mad_qp_info *qp_info; 2207 struct ib_mad_private_header *mad_priv_hdr; 2208 struct ib_mad_private *recv, *response = NULL; 2209 struct ib_mad_agent_private *mad_agent; 2210 int port_num; 2211 int ret = IB_MAD_RESULT_SUCCESS; 2212 size_t mad_size; 2213 u16 resp_mad_pkey_index = 0; 2214 bool opa; 2215 2216 if (list_empty_careful(&port_priv->port_list)) 2217 return; 2218 2219 if (wc->status != IB_WC_SUCCESS) { 2220 /* 2221 * Receive errors indicate that the QP has entered the error 2222 * state - error handling/shutdown code will cleanup 2223 */ 2224 return; 2225 } 2226 2227 qp_info = mad_list->mad_queue->qp_info; 2228 dequeue_mad(mad_list); 2229 2230 opa = rdma_cap_opa_mad(qp_info->port_priv->device, 2231 qp_info->port_priv->port_num); 2232 2233 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, 2234 mad_list); 2235 recv = container_of(mad_priv_hdr, struct ib_mad_private, header); 2236 ib_dma_unmap_single(port_priv->device, 2237 recv->header.mapping, 2238 mad_priv_dma_size(recv), 2239 DMA_FROM_DEVICE); 2240 2241 /* Setup MAD receive work completion from "normal" work completion */ 2242 recv->header.wc = *wc; 2243 recv->header.recv_wc.wc = &recv->header.wc; 2244 2245 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { 2246 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); 2247 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2248 } else { 2249 recv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2250 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2251 } 2252 2253 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; 2254 recv->header.recv_wc.recv_buf.grh = &recv->grh; 2255 2256 if (atomic_read(&qp_info->snoop_count)) 2257 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); 2258 2259 /* Validate MAD */ 2260 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) 2261 goto out; 2262 2263 mad_size = recv->mad_size; 2264 response = alloc_mad_private(mad_size, GFP_KERNEL); 2265 if (!response) 2266 goto out; 2267 2268 if (rdma_cap_ib_switch(port_priv->device)) 2269 port_num = wc->port_num; 2270 else 2271 port_num = port_priv->port_num; 2272 2273 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == 2274 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 2275 if (handle_smi(port_priv, qp_info, wc, port_num, recv, 2276 response, opa) 2277 == IB_SMI_DISCARD) 2278 goto out; 2279 } 
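	/*
	 * Directed-route SMPs have now been validated (and forwarded if this
	 * port belongs to a switch).  The rest of the receive path below is:
	 * offer the MAD to the driver via process_mad(), then to a matching
	 * registered agent, and, if nobody claims it, try to generate an
	 * "unsupported method/attribute" response before reposting the
	 * receive buffer.
	 */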
2280 2281 /* Give driver "right of first refusal" on incoming MAD */ 2282 if (port_priv->device->process_mad) { 2283 ret = port_priv->device->process_mad(port_priv->device, 0, 2284 port_priv->port_num, 2285 wc, &recv->grh, 2286 (const struct ib_mad_hdr *)recv->mad, 2287 recv->mad_size, 2288 (struct ib_mad_hdr *)response->mad, 2289 &mad_size, &resp_mad_pkey_index); 2290 2291 if (opa) 2292 wc->pkey_index = resp_mad_pkey_index; 2293 2294 if (ret & IB_MAD_RESULT_SUCCESS) { 2295 if (ret & IB_MAD_RESULT_CONSUMED) 2296 goto out; 2297 if (ret & IB_MAD_RESULT_REPLY) { 2298 agent_send_response((const struct ib_mad_hdr *)response->mad, 2299 &recv->grh, wc, 2300 port_priv->device, 2301 port_num, 2302 qp_info->qp->qp_num, 2303 mad_size, opa); 2304 goto out; 2305 } 2306 } 2307 } 2308 2309 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); 2310 if (mad_agent) { 2311 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); 2312 /* 2313 * recv is freed up in error cases in ib_mad_complete_recv 2314 * or via recv_handler in ib_mad_complete_recv() 2315 */ 2316 recv = NULL; 2317 } else if ((ret & IB_MAD_RESULT_SUCCESS) && 2318 generate_unmatched_resp(recv, response, &mad_size, opa)) { 2319 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, 2320 port_priv->device, port_num, 2321 qp_info->qp->qp_num, mad_size, opa); 2322 } 2323 2324 out: 2325 /* Post another receive request for this QP */ 2326 if (response) { 2327 ib_mad_post_receive_mads(qp_info, response); 2328 kfree(recv); 2329 } else 2330 ib_mad_post_receive_mads(qp_info, recv); 2331 } 2332 2333 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) 2334 { 2335 struct ib_mad_send_wr_private *mad_send_wr; 2336 unsigned long delay; 2337 2338 if (list_empty(&mad_agent_priv->wait_list)) { 2339 cancel_delayed_work(&mad_agent_priv->timed_work); 2340 } else { 2341 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2342 struct ib_mad_send_wr_private, 2343 agent_list); 2344 2345 if (time_after(mad_agent_priv->timeout, 2346 mad_send_wr->timeout)) { 2347 mad_agent_priv->timeout = mad_send_wr->timeout; 2348 delay = mad_send_wr->timeout - jiffies; 2349 if ((long)delay <= 0) 2350 delay = 1; 2351 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2352 &mad_agent_priv->timed_work, delay); 2353 } 2354 } 2355 } 2356 2357 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) 2358 { 2359 struct ib_mad_agent_private *mad_agent_priv; 2360 struct ib_mad_send_wr_private *temp_mad_send_wr; 2361 struct list_head *list_item; 2362 unsigned long delay; 2363 2364 mad_agent_priv = mad_send_wr->mad_agent_priv; 2365 list_del(&mad_send_wr->agent_list); 2366 2367 delay = mad_send_wr->timeout; 2368 mad_send_wr->timeout += jiffies; 2369 2370 if (delay) { 2371 list_for_each_prev(list_item, &mad_agent_priv->wait_list) { 2372 temp_mad_send_wr = list_entry(list_item, 2373 struct ib_mad_send_wr_private, 2374 agent_list); 2375 if (time_after(mad_send_wr->timeout, 2376 temp_mad_send_wr->timeout)) 2377 break; 2378 } 2379 } 2380 else 2381 list_item = &mad_agent_priv->wait_list; 2382 list_add(&mad_send_wr->agent_list, list_item); 2383 2384 /* Reschedule a work item if we have a shorter timeout */ 2385 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) 2386 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2387 &mad_agent_priv->timed_work, delay); 2388 } 2389 2390 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, 2391 int timeout_ms) 2392 { 2393 mad_send_wr->timeout 
= msecs_to_jiffies(timeout_ms); 2394 wait_for_response(mad_send_wr); 2395 } 2396 2397 /* 2398 * Process a send work completion 2399 */ 2400 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 2401 struct ib_mad_send_wc *mad_send_wc) 2402 { 2403 struct ib_mad_agent_private *mad_agent_priv; 2404 unsigned long flags; 2405 int ret; 2406 2407 mad_agent_priv = mad_send_wr->mad_agent_priv; 2408 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2409 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 2410 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); 2411 if (ret == IB_RMPP_RESULT_CONSUMED) 2412 goto done; 2413 } else 2414 ret = IB_RMPP_RESULT_UNHANDLED; 2415 2416 if (mad_send_wc->status != IB_WC_SUCCESS && 2417 mad_send_wr->status == IB_WC_SUCCESS) { 2418 mad_send_wr->status = mad_send_wc->status; 2419 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2420 } 2421 2422 if (--mad_send_wr->refcount > 0) { 2423 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && 2424 mad_send_wr->status == IB_WC_SUCCESS) { 2425 wait_for_response(mad_send_wr); 2426 } 2427 goto done; 2428 } 2429 2430 /* Remove send from MAD agent and notify client of completion */ 2431 list_del(&mad_send_wr->agent_list); 2432 adjust_timeout(mad_agent_priv); 2433 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2434 2435 if (mad_send_wr->status != IB_WC_SUCCESS ) 2436 mad_send_wc->status = mad_send_wr->status; 2437 if (ret == IB_RMPP_RESULT_INTERNAL) 2438 ib_rmpp_send_handler(mad_send_wc); 2439 else 2440 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2441 mad_send_wc); 2442 2443 /* Release reference on agent taken when sending */ 2444 deref_mad_agent(mad_agent_priv); 2445 return; 2446 done: 2447 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2448 } 2449 2450 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) 2451 { 2452 struct ib_mad_port_private *port_priv = cq->cq_context; 2453 struct ib_mad_list_head *mad_list = 2454 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2455 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; 2456 struct ib_mad_qp_info *qp_info; 2457 struct ib_mad_queue *send_queue; 2458 struct ib_send_wr *bad_send_wr; 2459 struct ib_mad_send_wc mad_send_wc; 2460 unsigned long flags; 2461 int ret; 2462 2463 if (list_empty_careful(&port_priv->port_list)) 2464 return; 2465 2466 if (wc->status != IB_WC_SUCCESS) { 2467 if (!ib_mad_send_error(port_priv, wc)) 2468 return; 2469 } 2470 2471 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2472 mad_list); 2473 send_queue = mad_list->mad_queue; 2474 qp_info = send_queue->qp_info; 2475 2476 retry: 2477 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2478 mad_send_wr->header_mapping, 2479 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2480 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2481 mad_send_wr->payload_mapping, 2482 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2483 queued_send_wr = NULL; 2484 spin_lock_irqsave(&send_queue->lock, flags); 2485 list_del(&mad_list->list); 2486 2487 /* Move queued send to the send queue */ 2488 if (send_queue->count-- > send_queue->max_active) { 2489 mad_list = container_of(qp_info->overflow_list.next, 2490 struct ib_mad_list_head, list); 2491 queued_send_wr = container_of(mad_list, 2492 struct ib_mad_send_wr_private, 2493 mad_list); 2494 list_move_tail(&mad_list->list, &send_queue->list); 2495 } 2496 spin_unlock_irqrestore(&send_queue->lock, flags); 2497 2498 mad_send_wc.send_buf = 
&mad_send_wr->send_buf; 2499 mad_send_wc.status = wc->status; 2500 mad_send_wc.vendor_err = wc->vendor_err; 2501 if (atomic_read(&qp_info->snoop_count)) 2502 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, 2503 IB_MAD_SNOOP_SEND_COMPLETIONS); 2504 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2505 2506 if (queued_send_wr) { 2507 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, 2508 &bad_send_wr); 2509 if (ret) { 2510 dev_err(&port_priv->device->dev, 2511 "ib_post_send failed: %d\n", ret); 2512 mad_send_wr = queued_send_wr; 2513 wc->status = IB_WC_LOC_QP_OP_ERR; 2514 goto retry; 2515 } 2516 } 2517 } 2518 2519 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) 2520 { 2521 struct ib_mad_send_wr_private *mad_send_wr; 2522 struct ib_mad_list_head *mad_list; 2523 unsigned long flags; 2524 2525 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 2526 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { 2527 mad_send_wr = container_of(mad_list, 2528 struct ib_mad_send_wr_private, 2529 mad_list); 2530 mad_send_wr->retry = 1; 2531 } 2532 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 2533 } 2534 2535 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, 2536 struct ib_wc *wc) 2537 { 2538 struct ib_mad_list_head *mad_list = 2539 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2540 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; 2541 struct ib_mad_send_wr_private *mad_send_wr; 2542 int ret; 2543 2544 /* 2545 * Send errors will transition the QP to SQE - move 2546 * QP to RTS and repost flushed work requests 2547 */ 2548 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2549 mad_list); 2550 if (wc->status == IB_WC_WR_FLUSH_ERR) { 2551 if (mad_send_wr->retry) { 2552 /* Repost send */ 2553 struct ib_send_wr *bad_send_wr; 2554 2555 mad_send_wr->retry = 0; 2556 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, 2557 &bad_send_wr); 2558 if (!ret) 2559 return false; 2560 } 2561 } else { 2562 struct ib_qp_attr *attr; 2563 2564 /* Transition QP to RTS and fail offending send */ 2565 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2566 if (attr) { 2567 attr->qp_state = IB_QPS_RTS; 2568 attr->cur_qp_state = IB_QPS_SQE; 2569 ret = ib_modify_qp(qp_info->qp, attr, 2570 IB_QP_STATE | IB_QP_CUR_STATE); 2571 kfree(attr); 2572 if (ret) 2573 dev_err(&port_priv->device->dev, 2574 "%s - ib_modify_qp to RTS: %d\n", 2575 __func__, ret); 2576 else 2577 mark_sends_for_retry(qp_info); 2578 } 2579 } 2580 2581 return true; 2582 } 2583 2584 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) 2585 { 2586 unsigned long flags; 2587 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; 2588 struct ib_mad_send_wc mad_send_wc; 2589 struct list_head cancel_list; 2590 2591 INIT_LIST_HEAD(&cancel_list); 2592 2593 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2594 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2595 &mad_agent_priv->send_list, agent_list) { 2596 if (mad_send_wr->status == IB_WC_SUCCESS) { 2597 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2598 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2599 } 2600 } 2601 2602 /* Empty wait list to prevent receives from finding a request */ 2603 list_splice_init(&mad_agent_priv->wait_list, &cancel_list); 2604 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2605 2606 /* Report all cancelled requests */ 2607 mad_send_wc.status = IB_WC_WR_FLUSH_ERR; 2608 mad_send_wc.vendor_err = 0; 2609 2610 
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2611 &cancel_list, agent_list) { 2612 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2613 list_del(&mad_send_wr->agent_list); 2614 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2615 &mad_send_wc); 2616 atomic_dec(&mad_agent_priv->refcount); 2617 } 2618 } 2619 2620 static struct ib_mad_send_wr_private* 2621 find_send_wr(struct ib_mad_agent_private *mad_agent_priv, 2622 struct ib_mad_send_buf *send_buf) 2623 { 2624 struct ib_mad_send_wr_private *mad_send_wr; 2625 2626 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 2627 agent_list) { 2628 if (&mad_send_wr->send_buf == send_buf) 2629 return mad_send_wr; 2630 } 2631 2632 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 2633 agent_list) { 2634 if (is_rmpp_data_mad(mad_agent_priv, 2635 mad_send_wr->send_buf.mad) && 2636 &mad_send_wr->send_buf == send_buf) 2637 return mad_send_wr; 2638 } 2639 return NULL; 2640 } 2641 2642 int ib_modify_mad(struct ib_mad_agent *mad_agent, 2643 struct ib_mad_send_buf *send_buf, u32 timeout_ms) 2644 { 2645 struct ib_mad_agent_private *mad_agent_priv; 2646 struct ib_mad_send_wr_private *mad_send_wr; 2647 unsigned long flags; 2648 int active; 2649 2650 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 2651 agent); 2652 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2653 mad_send_wr = find_send_wr(mad_agent_priv, send_buf); 2654 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { 2655 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2656 return -EINVAL; 2657 } 2658 2659 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); 2660 if (!timeout_ms) { 2661 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2662 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2663 } 2664 2665 mad_send_wr->send_buf.timeout_ms = timeout_ms; 2666 if (active) 2667 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2668 else 2669 ib_reset_mad_timeout(mad_send_wr, timeout_ms); 2670 2671 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2672 return 0; 2673 } 2674 EXPORT_SYMBOL(ib_modify_mad); 2675 2676 void ib_cancel_mad(struct ib_mad_agent *mad_agent, 2677 struct ib_mad_send_buf *send_buf) 2678 { 2679 ib_modify_mad(mad_agent, send_buf, 0); 2680 } 2681 EXPORT_SYMBOL(ib_cancel_mad); 2682 2683 static void local_completions(struct work_struct *work) 2684 { 2685 struct ib_mad_agent_private *mad_agent_priv; 2686 struct ib_mad_local_private *local; 2687 struct ib_mad_agent_private *recv_mad_agent; 2688 unsigned long flags; 2689 int free_mad; 2690 struct ib_wc wc; 2691 struct ib_mad_send_wc mad_send_wc; 2692 bool opa; 2693 2694 mad_agent_priv = 2695 container_of(work, struct ib_mad_agent_private, local_work); 2696 2697 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 2698 mad_agent_priv->qp_info->port_priv->port_num); 2699 2700 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2701 while (!list_empty(&mad_agent_priv->local_list)) { 2702 local = list_entry(mad_agent_priv->local_list.next, 2703 struct ib_mad_local_private, 2704 completion_list); 2705 list_del(&local->completion_list); 2706 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2707 free_mad = 0; 2708 if (local->mad_priv) { 2709 u8 base_version; 2710 recv_mad_agent = local->recv_mad_agent; 2711 if (!recv_mad_agent) { 2712 dev_err(&mad_agent_priv->agent.device->dev, 2713 "No receive MAD agent for local completion\n"); 2714 free_mad = 1; 2715 goto local_send_completion; 2716 } 2717 2718 /* 2719 * Defined behavior is to complete response 
2720 * before request 2721 */ 2722 build_smp_wc(recv_mad_agent->agent.qp, 2723 local->mad_send_wr->send_wr.wr.wr_cqe, 2724 be16_to_cpu(IB_LID_PERMISSIVE), 2725 local->mad_send_wr->send_wr.pkey_index, 2726 recv_mad_agent->agent.port_num, &wc); 2727 2728 local->mad_priv->header.recv_wc.wc = &wc; 2729 2730 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; 2731 if (opa && base_version == OPA_MGMT_BASE_VERSION) { 2732 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; 2733 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2734 } else { 2735 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2736 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2737 } 2738 2739 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); 2740 list_add(&local->mad_priv->header.recv_wc.recv_buf.list, 2741 &local->mad_priv->header.recv_wc.rmpp_list); 2742 local->mad_priv->header.recv_wc.recv_buf.grh = NULL; 2743 local->mad_priv->header.recv_wc.recv_buf.mad = 2744 (struct ib_mad *)local->mad_priv->mad; 2745 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) 2746 snoop_recv(recv_mad_agent->qp_info, 2747 &local->mad_priv->header.recv_wc, 2748 IB_MAD_SNOOP_RECVS); 2749 recv_mad_agent->agent.recv_handler( 2750 &recv_mad_agent->agent, 2751 &local->mad_send_wr->send_buf, 2752 &local->mad_priv->header.recv_wc); 2753 spin_lock_irqsave(&recv_mad_agent->lock, flags); 2754 atomic_dec(&recv_mad_agent->refcount); 2755 spin_unlock_irqrestore(&recv_mad_agent->lock, flags); 2756 } 2757 2758 local_send_completion: 2759 /* Complete send */ 2760 mad_send_wc.status = IB_WC_SUCCESS; 2761 mad_send_wc.vendor_err = 0; 2762 mad_send_wc.send_buf = &local->mad_send_wr->send_buf; 2763 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) 2764 snoop_send(mad_agent_priv->qp_info, 2765 &local->mad_send_wr->send_buf, 2766 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); 2767 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2768 &mad_send_wc); 2769 2770 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2771 atomic_dec(&mad_agent_priv->refcount); 2772 if (free_mad) 2773 kfree(local->mad_priv); 2774 kfree(local); 2775 } 2776 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2777 } 2778 2779 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) 2780 { 2781 int ret; 2782 2783 if (!mad_send_wr->retries_left) 2784 return -ETIMEDOUT; 2785 2786 mad_send_wr->retries_left--; 2787 mad_send_wr->send_buf.retries++; 2788 2789 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); 2790 2791 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { 2792 ret = ib_retry_rmpp(mad_send_wr); 2793 switch (ret) { 2794 case IB_RMPP_RESULT_UNHANDLED: 2795 ret = ib_send_mad(mad_send_wr); 2796 break; 2797 case IB_RMPP_RESULT_CONSUMED: 2798 ret = 0; 2799 break; 2800 default: 2801 ret = -ECOMM; 2802 break; 2803 } 2804 } else 2805 ret = ib_send_mad(mad_send_wr); 2806 2807 if (!ret) { 2808 mad_send_wr->refcount++; 2809 list_add_tail(&mad_send_wr->agent_list, 2810 &mad_send_wr->mad_agent_priv->send_list); 2811 } 2812 return ret; 2813 } 2814 2815 static void timeout_sends(struct work_struct *work) 2816 { 2817 struct ib_mad_agent_private *mad_agent_priv; 2818 struct ib_mad_send_wr_private *mad_send_wr; 2819 struct ib_mad_send_wc mad_send_wc; 2820 unsigned long flags, delay; 2821 2822 mad_agent_priv = container_of(work, struct ib_mad_agent_private, 2823 timed_work.work); 2824 mad_send_wc.vendor_err = 0; 2825 2826 
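	/*
	 * The wait list is kept ordered by expiry time, so stop at the first
	 * entry that has not timed out yet and re-arm the delayed work for
	 * it.  The agent lock is dropped around each client callback and
	 * re-taken before examining the next entry.
	 */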
spin_lock_irqsave(&mad_agent_priv->lock, flags); 2827 while (!list_empty(&mad_agent_priv->wait_list)) { 2828 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2829 struct ib_mad_send_wr_private, 2830 agent_list); 2831 2832 if (time_after(mad_send_wr->timeout, jiffies)) { 2833 delay = mad_send_wr->timeout - jiffies; 2834 if ((long)delay <= 0) 2835 delay = 1; 2836 queue_delayed_work(mad_agent_priv->qp_info-> 2837 port_priv->wq, 2838 &mad_agent_priv->timed_work, delay); 2839 break; 2840 } 2841 2842 list_del(&mad_send_wr->agent_list); 2843 if (mad_send_wr->status == IB_WC_SUCCESS && 2844 !retry_send(mad_send_wr)) 2845 continue; 2846 2847 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2848 2849 if (mad_send_wr->status == IB_WC_SUCCESS) 2850 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; 2851 else 2852 mad_send_wc.status = mad_send_wr->status; 2853 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2854 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2855 &mad_send_wc); 2856 2857 atomic_dec(&mad_agent_priv->refcount); 2858 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2859 } 2860 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2861 } 2862 2863 /* 2864 * Allocate receive MADs and post receive WRs for them 2865 */ 2866 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 2867 struct ib_mad_private *mad) 2868 { 2869 unsigned long flags; 2870 int post, ret; 2871 struct ib_mad_private *mad_priv; 2872 struct ib_sge sg_list; 2873 struct ib_recv_wr recv_wr, *bad_recv_wr; 2874 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; 2875 2876 /* Initialize common scatter list fields */ 2877 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; 2878 2879 /* Initialize common receive WR fields */ 2880 recv_wr.next = NULL; 2881 recv_wr.sg_list = &sg_list; 2882 recv_wr.num_sge = 1; 2883 2884 do { 2885 /* Allocate and map receive buffer */ 2886 if (mad) { 2887 mad_priv = mad; 2888 mad = NULL; 2889 } else { 2890 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), 2891 GFP_ATOMIC); 2892 if (!mad_priv) { 2893 ret = -ENOMEM; 2894 break; 2895 } 2896 } 2897 sg_list.length = mad_priv_dma_size(mad_priv); 2898 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, 2899 &mad_priv->grh, 2900 mad_priv_dma_size(mad_priv), 2901 DMA_FROM_DEVICE); 2902 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, 2903 sg_list.addr))) { 2904 ret = -ENOMEM; 2905 break; 2906 } 2907 mad_priv->header.mapping = sg_list.addr; 2908 mad_priv->header.mad_list.mad_queue = recv_queue; 2909 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; 2910 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; 2911 2912 /* Post receive WR */ 2913 spin_lock_irqsave(&recv_queue->lock, flags); 2914 post = (++recv_queue->count < recv_queue->max_active); 2915 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); 2916 spin_unlock_irqrestore(&recv_queue->lock, flags); 2917 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); 2918 if (ret) { 2919 spin_lock_irqsave(&recv_queue->lock, flags); 2920 list_del(&mad_priv->header.mad_list.list); 2921 recv_queue->count--; 2922 spin_unlock_irqrestore(&recv_queue->lock, flags); 2923 ib_dma_unmap_single(qp_info->port_priv->device, 2924 mad_priv->header.mapping, 2925 mad_priv_dma_size(mad_priv), 2926 DMA_FROM_DEVICE); 2927 kfree(mad_priv); 2928 dev_err(&qp_info->port_priv->device->dev, 2929 "ib_post_recv failed: %d\n", ret); 2930 break; 2931 } 2932 } while (post); 2933 2934 return ret; 2935 } 2936 2937 /* 2938 * Return all the posted receive 
MADs 2939 */ 2940 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) 2941 { 2942 struct ib_mad_private_header *mad_priv_hdr; 2943 struct ib_mad_private *recv; 2944 struct ib_mad_list_head *mad_list; 2945 2946 if (!qp_info->qp) 2947 return; 2948 2949 while (!list_empty(&qp_info->recv_queue.list)) { 2950 2951 mad_list = list_entry(qp_info->recv_queue.list.next, 2952 struct ib_mad_list_head, list); 2953 mad_priv_hdr = container_of(mad_list, 2954 struct ib_mad_private_header, 2955 mad_list); 2956 recv = container_of(mad_priv_hdr, struct ib_mad_private, 2957 header); 2958 2959 /* Remove from posted receive MAD list */ 2960 list_del(&mad_list->list); 2961 2962 ib_dma_unmap_single(qp_info->port_priv->device, 2963 recv->header.mapping, 2964 mad_priv_dma_size(recv), 2965 DMA_FROM_DEVICE); 2966 kfree(recv); 2967 } 2968 2969 qp_info->recv_queue.count = 0; 2970 } 2971 2972 /* 2973 * Start the port 2974 */ 2975 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) 2976 { 2977 int ret, i; 2978 struct ib_qp_attr *attr; 2979 struct ib_qp *qp; 2980 u16 pkey_index; 2981 2982 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2983 if (!attr) 2984 return -ENOMEM; 2985 2986 ret = ib_find_pkey(port_priv->device, port_priv->port_num, 2987 IB_DEFAULT_PKEY_FULL, &pkey_index); 2988 if (ret) 2989 pkey_index = 0; 2990 2991 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 2992 qp = port_priv->qp_info[i].qp; 2993 if (!qp) 2994 continue; 2995 2996 /* 2997 * PKey index for QP1 is irrelevant but 2998 * one is needed for the Reset to Init transition 2999 */ 3000 attr->qp_state = IB_QPS_INIT; 3001 attr->pkey_index = pkey_index; 3002 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; 3003 ret = ib_modify_qp(qp, attr, IB_QP_STATE | 3004 IB_QP_PKEY_INDEX | IB_QP_QKEY); 3005 if (ret) { 3006 dev_err(&port_priv->device->dev, 3007 "Couldn't change QP%d state to INIT: %d\n", 3008 i, ret); 3009 goto out; 3010 } 3011 3012 attr->qp_state = IB_QPS_RTR; 3013 ret = ib_modify_qp(qp, attr, IB_QP_STATE); 3014 if (ret) { 3015 dev_err(&port_priv->device->dev, 3016 "Couldn't change QP%d state to RTR: %d\n", 3017 i, ret); 3018 goto out; 3019 } 3020 3021 attr->qp_state = IB_QPS_RTS; 3022 attr->sq_psn = IB_MAD_SEND_Q_PSN; 3023 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); 3024 if (ret) { 3025 dev_err(&port_priv->device->dev, 3026 "Couldn't change QP%d state to RTS: %d\n", 3027 i, ret); 3028 goto out; 3029 } 3030 } 3031 3032 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 3033 if (ret) { 3034 dev_err(&port_priv->device->dev, 3035 "Failed to request completion notification: %d\n", 3036 ret); 3037 goto out; 3038 } 3039 3040 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 3041 if (!port_priv->qp_info[i].qp) 3042 continue; 3043 3044 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); 3045 if (ret) { 3046 dev_err(&port_priv->device->dev, 3047 "Couldn't post receive WRs\n"); 3048 goto out; 3049 } 3050 } 3051 out: 3052 kfree(attr); 3053 return ret; 3054 } 3055 3056 static void qp_event_handler(struct ib_event *event, void *qp_context) 3057 { 3058 struct ib_mad_qp_info *qp_info = qp_context; 3059 3060 /* It's worse than that! He's dead, Jim! 
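	 * A fatal asynchronous event on a MAD QP cannot be recovered from
	 * here; all this handler can do is log which QP died and why.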
*/ 3061 dev_err(&qp_info->port_priv->device->dev, 3062 "Fatal error (%d) on MAD QP (%d)\n", 3063 event->event, qp_info->qp->qp_num); 3064 } 3065 3066 static void init_mad_queue(struct ib_mad_qp_info *qp_info, 3067 struct ib_mad_queue *mad_queue) 3068 { 3069 mad_queue->qp_info = qp_info; 3070 mad_queue->count = 0; 3071 spin_lock_init(&mad_queue->lock); 3072 INIT_LIST_HEAD(&mad_queue->list); 3073 } 3074 3075 static void init_mad_qp(struct ib_mad_port_private *port_priv, 3076 struct ib_mad_qp_info *qp_info) 3077 { 3078 qp_info->port_priv = port_priv; 3079 init_mad_queue(qp_info, &qp_info->send_queue); 3080 init_mad_queue(qp_info, &qp_info->recv_queue); 3081 INIT_LIST_HEAD(&qp_info->overflow_list); 3082 spin_lock_init(&qp_info->snoop_lock); 3083 qp_info->snoop_table = NULL; 3084 qp_info->snoop_table_size = 0; 3085 atomic_set(&qp_info->snoop_count, 0); 3086 } 3087 3088 static int create_mad_qp(struct ib_mad_qp_info *qp_info, 3089 enum ib_qp_type qp_type) 3090 { 3091 struct ib_qp_init_attr qp_init_attr; 3092 int ret; 3093 3094 memset(&qp_init_attr, 0, sizeof qp_init_attr); 3095 qp_init_attr.send_cq = qp_info->port_priv->cq; 3096 qp_init_attr.recv_cq = qp_info->port_priv->cq; 3097 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 3098 qp_init_attr.cap.max_send_wr = mad_sendq_size; 3099 qp_init_attr.cap.max_recv_wr = mad_recvq_size; 3100 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; 3101 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; 3102 qp_init_attr.qp_type = qp_type; 3103 qp_init_attr.port_num = qp_info->port_priv->port_num; 3104 qp_init_attr.qp_context = qp_info; 3105 qp_init_attr.event_handler = qp_event_handler; 3106 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); 3107 if (IS_ERR(qp_info->qp)) { 3108 dev_err(&qp_info->port_priv->device->dev, 3109 "Couldn't create ib_mad QP%d\n", 3110 get_spl_qp_index(qp_type)); 3111 ret = PTR_ERR(qp_info->qp); 3112 goto error; 3113 } 3114 /* Use minimum queue sizes unless the CQ is resized */ 3115 qp_info->send_queue.max_active = mad_sendq_size; 3116 qp_info->recv_queue.max_active = mad_recvq_size; 3117 return 0; 3118 3119 error: 3120 return ret; 3121 } 3122 3123 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) 3124 { 3125 if (!qp_info->qp) 3126 return; 3127 3128 ib_destroy_qp(qp_info->qp); 3129 kfree(qp_info->snoop_table); 3130 } 3131 3132 /* 3133 * Open the port 3134 * Create the QP, PD, MR, and CQ if needed 3135 */ 3136 static int ib_mad_port_open(struct ib_device *device, 3137 int port_num) 3138 { 3139 int ret, cq_size; 3140 struct ib_mad_port_private *port_priv; 3141 unsigned long flags; 3142 char name[sizeof "ib_mad123"]; 3143 int has_smi; 3144 3145 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) 3146 return -EFAULT; 3147 3148 if (WARN_ON(rdma_cap_opa_mad(device, port_num) && 3149 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) 3150 return -EFAULT; 3151 3152 /* Create new device info */ 3153 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); 3154 if (!port_priv) 3155 return -ENOMEM; 3156 3157 port_priv->device = device; 3158 port_priv->port_num = port_num; 3159 spin_lock_init(&port_priv->reg_lock); 3160 INIT_LIST_HEAD(&port_priv->agent_list); 3161 init_mad_qp(port_priv, &port_priv->qp_info[0]); 3162 init_mad_qp(port_priv, &port_priv->qp_info[1]); 3163 3164 cq_size = mad_sendq_size + mad_recvq_size; 3165 has_smi = rdma_cap_ib_smi(device, port_num); 3166 if (has_smi) 3167 cq_size *= 2; 3168 3169 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, 3170 
IB_POLL_WORKQUEUE); 3171 if (IS_ERR(port_priv->cq)) { 3172 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); 3173 ret = PTR_ERR(port_priv->cq); 3174 goto error3; 3175 } 3176 3177 port_priv->pd = ib_alloc_pd(device, 0); 3178 if (IS_ERR(port_priv->pd)) { 3179 dev_err(&device->dev, "Couldn't create ib_mad PD\n"); 3180 ret = PTR_ERR(port_priv->pd); 3181 goto error4; 3182 } 3183 3184 if (has_smi) { 3185 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); 3186 if (ret) 3187 goto error6; 3188 } 3189 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); 3190 if (ret) 3191 goto error7; 3192 3193 snprintf(name, sizeof name, "ib_mad%d", port_num); 3194 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 3195 if (!port_priv->wq) { 3196 ret = -ENOMEM; 3197 goto error8; 3198 } 3199 3200 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3201 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 3202 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3203 3204 ret = ib_mad_port_start(port_priv); 3205 if (ret) { 3206 dev_err(&device->dev, "Couldn't start port\n"); 3207 goto error9; 3208 } 3209 3210 return 0; 3211 3212 error9: 3213 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3214 list_del_init(&port_priv->port_list); 3215 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3216 3217 destroy_workqueue(port_priv->wq); 3218 error8: 3219 destroy_mad_qp(&port_priv->qp_info[1]); 3220 error7: 3221 destroy_mad_qp(&port_priv->qp_info[0]); 3222 error6: 3223 ib_dealloc_pd(port_priv->pd); 3224 error4: 3225 ib_free_cq(port_priv->cq); 3226 cleanup_recv_queue(&port_priv->qp_info[1]); 3227 cleanup_recv_queue(&port_priv->qp_info[0]); 3228 error3: 3229 kfree(port_priv); 3230 3231 return ret; 3232 } 3233 3234 /* 3235 * Close the port 3236 * If there are no classes using the port, free the port 3237 * resources (CQ, MR, PD, QP) and remove the port's info structure 3238 */ 3239 static int ib_mad_port_close(struct ib_device *device, int port_num) 3240 { 3241 struct ib_mad_port_private *port_priv; 3242 unsigned long flags; 3243 3244 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3245 port_priv = __ib_get_mad_port(device, port_num); 3246 if (port_priv == NULL) { 3247 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3248 dev_err(&device->dev, "Port %d not found\n", port_num); 3249 return -ENODEV; 3250 } 3251 list_del_init(&port_priv->port_list); 3252 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3253 3254 destroy_workqueue(port_priv->wq); 3255 destroy_mad_qp(&port_priv->qp_info[1]); 3256 destroy_mad_qp(&port_priv->qp_info[0]); 3257 ib_dealloc_pd(port_priv->pd); 3258 ib_free_cq(port_priv->cq); 3259 cleanup_recv_queue(&port_priv->qp_info[1]); 3260 cleanup_recv_queue(&port_priv->qp_info[0]); 3261 /* XXX: Handle deallocation of MAD registration tables */ 3262 3263 kfree(port_priv); 3264 3265 return 0; 3266 } 3267 3268 static void ib_mad_init_device(struct ib_device *device) 3269 { 3270 int start, i; 3271 3272 start = rdma_start_port(device); 3273 3274 for (i = start; i <= rdma_end_port(device); i++) { 3275 if (!rdma_cap_ib_mad(device, i)) 3276 continue; 3277 3278 if (ib_mad_port_open(device, i)) { 3279 dev_err(&device->dev, "Couldn't open port %d\n", i); 3280 goto error; 3281 } 3282 if (ib_agent_port_open(device, i)) { 3283 dev_err(&device->dev, 3284 "Couldn't open port %d for agents\n", i); 3285 goto error_agent; 3286 } 3287 } 3288 return; 3289 3290 error_agent: 3291 if (ib_mad_port_close(device, i)) 3292 dev_err(&device->dev, "Couldn't close port %d\n", i); 3293 3294 error: 
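	/*
	 * Unwind any ports opened before the failure: the failing port has
	 * already been cleaned up (or was never opened), so walk backwards
	 * and close the agent and MAD resources of each lower-numbered port.
	 */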
3295 while (--i >= start) { 3296 if (!rdma_cap_ib_mad(device, i)) 3297 continue; 3298 3299 if (ib_agent_port_close(device, i)) 3300 dev_err(&device->dev, 3301 "Couldn't close port %d for agents\n", i); 3302 if (ib_mad_port_close(device, i)) 3303 dev_err(&device->dev, "Couldn't close port %d\n", i); 3304 } 3305 } 3306 3307 static void ib_mad_remove_device(struct ib_device *device, void *client_data) 3308 { 3309 int i; 3310 3311 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { 3312 if (!rdma_cap_ib_mad(device, i)) 3313 continue; 3314 3315 if (ib_agent_port_close(device, i)) 3316 dev_err(&device->dev, 3317 "Couldn't close port %d for agents\n", i); 3318 if (ib_mad_port_close(device, i)) 3319 dev_err(&device->dev, "Couldn't close port %d\n", i); 3320 } 3321 } 3322 3323 static struct ib_client mad_client = { 3324 .name = "mad", 3325 .add = ib_mad_init_device, 3326 .remove = ib_mad_remove_device 3327 }; 3328 3329 int ib_mad_init(void) 3330 { 3331 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); 3332 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); 3333 3334 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); 3335 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); 3336 3337 INIT_LIST_HEAD(&ib_mad_port_list); 3338 3339 if (ib_register_client(&mad_client)) { 3340 pr_err("Couldn't register ib_mad client\n"); 3341 return -EINVAL; 3342 } 3343 3344 return 0; 3345 } 3346 3347 void ib_mad_cleanup(void) 3348 { 3349 ib_unregister_client(&mad_client); 3350 } 3351
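
/*
 * Illustrative sketch (not part of this file): a client that has posted a
 * MAD with a timeout can stretch or abort it through the helpers exported
 * above.  "agent" and "send_buf" are hypothetical placeholders for the
 * client's own ib_mad_agent and ib_mad_send_buf handles.
 *
 *	Give the request five more seconds:
 *		ib_modify_mad(agent, send_buf, 5000);
 *
 *	Or abort it; the client's send_handler will then be invoked with
 *	status IB_WC_WR_FLUSH_ERR:
 *		ib_cancel_mad(agent, send_buf);
 */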