/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
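/*
 * Illustrative note: both parameters are read-only at runtime (mode 0444),
 * so they are normally set at module load time.  Assuming this file is
 * linked into the ib_core module (the module name may differ by kernel
 * configuration), a hypothetical modprobe.d entry could look like:
 *
 *	options ib_core send_queue_size=256 recv_queue_size=1024
 *
 * The values are consumed when the special MAD QPs are created.
 */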

static struct list_head ib_mad_port_list;
static atomic_t ib_mad_client_id = ATOMIC_INIT(0);

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
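/*
 * Worked example (illustrative): a GetResp MAD carries
 * hdr->method == IB_MGMT_METHOD_GET_RESP (0x81), so the test against
 * IB_MGMT_METHOD_RESP (0x80) is non-zero and ib_response_mad() returns
 * true.  Such MADs are later routed by the high 32 bits of their
 * transaction ID back to the agent that sent the request, instead of by
 * class/method registration.
 */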

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: Invalid port %d\n",
			   port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
	if (ret2) {
		ret = ERR_PTR(ret2);
		goto error4;
	}

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error5;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error5;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error5;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;
error5:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
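/*
 * Illustrative usage sketch (the class, OUI, handlers and context below are
 * made-up stand-ins; real callers include the SA, CM and SMI clients
 * elsewhere in the stack):
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class		= IB_MGMT_CLASS_VENDOR_RANGE2_START,
 *		.mgmt_class_version	= 1,
 *		.oui			= { 0x00, 0x14, 0x05 },
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);
 */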

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;
	int err;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);

	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
	if (err) {
		ret = ERR_PTR(err);
		goto error2;
	}

	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error3;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;
error3:
	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
		u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}
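/*
 * Worked example (numbers for plain IB, not OPA): a received MAD is always
 * delivered with room for a 40-byte GRH in front of the 256-byte MAD, so
 * mad_priv_dma_size() maps 40 + 256 = 296 bytes per posted receive, while
 * mad_priv_size() additionally accounts for the ib_mad_private header that
 * precedes the wire data in the allocation.
 */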

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}
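/*
 * Worked example (illustrative): for a plain 256-byte IB MAD carrying an
 * SA-class payload, hdr_len is IB_MGMT_SA_HDR (56), so seg_size is
 * 256 - 56 = 200.  With data_len == 300 the last RMPP segment holds
 * 300 % 200 = 100 bytes, and get_pad_size() returns 200 - 100 = 100 bytes
 * of padding, which alloc_send_rmpp_list() below zeroes out.
 */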

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
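/*
 * Illustrative usage sketch (error handling trimmed; my_agent, my_ah,
 * remote_qpn, pkey_index and my_fill_mad() are stand-ins for caller
 * state, not part of this file):
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(my_agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	my_fill_mad(msg->mad);
 *	msg->ah = my_ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 3;
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 */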

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		ret = ib_mad_enforce_security(mad_agent_priv,
					      mad_send_wr->send_wr.pkey_index);
		if (ret)
			goto error;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
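/*
 * Illustrative receive-handler sketch (my_recv_handler is a stand-in for
 * the handler passed to ib_register_mad_agent() above, and
 * process_mad_somehow() is a made-up helper).  The handler owns the
 * ib_mad_recv_wc and must hand it back with ib_free_recv_mad():
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_buf *send_buf,
 *				    struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		// send_buf is non-NULL only when this MAD is the response
 *		// to a request previously posted with ib_post_send_mad().
 *		process_mad_somehow(mad_recv_wc->recv_buf.mad);
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */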

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	return (*method) ? 0 : (-ENOMEM);
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ?
*/ 1431 if (!memcmp(vendor_class->oui[i], oui, 3)) 1432 return i; 1433 1434 return -1; 1435 } 1436 1437 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor) 1438 { 1439 int i; 1440 1441 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++) 1442 if (vendor->vendor_class[i]) 1443 return 1; 1444 1445 return 0; 1446 } 1447 1448 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, 1449 struct ib_mad_agent_private *agent) 1450 { 1451 int i; 1452 1453 /* Remove any methods for this mad agent */ 1454 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) { 1455 if (method->agent[i] == agent) { 1456 method->agent[i] = NULL; 1457 } 1458 } 1459 } 1460 1461 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 1462 struct ib_mad_agent_private *agent_priv, 1463 u8 mgmt_class) 1464 { 1465 struct ib_mad_port_private *port_priv; 1466 struct ib_mad_mgmt_class_table **class; 1467 struct ib_mad_mgmt_method_table **method; 1468 int i, ret; 1469 1470 port_priv = agent_priv->qp_info->port_priv; 1471 class = &port_priv->version[mad_reg_req->mgmt_class_version].class; 1472 if (!*class) { 1473 /* Allocate management class table for "new" class version */ 1474 *class = kzalloc(sizeof **class, GFP_ATOMIC); 1475 if (!*class) { 1476 ret = -ENOMEM; 1477 goto error1; 1478 } 1479 1480 /* Allocate method table for this management class */ 1481 method = &(*class)->method_table[mgmt_class]; 1482 if ((ret = allocate_method_table(method))) 1483 goto error2; 1484 } else { 1485 method = &(*class)->method_table[mgmt_class]; 1486 if (!*method) { 1487 /* Allocate method table for this management class */ 1488 if ((ret = allocate_method_table(method))) 1489 goto error1; 1490 } 1491 } 1492 1493 /* Now, make sure methods are not already in use */ 1494 if (method_in_use(method, mad_reg_req)) 1495 goto error3; 1496 1497 /* Finally, add in methods being registered */ 1498 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) 1499 (*method)->agent[i] = agent_priv; 1500 1501 return 0; 1502 1503 error3: 1504 /* Remove any methods for this mad agent */ 1505 remove_methods_mad_agent(*method, agent_priv); 1506 /* Now, check to see if there are any methods in use */ 1507 if (!check_method_table(*method)) { 1508 /* If not, release management method table */ 1509 kfree(*method); 1510 *method = NULL; 1511 } 1512 ret = -EINVAL; 1513 goto error1; 1514 error2: 1515 kfree(*class); 1516 *class = NULL; 1517 error1: 1518 return ret; 1519 } 1520 1521 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, 1522 struct ib_mad_agent_private *agent_priv) 1523 { 1524 struct ib_mad_port_private *port_priv; 1525 struct ib_mad_mgmt_vendor_class_table **vendor_table; 1526 struct ib_mad_mgmt_vendor_class_table *vendor = NULL; 1527 struct ib_mad_mgmt_vendor_class *vendor_class = NULL; 1528 struct ib_mad_mgmt_method_table **method; 1529 int i, ret = -ENOMEM; 1530 u8 vclass; 1531 1532 /* "New" vendor (with OUI) class */ 1533 vclass = vendor_class_index(mad_reg_req->mgmt_class); 1534 port_priv = agent_priv->qp_info->port_priv; 1535 vendor_table = &port_priv->version[ 1536 mad_reg_req->mgmt_class_version].vendor; 1537 if (!*vendor_table) { 1538 /* Allocate mgmt vendor class table for "new" class version */ 1539 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); 1540 if (!vendor) 1541 goto error1; 1542 1543 *vendor_table = vendor; 1544 } 1545 if (!(*vendor_table)->vendor_class[vclass]) { 1546 /* Allocate table for this management vendor class */ 1547 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); 1548 if 
(!vendor_class) 1549 goto error2; 1550 1551 (*vendor_table)->vendor_class[vclass] = vendor_class; 1552 } 1553 for (i = 0; i < MAX_MGMT_OUI; i++) { 1554 /* Is there matching OUI for this vendor class ? */ 1555 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], 1556 mad_reg_req->oui, 3)) { 1557 method = &(*vendor_table)->vendor_class[ 1558 vclass]->method_table[i]; 1559 if (!*method) 1560 goto error3; 1561 goto check_in_use; 1562 } 1563 } 1564 for (i = 0; i < MAX_MGMT_OUI; i++) { 1565 /* OUI slot available ? */ 1566 if (!is_vendor_oui((*vendor_table)->vendor_class[ 1567 vclass]->oui[i])) { 1568 method = &(*vendor_table)->vendor_class[ 1569 vclass]->method_table[i]; 1570 /* Allocate method table for this OUI */ 1571 if (!*method) { 1572 ret = allocate_method_table(method); 1573 if (ret) 1574 goto error3; 1575 } 1576 memcpy((*vendor_table)->vendor_class[vclass]->oui[i], 1577 mad_reg_req->oui, 3); 1578 goto check_in_use; 1579 } 1580 } 1581 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); 1582 goto error3; 1583 1584 check_in_use: 1585 /* Now, make sure methods are not already in use */ 1586 if (method_in_use(method, mad_reg_req)) 1587 goto error4; 1588 1589 /* Finally, add in methods being registered */ 1590 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) 1591 (*method)->agent[i] = agent_priv; 1592 1593 return 0; 1594 1595 error4: 1596 /* Remove any methods for this mad agent */ 1597 remove_methods_mad_agent(*method, agent_priv); 1598 /* Now, check to see if there are any methods in use */ 1599 if (!check_method_table(*method)) { 1600 /* If not, release management method table */ 1601 kfree(*method); 1602 *method = NULL; 1603 } 1604 ret = -EINVAL; 1605 error3: 1606 if (vendor_class) { 1607 (*vendor_table)->vendor_class[vclass] = NULL; 1608 kfree(vendor_class); 1609 } 1610 error2: 1611 if (vendor) { 1612 *vendor_table = NULL; 1613 kfree(vendor); 1614 } 1615 error1: 1616 return ret; 1617 } 1618 1619 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) 1620 { 1621 struct ib_mad_port_private *port_priv; 1622 struct ib_mad_mgmt_class_table *class; 1623 struct ib_mad_mgmt_method_table *method; 1624 struct ib_mad_mgmt_vendor_class_table *vendor; 1625 struct ib_mad_mgmt_vendor_class *vendor_class; 1626 int index; 1627 u8 mgmt_class; 1628 1629 /* 1630 * Was MAD registration request supplied 1631 * with original registration ? 1632 */ 1633 if (!agent_priv->reg_req) { 1634 goto out; 1635 } 1636 1637 port_priv = agent_priv->qp_info->port_priv; 1638 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); 1639 class = port_priv->version[ 1640 agent_priv->reg_req->mgmt_class_version].class; 1641 if (!class) 1642 goto vendor_check; 1643 1644 method = class->method_table[mgmt_class]; 1645 if (method) { 1646 /* Remove any methods for this mad agent */ 1647 remove_methods_mad_agent(method, agent_priv); 1648 /* Now, check to see if there are any methods still in use */ 1649 if (!check_method_table(method)) { 1650 /* If not, release management method table */ 1651 kfree(method); 1652 class->method_table[mgmt_class] = NULL; 1653 /* Any management classes left ? 
*/ 1654 if (!check_class_table(class)) { 1655 /* If not, release management class table */ 1656 kfree(class); 1657 port_priv->version[ 1658 agent_priv->reg_req-> 1659 mgmt_class_version].class = NULL; 1660 } 1661 } 1662 } 1663 1664 vendor_check: 1665 if (!is_vendor_class(mgmt_class)) 1666 goto out; 1667 1668 /* normalize mgmt_class to vendor range 2 */ 1669 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); 1670 vendor = port_priv->version[ 1671 agent_priv->reg_req->mgmt_class_version].vendor; 1672 1673 if (!vendor) 1674 goto out; 1675 1676 vendor_class = vendor->vendor_class[mgmt_class]; 1677 if (vendor_class) { 1678 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); 1679 if (index < 0) 1680 goto out; 1681 method = vendor_class->method_table[index]; 1682 if (method) { 1683 /* Remove any methods for this mad agent */ 1684 remove_methods_mad_agent(method, agent_priv); 1685 /* 1686 * Now, check to see if there are 1687 * any methods still in use 1688 */ 1689 if (!check_method_table(method)) { 1690 /* If not, release management method table */ 1691 kfree(method); 1692 vendor_class->method_table[index] = NULL; 1693 memset(vendor_class->oui[index], 0, 3); 1694 /* Any OUIs left ? */ 1695 if (!check_vendor_class(vendor_class)) { 1696 /* If not, release vendor class table */ 1697 kfree(vendor_class); 1698 vendor->vendor_class[mgmt_class] = NULL; 1699 /* Any other vendor classes left ? */ 1700 if (!check_vendor_table(vendor)) { 1701 kfree(vendor); 1702 port_priv->version[ 1703 agent_priv->reg_req-> 1704 mgmt_class_version]. 1705 vendor = NULL; 1706 } 1707 } 1708 } 1709 } 1710 } 1711 1712 out: 1713 return; 1714 } 1715 1716 static struct ib_mad_agent_private * 1717 find_mad_agent(struct ib_mad_port_private *port_priv, 1718 const struct ib_mad_hdr *mad_hdr) 1719 { 1720 struct ib_mad_agent_private *mad_agent = NULL; 1721 unsigned long flags; 1722 1723 spin_lock_irqsave(&port_priv->reg_lock, flags); 1724 if (ib_response_mad(mad_hdr)) { 1725 u32 hi_tid; 1726 struct ib_mad_agent_private *entry; 1727 1728 /* 1729 * Routing is based on high 32 bits of transaction ID 1730 * of MAD. 
1731 */ 1732 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; 1733 list_for_each_entry(entry, &port_priv->agent_list, agent_list) { 1734 if (entry->agent.hi_tid == hi_tid) { 1735 mad_agent = entry; 1736 break; 1737 } 1738 } 1739 } else { 1740 struct ib_mad_mgmt_class_table *class; 1741 struct ib_mad_mgmt_method_table *method; 1742 struct ib_mad_mgmt_vendor_class_table *vendor; 1743 struct ib_mad_mgmt_vendor_class *vendor_class; 1744 const struct ib_vendor_mad *vendor_mad; 1745 int index; 1746 1747 /* 1748 * Routing is based on version, class, and method 1749 * For "newer" vendor MADs, also based on OUI 1750 */ 1751 if (mad_hdr->class_version >= MAX_MGMT_VERSION) 1752 goto out; 1753 if (!is_vendor_class(mad_hdr->mgmt_class)) { 1754 class = port_priv->version[ 1755 mad_hdr->class_version].class; 1756 if (!class) 1757 goto out; 1758 if (convert_mgmt_class(mad_hdr->mgmt_class) >= 1759 ARRAY_SIZE(class->method_table)) 1760 goto out; 1761 method = class->method_table[convert_mgmt_class( 1762 mad_hdr->mgmt_class)]; 1763 if (method) 1764 mad_agent = method->agent[mad_hdr->method & 1765 ~IB_MGMT_METHOD_RESP]; 1766 } else { 1767 vendor = port_priv->version[ 1768 mad_hdr->class_version].vendor; 1769 if (!vendor) 1770 goto out; 1771 vendor_class = vendor->vendor_class[vendor_class_index( 1772 mad_hdr->mgmt_class)]; 1773 if (!vendor_class) 1774 goto out; 1775 /* Find matching OUI */ 1776 vendor_mad = (const struct ib_vendor_mad *)mad_hdr; 1777 index = find_vendor_oui(vendor_class, vendor_mad->oui); 1778 if (index == -1) 1779 goto out; 1780 method = vendor_class->method_table[index]; 1781 if (method) { 1782 mad_agent = method->agent[mad_hdr->method & 1783 ~IB_MGMT_METHOD_RESP]; 1784 } 1785 } 1786 } 1787 1788 if (mad_agent) { 1789 if (mad_agent->agent.recv_handler) 1790 atomic_inc(&mad_agent->refcount); 1791 else { 1792 dev_notice(&port_priv->device->dev, 1793 "No receive handler for client %p on port %d\n", 1794 &mad_agent->agent, port_priv->port_num); 1795 mad_agent = NULL; 1796 } 1797 } 1798 out: 1799 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 1800 1801 return mad_agent; 1802 } 1803 1804 static int validate_mad(const struct ib_mad_hdr *mad_hdr, 1805 const struct ib_mad_qp_info *qp_info, 1806 bool opa) 1807 { 1808 int valid = 0; 1809 u32 qp_num = qp_info->qp->qp_num; 1810 1811 /* Make sure MAD base version is understood */ 1812 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && 1813 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { 1814 pr_err("MAD received with unsupported base version %d %s\n", 1815 mad_hdr->base_version, opa ? 
"(opa)" : ""); 1816 goto out; 1817 } 1818 1819 /* Filter SMI packets sent to other than QP0 */ 1820 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || 1821 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { 1822 if (qp_num == 0) 1823 valid = 1; 1824 } else { 1825 /* CM attributes other than ClassPortInfo only use Send method */ 1826 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && 1827 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && 1828 (mad_hdr->method != IB_MGMT_METHOD_SEND)) 1829 goto out; 1830 /* Filter GSI packets sent to QP0 */ 1831 if (qp_num != 0) 1832 valid = 1; 1833 } 1834 1835 out: 1836 return valid; 1837 } 1838 1839 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, 1840 const struct ib_mad_hdr *mad_hdr) 1841 { 1842 struct ib_rmpp_mad *rmpp_mad; 1843 1844 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; 1845 return !mad_agent_priv->agent.rmpp_version || 1846 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || 1847 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 1848 IB_MGMT_RMPP_FLAG_ACTIVE) || 1849 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); 1850 } 1851 1852 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, 1853 const struct ib_mad_recv_wc *rwc) 1854 { 1855 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == 1856 rwc->recv_buf.mad->mad_hdr.mgmt_class; 1857 } 1858 1859 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, 1860 const struct ib_mad_send_wr_private *wr, 1861 const struct ib_mad_recv_wc *rwc ) 1862 { 1863 struct rdma_ah_attr attr; 1864 u8 send_resp, rcv_resp; 1865 union ib_gid sgid; 1866 struct ib_device *device = mad_agent_priv->agent.device; 1867 u8 port_num = mad_agent_priv->agent.port_num; 1868 u8 lmc; 1869 bool has_grh; 1870 1871 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); 1872 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); 1873 1874 if (send_resp == rcv_resp) 1875 /* both requests, or both responses. GIDs different */ 1876 return 0; 1877 1878 if (rdma_query_ah(wr->send_buf.ah, &attr)) 1879 /* Assume not equal, to avoid false positives. */ 1880 return 0; 1881 1882 has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH); 1883 if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH)) 1884 /* one has GID, other does not. Assume different */ 1885 return 0; 1886 1887 if (!send_resp && rcv_resp) { 1888 /* is request/response. 
*/ 1889 if (!has_grh) { 1890 if (ib_get_cached_lmc(device, port_num, &lmc)) 1891 return 0; 1892 return (!lmc || !((rdma_ah_get_path_bits(&attr) ^ 1893 rwc->wc->dlid_path_bits) & 1894 ((1 << lmc) - 1))); 1895 } else { 1896 const struct ib_global_route *grh = 1897 rdma_ah_read_grh(&attr); 1898 1899 if (ib_get_cached_gid(device, port_num, 1900 grh->sgid_index, &sgid, NULL)) 1901 return 0; 1902 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, 1903 16); 1904 } 1905 } 1906 1907 if (!has_grh) 1908 return rdma_ah_get_dlid(&attr) == rwc->wc->slid; 1909 else 1910 return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw, 1911 rwc->recv_buf.grh->sgid.raw, 1912 16); 1913 } 1914 1915 static inline int is_direct(u8 class) 1916 { 1917 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); 1918 } 1919 1920 struct ib_mad_send_wr_private* 1921 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, 1922 const struct ib_mad_recv_wc *wc) 1923 { 1924 struct ib_mad_send_wr_private *wr; 1925 const struct ib_mad_hdr *mad_hdr; 1926 1927 mad_hdr = &wc->recv_buf.mad->mad_hdr; 1928 1929 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { 1930 if ((wr->tid == mad_hdr->tid) && 1931 rcv_has_same_class(wr, wc) && 1932 /* 1933 * Don't check GID for direct routed MADs. 1934 * These might have permissive LIDs. 1935 */ 1936 (is_direct(mad_hdr->mgmt_class) || 1937 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1938 return (wr->status == IB_WC_SUCCESS) ? wr : NULL; 1939 } 1940 1941 /* 1942 * It's possible to receive the response before we've 1943 * been notified that the send has completed 1944 */ 1945 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { 1946 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && 1947 wr->tid == mad_hdr->tid && 1948 wr->timeout && 1949 rcv_has_same_class(wr, wc) && 1950 /* 1951 * Don't check GID for direct routed MADs. 1952 * These might have permissive LIDs. 1953 */ 1954 (is_direct(mad_hdr->mgmt_class) || 1955 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1956 /* Verify request has not been canceled */ 1957 return (wr->status == IB_WC_SUCCESS) ? 
wr : NULL; 1958 } 1959 return NULL; 1960 } 1961 1962 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) 1963 { 1964 mad_send_wr->timeout = 0; 1965 if (mad_send_wr->refcount == 1) 1966 list_move_tail(&mad_send_wr->agent_list, 1967 &mad_send_wr->mad_agent_priv->done_list); 1968 } 1969 1970 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, 1971 struct ib_mad_recv_wc *mad_recv_wc) 1972 { 1973 struct ib_mad_send_wr_private *mad_send_wr; 1974 struct ib_mad_send_wc mad_send_wc; 1975 unsigned long flags; 1976 int ret; 1977 1978 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 1979 ret = ib_mad_enforce_security(mad_agent_priv, 1980 mad_recv_wc->wc->pkey_index); 1981 if (ret) { 1982 ib_free_recv_mad(mad_recv_wc); 1983 deref_mad_agent(mad_agent_priv); 1984 return; 1985 } 1986 1987 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); 1988 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 1989 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, 1990 mad_recv_wc); 1991 if (!mad_recv_wc) { 1992 deref_mad_agent(mad_agent_priv); 1993 return; 1994 } 1995 } 1996 1997 /* Complete corresponding request */ 1998 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { 1999 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2000 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); 2001 if (!mad_send_wr) { 2002 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2003 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) 2004 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) 2005 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) 2006 & IB_MGMT_RMPP_FLAG_ACTIVE)) { 2007 /* user rmpp is in effect 2008 * and this is an active RMPP MAD 2009 */ 2010 mad_agent_priv->agent.recv_handler( 2011 &mad_agent_priv->agent, NULL, 2012 mad_recv_wc); 2013 atomic_dec(&mad_agent_priv->refcount); 2014 } else { 2015 /* not user rmpp, revert to normal behavior and 2016 * drop the mad */ 2017 ib_free_recv_mad(mad_recv_wc); 2018 deref_mad_agent(mad_agent_priv); 2019 return; 2020 } 2021 } else { 2022 ib_mark_mad_done(mad_send_wr); 2023 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2024 2025 /* Defined behavior is to complete response before request */ 2026 mad_agent_priv->agent.recv_handler( 2027 &mad_agent_priv->agent, 2028 &mad_send_wr->send_buf, 2029 mad_recv_wc); 2030 atomic_dec(&mad_agent_priv->refcount); 2031 2032 mad_send_wc.status = IB_WC_SUCCESS; 2033 mad_send_wc.vendor_err = 0; 2034 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2035 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2036 } 2037 } else { 2038 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL, 2039 mad_recv_wc); 2040 deref_mad_agent(mad_agent_priv); 2041 } 2042 2043 return; 2044 } 2045 2046 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, 2047 const struct ib_mad_qp_info *qp_info, 2048 const struct ib_wc *wc, 2049 int port_num, 2050 struct ib_mad_private *recv, 2051 struct ib_mad_private *response) 2052 { 2053 enum smi_forward_action retsmi; 2054 struct ib_smp *smp = (struct ib_smp *)recv->mad; 2055 2056 if (smi_handle_dr_smp_recv(smp, 2057 rdma_cap_ib_switch(port_priv->device), 2058 port_num, 2059 port_priv->device->phys_port_cnt) == 2060 IB_SMI_DISCARD) 2061 return IB_SMI_DISCARD; 2062 2063 retsmi = smi_check_forward_dr_smp(smp); 2064 if (retsmi == IB_SMI_LOCAL) 2065 return IB_SMI_HANDLE; 2066 2067 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2068 if (smi_handle_dr_smp_send(smp, 2069 
rdma_cap_ib_switch(port_priv->device), 2070 port_num) == IB_SMI_DISCARD) 2071 return IB_SMI_DISCARD; 2072 2073 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) 2074 return IB_SMI_DISCARD; 2075 } else if (rdma_cap_ib_switch(port_priv->device)) { 2076 /* forward case for switches */ 2077 memcpy(response, recv, mad_priv_size(response)); 2078 response->header.recv_wc.wc = &response->header.wc; 2079 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2080 response->header.recv_wc.recv_buf.grh = &response->grh; 2081 2082 agent_send_response((const struct ib_mad_hdr *)response->mad, 2083 &response->grh, wc, 2084 port_priv->device, 2085 smi_get_fwd_port(smp), 2086 qp_info->qp->qp_num, 2087 response->mad_size, 2088 false); 2089 2090 return IB_SMI_DISCARD; 2091 } 2092 return IB_SMI_HANDLE; 2093 } 2094 2095 static bool generate_unmatched_resp(const struct ib_mad_private *recv, 2096 struct ib_mad_private *response, 2097 size_t *resp_len, bool opa) 2098 { 2099 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; 2100 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; 2101 2102 if (recv_hdr->method == IB_MGMT_METHOD_GET || 2103 recv_hdr->method == IB_MGMT_METHOD_SET) { 2104 memcpy(response, recv, mad_priv_size(response)); 2105 response->header.recv_wc.wc = &response->header.wc; 2106 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2107 response->header.recv_wc.recv_buf.grh = &response->grh; 2108 resp_hdr->method = IB_MGMT_METHOD_GET_RESP; 2109 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); 2110 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2111 resp_hdr->status |= IB_SMP_DIRECTION; 2112 2113 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { 2114 if (recv_hdr->mgmt_class == 2115 IB_MGMT_CLASS_SUBN_LID_ROUTED || 2116 recv_hdr->mgmt_class == 2117 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2118 *resp_len = opa_get_smp_header_size( 2119 (struct opa_smp *)recv->mad); 2120 else 2121 *resp_len = sizeof(struct ib_mad_hdr); 2122 } 2123 2124 return true; 2125 } else { 2126 return false; 2127 } 2128 } 2129 2130 static enum smi_action 2131 handle_opa_smi(struct ib_mad_port_private *port_priv, 2132 struct ib_mad_qp_info *qp_info, 2133 struct ib_wc *wc, 2134 int port_num, 2135 struct ib_mad_private *recv, 2136 struct ib_mad_private *response) 2137 { 2138 enum smi_forward_action retsmi; 2139 struct opa_smp *smp = (struct opa_smp *)recv->mad; 2140 2141 if (opa_smi_handle_dr_smp_recv(smp, 2142 rdma_cap_ib_switch(port_priv->device), 2143 port_num, 2144 port_priv->device->phys_port_cnt) == 2145 IB_SMI_DISCARD) 2146 return IB_SMI_DISCARD; 2147 2148 retsmi = opa_smi_check_forward_dr_smp(smp); 2149 if (retsmi == IB_SMI_LOCAL) 2150 return IB_SMI_HANDLE; 2151 2152 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2153 if (opa_smi_handle_dr_smp_send(smp, 2154 rdma_cap_ib_switch(port_priv->device), 2155 port_num) == IB_SMI_DISCARD) 2156 return IB_SMI_DISCARD; 2157 2158 if (opa_smi_check_local_smp(smp, port_priv->device) == 2159 IB_SMI_DISCARD) 2160 return IB_SMI_DISCARD; 2161 2162 } else if (rdma_cap_ib_switch(port_priv->device)) { 2163 /* forward case for switches */ 2164 memcpy(response, recv, mad_priv_size(response)); 2165 response->header.recv_wc.wc = &response->header.wc; 2166 response->header.recv_wc.recv_buf.opa_mad = 2167 (struct opa_mad *)response->mad; 2168 response->header.recv_wc.recv_buf.grh = &response->grh; 2169 2170 agent_send_response((const struct ib_mad_hdr 
*)response->mad, 2171 &response->grh, wc, 2172 port_priv->device, 2173 opa_smi_get_fwd_port(smp), 2174 qp_info->qp->qp_num, 2175 recv->header.wc.byte_len, 2176 true); 2177 2178 return IB_SMI_DISCARD; 2179 } 2180 2181 return IB_SMI_HANDLE; 2182 } 2183 2184 static enum smi_action 2185 handle_smi(struct ib_mad_port_private *port_priv, 2186 struct ib_mad_qp_info *qp_info, 2187 struct ib_wc *wc, 2188 int port_num, 2189 struct ib_mad_private *recv, 2190 struct ib_mad_private *response, 2191 bool opa) 2192 { 2193 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; 2194 2195 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && 2196 mad_hdr->class_version == OPA_SM_CLASS_VERSION) 2197 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, 2198 response); 2199 2200 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); 2201 } 2202 2203 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) 2204 { 2205 struct ib_mad_port_private *port_priv = cq->cq_context; 2206 struct ib_mad_list_head *mad_list = 2207 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2208 struct ib_mad_qp_info *qp_info; 2209 struct ib_mad_private_header *mad_priv_hdr; 2210 struct ib_mad_private *recv, *response = NULL; 2211 struct ib_mad_agent_private *mad_agent; 2212 int port_num; 2213 int ret = IB_MAD_RESULT_SUCCESS; 2214 size_t mad_size; 2215 u16 resp_mad_pkey_index = 0; 2216 bool opa; 2217 2218 if (list_empty_careful(&port_priv->port_list)) 2219 return; 2220 2221 if (wc->status != IB_WC_SUCCESS) { 2222 /* 2223 * Receive errors indicate that the QP has entered the error 2224 * state - error handling/shutdown code will cleanup 2225 */ 2226 return; 2227 } 2228 2229 qp_info = mad_list->mad_queue->qp_info; 2230 dequeue_mad(mad_list); 2231 2232 opa = rdma_cap_opa_mad(qp_info->port_priv->device, 2233 qp_info->port_priv->port_num); 2234 2235 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, 2236 mad_list); 2237 recv = container_of(mad_priv_hdr, struct ib_mad_private, header); 2238 ib_dma_unmap_single(port_priv->device, 2239 recv->header.mapping, 2240 mad_priv_dma_size(recv), 2241 DMA_FROM_DEVICE); 2242 2243 /* Setup MAD receive work completion from "normal" work completion */ 2244 recv->header.wc = *wc; 2245 recv->header.recv_wc.wc = &recv->header.wc; 2246 2247 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { 2248 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); 2249 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2250 } else { 2251 recv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2252 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2253 } 2254 2255 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; 2256 recv->header.recv_wc.recv_buf.grh = &recv->grh; 2257 2258 if (atomic_read(&qp_info->snoop_count)) 2259 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); 2260 2261 /* Validate MAD */ 2262 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) 2263 goto out; 2264 2265 mad_size = recv->mad_size; 2266 response = alloc_mad_private(mad_size, GFP_KERNEL); 2267 if (!response) 2268 goto out; 2269 2270 if (rdma_cap_ib_switch(port_priv->device)) 2271 port_num = wc->port_num; 2272 else 2273 port_num = port_priv->port_num; 2274 2275 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == 2276 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 2277 if (handle_smi(port_priv, qp_info, wc, port_num, recv, 2278 response, opa) 2279 == IB_SMI_DISCARD) 2280 goto out; 2281 } 
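	/*
	 * The driver's process_mad() return value is handled just below:
	 * IB_MAD_RESULT_CONSUMED means the driver took the MAD and it is
	 * dropped here, IB_MAD_RESULT_REPLY means the driver built a
	 * response that is sent via agent_send_response(), and otherwise
	 * the MAD falls through to find_mad_agent() for normal dispatch.
	 */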
2282 2283 /* Give driver "right of first refusal" on incoming MAD */ 2284 if (port_priv->device->process_mad) { 2285 ret = port_priv->device->process_mad(port_priv->device, 0, 2286 port_priv->port_num, 2287 wc, &recv->grh, 2288 (const struct ib_mad_hdr *)recv->mad, 2289 recv->mad_size, 2290 (struct ib_mad_hdr *)response->mad, 2291 &mad_size, &resp_mad_pkey_index); 2292 2293 if (opa) 2294 wc->pkey_index = resp_mad_pkey_index; 2295 2296 if (ret & IB_MAD_RESULT_SUCCESS) { 2297 if (ret & IB_MAD_RESULT_CONSUMED) 2298 goto out; 2299 if (ret & IB_MAD_RESULT_REPLY) { 2300 agent_send_response((const struct ib_mad_hdr *)response->mad, 2301 &recv->grh, wc, 2302 port_priv->device, 2303 port_num, 2304 qp_info->qp->qp_num, 2305 mad_size, opa); 2306 goto out; 2307 } 2308 } 2309 } 2310 2311 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); 2312 if (mad_agent) { 2313 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); 2314 /* 2315 * recv is freed up in error cases in ib_mad_complete_recv 2316 * or via recv_handler in ib_mad_complete_recv() 2317 */ 2318 recv = NULL; 2319 } else if ((ret & IB_MAD_RESULT_SUCCESS) && 2320 generate_unmatched_resp(recv, response, &mad_size, opa)) { 2321 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, 2322 port_priv->device, port_num, 2323 qp_info->qp->qp_num, mad_size, opa); 2324 } 2325 2326 out: 2327 /* Post another receive request for this QP */ 2328 if (response) { 2329 ib_mad_post_receive_mads(qp_info, response); 2330 kfree(recv); 2331 } else 2332 ib_mad_post_receive_mads(qp_info, recv); 2333 } 2334 2335 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) 2336 { 2337 struct ib_mad_send_wr_private *mad_send_wr; 2338 unsigned long delay; 2339 2340 if (list_empty(&mad_agent_priv->wait_list)) { 2341 cancel_delayed_work(&mad_agent_priv->timed_work); 2342 } else { 2343 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2344 struct ib_mad_send_wr_private, 2345 agent_list); 2346 2347 if (time_after(mad_agent_priv->timeout, 2348 mad_send_wr->timeout)) { 2349 mad_agent_priv->timeout = mad_send_wr->timeout; 2350 delay = mad_send_wr->timeout - jiffies; 2351 if ((long)delay <= 0) 2352 delay = 1; 2353 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2354 &mad_agent_priv->timed_work, delay); 2355 } 2356 } 2357 } 2358 2359 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) 2360 { 2361 struct ib_mad_agent_private *mad_agent_priv; 2362 struct ib_mad_send_wr_private *temp_mad_send_wr; 2363 struct list_head *list_item; 2364 unsigned long delay; 2365 2366 mad_agent_priv = mad_send_wr->mad_agent_priv; 2367 list_del(&mad_send_wr->agent_list); 2368 2369 delay = mad_send_wr->timeout; 2370 mad_send_wr->timeout += jiffies; 2371 2372 if (delay) { 2373 list_for_each_prev(list_item, &mad_agent_priv->wait_list) { 2374 temp_mad_send_wr = list_entry(list_item, 2375 struct ib_mad_send_wr_private, 2376 agent_list); 2377 if (time_after(mad_send_wr->timeout, 2378 temp_mad_send_wr->timeout)) 2379 break; 2380 } 2381 } 2382 else 2383 list_item = &mad_agent_priv->wait_list; 2384 list_add(&mad_send_wr->agent_list, list_item); 2385 2386 /* Reschedule a work item if we have a shorter timeout */ 2387 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) 2388 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2389 &mad_agent_priv->timed_work, delay); 2390 } 2391 2392 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, 2393 int timeout_ms) 2394 { 2395 mad_send_wr->timeout 
= msecs_to_jiffies(timeout_ms); 2396 wait_for_response(mad_send_wr); 2397 } 2398 2399 /* 2400 * Process a send work completion 2401 */ 2402 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 2403 struct ib_mad_send_wc *mad_send_wc) 2404 { 2405 struct ib_mad_agent_private *mad_agent_priv; 2406 unsigned long flags; 2407 int ret; 2408 2409 mad_agent_priv = mad_send_wr->mad_agent_priv; 2410 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2411 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 2412 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); 2413 if (ret == IB_RMPP_RESULT_CONSUMED) 2414 goto done; 2415 } else 2416 ret = IB_RMPP_RESULT_UNHANDLED; 2417 2418 if (mad_send_wc->status != IB_WC_SUCCESS && 2419 mad_send_wr->status == IB_WC_SUCCESS) { 2420 mad_send_wr->status = mad_send_wc->status; 2421 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2422 } 2423 2424 if (--mad_send_wr->refcount > 0) { 2425 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && 2426 mad_send_wr->status == IB_WC_SUCCESS) { 2427 wait_for_response(mad_send_wr); 2428 } 2429 goto done; 2430 } 2431 2432 /* Remove send from MAD agent and notify client of completion */ 2433 list_del(&mad_send_wr->agent_list); 2434 adjust_timeout(mad_agent_priv); 2435 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2436 2437 if (mad_send_wr->status != IB_WC_SUCCESS ) 2438 mad_send_wc->status = mad_send_wr->status; 2439 if (ret == IB_RMPP_RESULT_INTERNAL) 2440 ib_rmpp_send_handler(mad_send_wc); 2441 else 2442 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2443 mad_send_wc); 2444 2445 /* Release reference on agent taken when sending */ 2446 deref_mad_agent(mad_agent_priv); 2447 return; 2448 done: 2449 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2450 } 2451 2452 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) 2453 { 2454 struct ib_mad_port_private *port_priv = cq->cq_context; 2455 struct ib_mad_list_head *mad_list = 2456 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2457 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; 2458 struct ib_mad_qp_info *qp_info; 2459 struct ib_mad_queue *send_queue; 2460 struct ib_send_wr *bad_send_wr; 2461 struct ib_mad_send_wc mad_send_wc; 2462 unsigned long flags; 2463 int ret; 2464 2465 if (list_empty_careful(&port_priv->port_list)) 2466 return; 2467 2468 if (wc->status != IB_WC_SUCCESS) { 2469 if (!ib_mad_send_error(port_priv, wc)) 2470 return; 2471 } 2472 2473 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2474 mad_list); 2475 send_queue = mad_list->mad_queue; 2476 qp_info = send_queue->qp_info; 2477 2478 retry: 2479 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2480 mad_send_wr->header_mapping, 2481 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2482 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2483 mad_send_wr->payload_mapping, 2484 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2485 queued_send_wr = NULL; 2486 spin_lock_irqsave(&send_queue->lock, flags); 2487 list_del(&mad_list->list); 2488 2489 /* Move queued send to the send queue */ 2490 if (send_queue->count-- > send_queue->max_active) { 2491 mad_list = container_of(qp_info->overflow_list.next, 2492 struct ib_mad_list_head, list); 2493 queued_send_wr = container_of(mad_list, 2494 struct ib_mad_send_wr_private, 2495 mad_list); 2496 list_move_tail(&mad_list->list, &send_queue->list); 2497 } 2498 spin_unlock_irqrestore(&send_queue->lock, flags); 2499 2500 mad_send_wc.send_buf = 
&mad_send_wr->send_buf; 2501 mad_send_wc.status = wc->status; 2502 mad_send_wc.vendor_err = wc->vendor_err; 2503 if (atomic_read(&qp_info->snoop_count)) 2504 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, 2505 IB_MAD_SNOOP_SEND_COMPLETIONS); 2506 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2507 2508 if (queued_send_wr) { 2509 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, 2510 &bad_send_wr); 2511 if (ret) { 2512 dev_err(&port_priv->device->dev, 2513 "ib_post_send failed: %d\n", ret); 2514 mad_send_wr = queued_send_wr; 2515 wc->status = IB_WC_LOC_QP_OP_ERR; 2516 goto retry; 2517 } 2518 } 2519 } 2520 2521 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) 2522 { 2523 struct ib_mad_send_wr_private *mad_send_wr; 2524 struct ib_mad_list_head *mad_list; 2525 unsigned long flags; 2526 2527 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 2528 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { 2529 mad_send_wr = container_of(mad_list, 2530 struct ib_mad_send_wr_private, 2531 mad_list); 2532 mad_send_wr->retry = 1; 2533 } 2534 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 2535 } 2536 2537 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, 2538 struct ib_wc *wc) 2539 { 2540 struct ib_mad_list_head *mad_list = 2541 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2542 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; 2543 struct ib_mad_send_wr_private *mad_send_wr; 2544 int ret; 2545 2546 /* 2547 * Send errors will transition the QP to SQE - move 2548 * QP to RTS and repost flushed work requests 2549 */ 2550 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2551 mad_list); 2552 if (wc->status == IB_WC_WR_FLUSH_ERR) { 2553 if (mad_send_wr->retry) { 2554 /* Repost send */ 2555 struct ib_send_wr *bad_send_wr; 2556 2557 mad_send_wr->retry = 0; 2558 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, 2559 &bad_send_wr); 2560 if (!ret) 2561 return false; 2562 } 2563 } else { 2564 struct ib_qp_attr *attr; 2565 2566 /* Transition QP to RTS and fail offending send */ 2567 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2568 if (attr) { 2569 attr->qp_state = IB_QPS_RTS; 2570 attr->cur_qp_state = IB_QPS_SQE; 2571 ret = ib_modify_qp(qp_info->qp, attr, 2572 IB_QP_STATE | IB_QP_CUR_STATE); 2573 kfree(attr); 2574 if (ret) 2575 dev_err(&port_priv->device->dev, 2576 "%s - ib_modify_qp to RTS: %d\n", 2577 __func__, ret); 2578 else 2579 mark_sends_for_retry(qp_info); 2580 } 2581 } 2582 2583 return true; 2584 } 2585 2586 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) 2587 { 2588 unsigned long flags; 2589 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; 2590 struct ib_mad_send_wc mad_send_wc; 2591 struct list_head cancel_list; 2592 2593 INIT_LIST_HEAD(&cancel_list); 2594 2595 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2596 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2597 &mad_agent_priv->send_list, agent_list) { 2598 if (mad_send_wr->status == IB_WC_SUCCESS) { 2599 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2600 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2601 } 2602 } 2603 2604 /* Empty wait list to prevent receives from finding a request */ 2605 list_splice_init(&mad_agent_priv->wait_list, &cancel_list); 2606 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2607 2608 /* Report all cancelled requests */ 2609 mad_send_wc.status = IB_WC_WR_FLUSH_ERR; 2610 mad_send_wc.vendor_err = 0; 2611 2612 
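	/*
	 * The agent lock has been released, so the client's send_handler
	 * can be invoked directly for each cancelled request; every
	 * completion below also drops the agent reference taken when the
	 * MAD was queued for sending.
	 */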
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2613 &cancel_list, agent_list) { 2614 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2615 list_del(&mad_send_wr->agent_list); 2616 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2617 &mad_send_wc); 2618 atomic_dec(&mad_agent_priv->refcount); 2619 } 2620 } 2621 2622 static struct ib_mad_send_wr_private* 2623 find_send_wr(struct ib_mad_agent_private *mad_agent_priv, 2624 struct ib_mad_send_buf *send_buf) 2625 { 2626 struct ib_mad_send_wr_private *mad_send_wr; 2627 2628 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 2629 agent_list) { 2630 if (&mad_send_wr->send_buf == send_buf) 2631 return mad_send_wr; 2632 } 2633 2634 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 2635 agent_list) { 2636 if (is_rmpp_data_mad(mad_agent_priv, 2637 mad_send_wr->send_buf.mad) && 2638 &mad_send_wr->send_buf == send_buf) 2639 return mad_send_wr; 2640 } 2641 return NULL; 2642 } 2643 2644 int ib_modify_mad(struct ib_mad_agent *mad_agent, 2645 struct ib_mad_send_buf *send_buf, u32 timeout_ms) 2646 { 2647 struct ib_mad_agent_private *mad_agent_priv; 2648 struct ib_mad_send_wr_private *mad_send_wr; 2649 unsigned long flags; 2650 int active; 2651 2652 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 2653 agent); 2654 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2655 mad_send_wr = find_send_wr(mad_agent_priv, send_buf); 2656 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { 2657 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2658 return -EINVAL; 2659 } 2660 2661 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); 2662 if (!timeout_ms) { 2663 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2664 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2665 } 2666 2667 mad_send_wr->send_buf.timeout_ms = timeout_ms; 2668 if (active) 2669 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2670 else 2671 ib_reset_mad_timeout(mad_send_wr, timeout_ms); 2672 2673 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2674 return 0; 2675 } 2676 EXPORT_SYMBOL(ib_modify_mad); 2677 2678 void ib_cancel_mad(struct ib_mad_agent *mad_agent, 2679 struct ib_mad_send_buf *send_buf) 2680 { 2681 ib_modify_mad(mad_agent, send_buf, 0); 2682 } 2683 EXPORT_SYMBOL(ib_cancel_mad); 2684 2685 static void local_completions(struct work_struct *work) 2686 { 2687 struct ib_mad_agent_private *mad_agent_priv; 2688 struct ib_mad_local_private *local; 2689 struct ib_mad_agent_private *recv_mad_agent; 2690 unsigned long flags; 2691 int free_mad; 2692 struct ib_wc wc; 2693 struct ib_mad_send_wc mad_send_wc; 2694 bool opa; 2695 2696 mad_agent_priv = 2697 container_of(work, struct ib_mad_agent_private, local_work); 2698 2699 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 2700 mad_agent_priv->qp_info->port_priv->port_num); 2701 2702 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2703 while (!list_empty(&mad_agent_priv->local_list)) { 2704 local = list_entry(mad_agent_priv->local_list.next, 2705 struct ib_mad_local_private, 2706 completion_list); 2707 list_del(&local->completion_list); 2708 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2709 free_mad = 0; 2710 if (local->mad_priv) { 2711 u8 base_version; 2712 recv_mad_agent = local->recv_mad_agent; 2713 if (!recv_mad_agent) { 2714 dev_err(&mad_agent_priv->agent.device->dev, 2715 "No receive MAD agent for local completion\n"); 2716 free_mad = 1; 2717 goto local_send_completion; 2718 } 2719 2720 /* 2721 * Defined behavior is to complete response 
2722 * before request 2723 */ 2724 build_smp_wc(recv_mad_agent->agent.qp, 2725 local->mad_send_wr->send_wr.wr.wr_cqe, 2726 be16_to_cpu(IB_LID_PERMISSIVE), 2727 local->mad_send_wr->send_wr.pkey_index, 2728 recv_mad_agent->agent.port_num, &wc); 2729 2730 local->mad_priv->header.recv_wc.wc = &wc; 2731 2732 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; 2733 if (opa && base_version == OPA_MGMT_BASE_VERSION) { 2734 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; 2735 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2736 } else { 2737 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2738 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2739 } 2740 2741 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); 2742 list_add(&local->mad_priv->header.recv_wc.recv_buf.list, 2743 &local->mad_priv->header.recv_wc.rmpp_list); 2744 local->mad_priv->header.recv_wc.recv_buf.grh = NULL; 2745 local->mad_priv->header.recv_wc.recv_buf.mad = 2746 (struct ib_mad *)local->mad_priv->mad; 2747 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) 2748 snoop_recv(recv_mad_agent->qp_info, 2749 &local->mad_priv->header.recv_wc, 2750 IB_MAD_SNOOP_RECVS); 2751 recv_mad_agent->agent.recv_handler( 2752 &recv_mad_agent->agent, 2753 &local->mad_send_wr->send_buf, 2754 &local->mad_priv->header.recv_wc); 2755 spin_lock_irqsave(&recv_mad_agent->lock, flags); 2756 atomic_dec(&recv_mad_agent->refcount); 2757 spin_unlock_irqrestore(&recv_mad_agent->lock, flags); 2758 } 2759 2760 local_send_completion: 2761 /* Complete send */ 2762 mad_send_wc.status = IB_WC_SUCCESS; 2763 mad_send_wc.vendor_err = 0; 2764 mad_send_wc.send_buf = &local->mad_send_wr->send_buf; 2765 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) 2766 snoop_send(mad_agent_priv->qp_info, 2767 &local->mad_send_wr->send_buf, 2768 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); 2769 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2770 &mad_send_wc); 2771 2772 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2773 atomic_dec(&mad_agent_priv->refcount); 2774 if (free_mad) 2775 kfree(local->mad_priv); 2776 kfree(local); 2777 } 2778 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2779 } 2780 2781 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) 2782 { 2783 int ret; 2784 2785 if (!mad_send_wr->retries_left) 2786 return -ETIMEDOUT; 2787 2788 mad_send_wr->retries_left--; 2789 mad_send_wr->send_buf.retries++; 2790 2791 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); 2792 2793 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { 2794 ret = ib_retry_rmpp(mad_send_wr); 2795 switch (ret) { 2796 case IB_RMPP_RESULT_UNHANDLED: 2797 ret = ib_send_mad(mad_send_wr); 2798 break; 2799 case IB_RMPP_RESULT_CONSUMED: 2800 ret = 0; 2801 break; 2802 default: 2803 ret = -ECOMM; 2804 break; 2805 } 2806 } else 2807 ret = ib_send_mad(mad_send_wr); 2808 2809 if (!ret) { 2810 mad_send_wr->refcount++; 2811 list_add_tail(&mad_send_wr->agent_list, 2812 &mad_send_wr->mad_agent_priv->send_list); 2813 } 2814 return ret; 2815 } 2816 2817 static void timeout_sends(struct work_struct *work) 2818 { 2819 struct ib_mad_agent_private *mad_agent_priv; 2820 struct ib_mad_send_wr_private *mad_send_wr; 2821 struct ib_mad_send_wc mad_send_wc; 2822 unsigned long flags, delay; 2823 2824 mad_agent_priv = container_of(work, struct ib_mad_agent_private, 2825 timed_work.work); 2826 mad_send_wc.vendor_err = 0; 2827 2828 
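	/*
	 * The wait list is kept sorted by expiry time (see
	 * wait_for_response()).  If the first entry has not timed out yet,
	 * requeue the delayed work for the remaining interval and stop;
	 * otherwise retry the send while retries remain, and only report a
	 * timed-out (or already failed) completion to the client once
	 * retrying is no longer possible.
	 */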
spin_lock_irqsave(&mad_agent_priv->lock, flags); 2829 while (!list_empty(&mad_agent_priv->wait_list)) { 2830 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2831 struct ib_mad_send_wr_private, 2832 agent_list); 2833 2834 if (time_after(mad_send_wr->timeout, jiffies)) { 2835 delay = mad_send_wr->timeout - jiffies; 2836 if ((long)delay <= 0) 2837 delay = 1; 2838 queue_delayed_work(mad_agent_priv->qp_info-> 2839 port_priv->wq, 2840 &mad_agent_priv->timed_work, delay); 2841 break; 2842 } 2843 2844 list_del(&mad_send_wr->agent_list); 2845 if (mad_send_wr->status == IB_WC_SUCCESS && 2846 !retry_send(mad_send_wr)) 2847 continue; 2848 2849 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2850 2851 if (mad_send_wr->status == IB_WC_SUCCESS) 2852 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; 2853 else 2854 mad_send_wc.status = mad_send_wr->status; 2855 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2856 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2857 &mad_send_wc); 2858 2859 atomic_dec(&mad_agent_priv->refcount); 2860 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2861 } 2862 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2863 } 2864 2865 /* 2866 * Allocate receive MADs and post receive WRs for them 2867 */ 2868 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 2869 struct ib_mad_private *mad) 2870 { 2871 unsigned long flags; 2872 int post, ret; 2873 struct ib_mad_private *mad_priv; 2874 struct ib_sge sg_list; 2875 struct ib_recv_wr recv_wr, *bad_recv_wr; 2876 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; 2877 2878 /* Initialize common scatter list fields */ 2879 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; 2880 2881 /* Initialize common receive WR fields */ 2882 recv_wr.next = NULL; 2883 recv_wr.sg_list = &sg_list; 2884 recv_wr.num_sge = 1; 2885 2886 do { 2887 /* Allocate and map receive buffer */ 2888 if (mad) { 2889 mad_priv = mad; 2890 mad = NULL; 2891 } else { 2892 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), 2893 GFP_ATOMIC); 2894 if (!mad_priv) { 2895 ret = -ENOMEM; 2896 break; 2897 } 2898 } 2899 sg_list.length = mad_priv_dma_size(mad_priv); 2900 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, 2901 &mad_priv->grh, 2902 mad_priv_dma_size(mad_priv), 2903 DMA_FROM_DEVICE); 2904 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, 2905 sg_list.addr))) { 2906 ret = -ENOMEM; 2907 break; 2908 } 2909 mad_priv->header.mapping = sg_list.addr; 2910 mad_priv->header.mad_list.mad_queue = recv_queue; 2911 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; 2912 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; 2913 2914 /* Post receive WR */ 2915 spin_lock_irqsave(&recv_queue->lock, flags); 2916 post = (++recv_queue->count < recv_queue->max_active); 2917 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); 2918 spin_unlock_irqrestore(&recv_queue->lock, flags); 2919 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); 2920 if (ret) { 2921 spin_lock_irqsave(&recv_queue->lock, flags); 2922 list_del(&mad_priv->header.mad_list.list); 2923 recv_queue->count--; 2924 spin_unlock_irqrestore(&recv_queue->lock, flags); 2925 ib_dma_unmap_single(qp_info->port_priv->device, 2926 mad_priv->header.mapping, 2927 mad_priv_dma_size(mad_priv), 2928 DMA_FROM_DEVICE); 2929 kfree(mad_priv); 2930 dev_err(&qp_info->port_priv->device->dev, 2931 "ib_post_recv failed: %d\n", ret); 2932 break; 2933 } 2934 } while (post); 2935 2936 return ret; 2937 } 2938 2939 /* 2940 * Return all the posted receive 
MADs 2941 */ 2942 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) 2943 { 2944 struct ib_mad_private_header *mad_priv_hdr; 2945 struct ib_mad_private *recv; 2946 struct ib_mad_list_head *mad_list; 2947 2948 if (!qp_info->qp) 2949 return; 2950 2951 while (!list_empty(&qp_info->recv_queue.list)) { 2952 2953 mad_list = list_entry(qp_info->recv_queue.list.next, 2954 struct ib_mad_list_head, list); 2955 mad_priv_hdr = container_of(mad_list, 2956 struct ib_mad_private_header, 2957 mad_list); 2958 recv = container_of(mad_priv_hdr, struct ib_mad_private, 2959 header); 2960 2961 /* Remove from posted receive MAD list */ 2962 list_del(&mad_list->list); 2963 2964 ib_dma_unmap_single(qp_info->port_priv->device, 2965 recv->header.mapping, 2966 mad_priv_dma_size(recv), 2967 DMA_FROM_DEVICE); 2968 kfree(recv); 2969 } 2970 2971 qp_info->recv_queue.count = 0; 2972 } 2973 2974 /* 2975 * Start the port 2976 */ 2977 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) 2978 { 2979 int ret, i; 2980 struct ib_qp_attr *attr; 2981 struct ib_qp *qp; 2982 u16 pkey_index; 2983 2984 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2985 if (!attr) 2986 return -ENOMEM; 2987 2988 ret = ib_find_pkey(port_priv->device, port_priv->port_num, 2989 IB_DEFAULT_PKEY_FULL, &pkey_index); 2990 if (ret) 2991 pkey_index = 0; 2992 2993 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 2994 qp = port_priv->qp_info[i].qp; 2995 if (!qp) 2996 continue; 2997 2998 /* 2999 * PKey index for QP1 is irrelevant but 3000 * one is needed for the Reset to Init transition 3001 */ 3002 attr->qp_state = IB_QPS_INIT; 3003 attr->pkey_index = pkey_index; 3004 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; 3005 ret = ib_modify_qp(qp, attr, IB_QP_STATE | 3006 IB_QP_PKEY_INDEX | IB_QP_QKEY); 3007 if (ret) { 3008 dev_err(&port_priv->device->dev, 3009 "Couldn't change QP%d state to INIT: %d\n", 3010 i, ret); 3011 goto out; 3012 } 3013 3014 attr->qp_state = IB_QPS_RTR; 3015 ret = ib_modify_qp(qp, attr, IB_QP_STATE); 3016 if (ret) { 3017 dev_err(&port_priv->device->dev, 3018 "Couldn't change QP%d state to RTR: %d\n", 3019 i, ret); 3020 goto out; 3021 } 3022 3023 attr->qp_state = IB_QPS_RTS; 3024 attr->sq_psn = IB_MAD_SEND_Q_PSN; 3025 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); 3026 if (ret) { 3027 dev_err(&port_priv->device->dev, 3028 "Couldn't change QP%d state to RTS: %d\n", 3029 i, ret); 3030 goto out; 3031 } 3032 } 3033 3034 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 3035 if (ret) { 3036 dev_err(&port_priv->device->dev, 3037 "Failed to request completion notification: %d\n", 3038 ret); 3039 goto out; 3040 } 3041 3042 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 3043 if (!port_priv->qp_info[i].qp) 3044 continue; 3045 3046 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); 3047 if (ret) { 3048 dev_err(&port_priv->device->dev, 3049 "Couldn't post receive WRs\n"); 3050 goto out; 3051 } 3052 } 3053 out: 3054 kfree(attr); 3055 return ret; 3056 } 3057 3058 static void qp_event_handler(struct ib_event *event, void *qp_context) 3059 { 3060 struct ib_mad_qp_info *qp_info = qp_context; 3061 3062 /* It's worse than that! He's dead, Jim! 
*/ 3063 dev_err(&qp_info->port_priv->device->dev, 3064 "Fatal error (%d) on MAD QP (%d)\n", 3065 event->event, qp_info->qp->qp_num); 3066 } 3067 3068 static void init_mad_queue(struct ib_mad_qp_info *qp_info, 3069 struct ib_mad_queue *mad_queue) 3070 { 3071 mad_queue->qp_info = qp_info; 3072 mad_queue->count = 0; 3073 spin_lock_init(&mad_queue->lock); 3074 INIT_LIST_HEAD(&mad_queue->list); 3075 } 3076 3077 static void init_mad_qp(struct ib_mad_port_private *port_priv, 3078 struct ib_mad_qp_info *qp_info) 3079 { 3080 qp_info->port_priv = port_priv; 3081 init_mad_queue(qp_info, &qp_info->send_queue); 3082 init_mad_queue(qp_info, &qp_info->recv_queue); 3083 INIT_LIST_HEAD(&qp_info->overflow_list); 3084 spin_lock_init(&qp_info->snoop_lock); 3085 qp_info->snoop_table = NULL; 3086 qp_info->snoop_table_size = 0; 3087 atomic_set(&qp_info->snoop_count, 0); 3088 } 3089 3090 static int create_mad_qp(struct ib_mad_qp_info *qp_info, 3091 enum ib_qp_type qp_type) 3092 { 3093 struct ib_qp_init_attr qp_init_attr; 3094 int ret; 3095 3096 memset(&qp_init_attr, 0, sizeof qp_init_attr); 3097 qp_init_attr.send_cq = qp_info->port_priv->cq; 3098 qp_init_attr.recv_cq = qp_info->port_priv->cq; 3099 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 3100 qp_init_attr.cap.max_send_wr = mad_sendq_size; 3101 qp_init_attr.cap.max_recv_wr = mad_recvq_size; 3102 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; 3103 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; 3104 qp_init_attr.qp_type = qp_type; 3105 qp_init_attr.port_num = qp_info->port_priv->port_num; 3106 qp_init_attr.qp_context = qp_info; 3107 qp_init_attr.event_handler = qp_event_handler; 3108 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); 3109 if (IS_ERR(qp_info->qp)) { 3110 dev_err(&qp_info->port_priv->device->dev, 3111 "Couldn't create ib_mad QP%d\n", 3112 get_spl_qp_index(qp_type)); 3113 ret = PTR_ERR(qp_info->qp); 3114 goto error; 3115 } 3116 /* Use minimum queue sizes unless the CQ is resized */ 3117 qp_info->send_queue.max_active = mad_sendq_size; 3118 qp_info->recv_queue.max_active = mad_recvq_size; 3119 return 0; 3120 3121 error: 3122 return ret; 3123 } 3124 3125 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) 3126 { 3127 if (!qp_info->qp) 3128 return; 3129 3130 ib_destroy_qp(qp_info->qp); 3131 kfree(qp_info->snoop_table); 3132 } 3133 3134 /* 3135 * Open the port 3136 * Create the QP, PD, MR, and CQ if needed 3137 */ 3138 static int ib_mad_port_open(struct ib_device *device, 3139 int port_num) 3140 { 3141 int ret, cq_size; 3142 struct ib_mad_port_private *port_priv; 3143 unsigned long flags; 3144 char name[sizeof "ib_mad123"]; 3145 int has_smi; 3146 3147 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) 3148 return -EFAULT; 3149 3150 if (WARN_ON(rdma_cap_opa_mad(device, port_num) && 3151 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) 3152 return -EFAULT; 3153 3154 /* Create new device info */ 3155 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); 3156 if (!port_priv) 3157 return -ENOMEM; 3158 3159 port_priv->device = device; 3160 port_priv->port_num = port_num; 3161 spin_lock_init(&port_priv->reg_lock); 3162 INIT_LIST_HEAD(&port_priv->agent_list); 3163 init_mad_qp(port_priv, &port_priv->qp_info[0]); 3164 init_mad_qp(port_priv, &port_priv->qp_info[1]); 3165 3166 cq_size = mad_sendq_size + mad_recvq_size; 3167 has_smi = rdma_cap_ib_smi(device, port_num); 3168 if (has_smi) 3169 cq_size *= 2; 3170 3171 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, 3172 
IB_POLL_WORKQUEUE); 3173 if (IS_ERR(port_priv->cq)) { 3174 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); 3175 ret = PTR_ERR(port_priv->cq); 3176 goto error3; 3177 } 3178 3179 port_priv->pd = ib_alloc_pd(device, 0); 3180 if (IS_ERR(port_priv->pd)) { 3181 dev_err(&device->dev, "Couldn't create ib_mad PD\n"); 3182 ret = PTR_ERR(port_priv->pd); 3183 goto error4; 3184 } 3185 3186 if (has_smi) { 3187 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); 3188 if (ret) 3189 goto error6; 3190 } 3191 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); 3192 if (ret) 3193 goto error7; 3194 3195 snprintf(name, sizeof name, "ib_mad%d", port_num); 3196 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 3197 if (!port_priv->wq) { 3198 ret = -ENOMEM; 3199 goto error8; 3200 } 3201 3202 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3203 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 3204 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3205 3206 ret = ib_mad_port_start(port_priv); 3207 if (ret) { 3208 dev_err(&device->dev, "Couldn't start port\n"); 3209 goto error9; 3210 } 3211 3212 return 0; 3213 3214 error9: 3215 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3216 list_del_init(&port_priv->port_list); 3217 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3218 3219 destroy_workqueue(port_priv->wq); 3220 error8: 3221 destroy_mad_qp(&port_priv->qp_info[1]); 3222 error7: 3223 destroy_mad_qp(&port_priv->qp_info[0]); 3224 error6: 3225 ib_dealloc_pd(port_priv->pd); 3226 error4: 3227 ib_free_cq(port_priv->cq); 3228 cleanup_recv_queue(&port_priv->qp_info[1]); 3229 cleanup_recv_queue(&port_priv->qp_info[0]); 3230 error3: 3231 kfree(port_priv); 3232 3233 return ret; 3234 } 3235 3236 /* 3237 * Close the port 3238 * If there are no classes using the port, free the port 3239 * resources (CQ, MR, PD, QP) and remove the port's info structure 3240 */ 3241 static int ib_mad_port_close(struct ib_device *device, int port_num) 3242 { 3243 struct ib_mad_port_private *port_priv; 3244 unsigned long flags; 3245 3246 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3247 port_priv = __ib_get_mad_port(device, port_num); 3248 if (port_priv == NULL) { 3249 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3250 dev_err(&device->dev, "Port %d not found\n", port_num); 3251 return -ENODEV; 3252 } 3253 list_del_init(&port_priv->port_list); 3254 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3255 3256 destroy_workqueue(port_priv->wq); 3257 destroy_mad_qp(&port_priv->qp_info[1]); 3258 destroy_mad_qp(&port_priv->qp_info[0]); 3259 ib_dealloc_pd(port_priv->pd); 3260 ib_free_cq(port_priv->cq); 3261 cleanup_recv_queue(&port_priv->qp_info[1]); 3262 cleanup_recv_queue(&port_priv->qp_info[0]); 3263 /* XXX: Handle deallocation of MAD registration tables */ 3264 3265 kfree(port_priv); 3266 3267 return 0; 3268 } 3269 3270 static void ib_mad_init_device(struct ib_device *device) 3271 { 3272 int start, i; 3273 3274 start = rdma_start_port(device); 3275 3276 for (i = start; i <= rdma_end_port(device); i++) { 3277 if (!rdma_cap_ib_mad(device, i)) 3278 continue; 3279 3280 if (ib_mad_port_open(device, i)) { 3281 dev_err(&device->dev, "Couldn't open port %d\n", i); 3282 goto error; 3283 } 3284 if (ib_agent_port_open(device, i)) { 3285 dev_err(&device->dev, 3286 "Couldn't open port %d for agents\n", i); 3287 goto error_agent; 3288 } 3289 } 3290 return; 3291 3292 error_agent: 3293 if (ib_mad_port_close(device, i)) 3294 dev_err(&device->dev, "Couldn't close port %d\n", i); 3295 3296 error: 
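	/*
	 * Unwind any ports that were fully opened before the failure: walk
	 * back over the earlier port numbers, skip ports without MAD
	 * support, and close both the agent and the core MAD state.
	 */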
3297 while (--i >= start) { 3298 if (!rdma_cap_ib_mad(device, i)) 3299 continue; 3300 3301 if (ib_agent_port_close(device, i)) 3302 dev_err(&device->dev, 3303 "Couldn't close port %d for agents\n", i); 3304 if (ib_mad_port_close(device, i)) 3305 dev_err(&device->dev, "Couldn't close port %d\n", i); 3306 } 3307 } 3308 3309 static void ib_mad_remove_device(struct ib_device *device, void *client_data) 3310 { 3311 int i; 3312 3313 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { 3314 if (!rdma_cap_ib_mad(device, i)) 3315 continue; 3316 3317 if (ib_agent_port_close(device, i)) 3318 dev_err(&device->dev, 3319 "Couldn't close port %d for agents\n", i); 3320 if (ib_mad_port_close(device, i)) 3321 dev_err(&device->dev, "Couldn't close port %d\n", i); 3322 } 3323 } 3324 3325 static struct ib_client mad_client = { 3326 .name = "mad", 3327 .add = ib_mad_init_device, 3328 .remove = ib_mad_remove_device 3329 }; 3330 3331 int ib_mad_init(void) 3332 { 3333 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); 3334 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); 3335 3336 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); 3337 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); 3338 3339 INIT_LIST_HEAD(&ib_mad_port_list); 3340 3341 if (ib_register_client(&mad_client)) { 3342 pr_err("Couldn't register ib_mad client\n"); 3343 return -EINVAL; 3344 } 3345 3346 return 0; 3347 } 3348 3349 void ib_mad_cleanup(void) 3350 { 3351 ib_unregister_client(&mad_client); 3352 } 3353
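/*
 * Summary of the lifecycle implemented above: ib_mad_init() clamps the
 * send and receive queue sizes to [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE]
 * and registers mad_client, whose .add callback (ib_mad_init_device) opens
 * MAD and agent state on every MAD-capable port of each RDMA device and
 * whose .remove callback (ib_mad_remove_device) tears it down again.
 * ib_mad_init() and ib_mad_cleanup() are assumed to be invoked once by the
 * RDMA core during its own init and exit paths.
 */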