/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
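
/*
 * Illustrative sketch only (not an in-kernel user of this API): a client's
 * receive handler might use ib_response_mad() to tell replies to its own
 * requests apart from new requests it must answer.  The name
 * example_recv_handler and the empty branches are placeholders.
 *
 *	static void example_recv_handler(struct ib_mad_agent *agent,
 *					 struct ib_mad_send_buf *send_buf,
 *					 struct ib_mad_recv_wc *recv_wc)
 *	{
 *		const struct ib_mad_hdr *hdr = &recv_wc->recv_buf.mad->mad_hdr;
 *
 *		if (ib_response_mad(hdr)) {
 *			// match against an outstanding request
 *		} else {
 *			// process the request, possibly send a response
 *		}
 *		ib_free_recv_mad(recv_wc);
 *	}
 */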

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: Invalid port %d\n",
			   port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported.  For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
	if (ret2) {
		ret = ERR_PTR(ret2);
		goto error4;
	}

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error5;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error5;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error5;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error5:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
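
/*
 * Illustrative sketch only: how a hypothetical client might register a GSI
 * agent for the Performance Management class using the call above.  The
 * my_* identifiers are placeholders, not kernel symbols.
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 * The agent is torn down again with ib_unregister_mad_agent(agent).
 */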

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;
	int err;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);

	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
	if (err) {
		ret = ERR_PTR(err);
		goto error2;
	}

	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error3;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error3:
	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
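
/*
 * Illustrative sketch only: a hypothetical snoop client on the GSI QP,
 * observing received MADs and send completions without consuming them.
 * The my_* identifiers are placeholders, not kernel symbols.
 *
 *	agent = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *				      IB_MAD_SNOOP_RECVS |
 *				      IB_MAD_SNOOP_SEND_COMPLETIONS,
 *				      my_snoop_handler, my_recv_handler,
 *				      my_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 * A snoop agent is left with hi_tid == 0 and is released through
 * ib_unregister_mad_agent(), which dispatches to unregister_mad_snoop().
 */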

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if
(atomic_dec_and_test(&mad_agent_priv->refcount)) 567 complete(&mad_agent_priv->comp); 568 } 569 570 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv) 571 { 572 if (atomic_dec_and_test(&mad_snoop_priv->refcount)) 573 complete(&mad_snoop_priv->comp); 574 } 575 576 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) 577 { 578 struct ib_mad_port_private *port_priv; 579 unsigned long flags; 580 581 /* Note that we could still be handling received MADs */ 582 583 /* 584 * Canceling all sends results in dropping received response 585 * MADs, preventing us from queuing additional work 586 */ 587 cancel_mads(mad_agent_priv); 588 port_priv = mad_agent_priv->qp_info->port_priv; 589 cancel_delayed_work(&mad_agent_priv->timed_work); 590 591 spin_lock_irqsave(&port_priv->reg_lock, flags); 592 remove_mad_reg_req(mad_agent_priv); 593 list_del(&mad_agent_priv->agent_list); 594 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 595 596 flush_workqueue(port_priv->wq); 597 ib_cancel_rmpp_recvs(mad_agent_priv); 598 599 deref_mad_agent(mad_agent_priv); 600 wait_for_completion(&mad_agent_priv->comp); 601 602 ib_mad_agent_security_cleanup(&mad_agent_priv->agent); 603 604 kfree(mad_agent_priv->reg_req); 605 kfree(mad_agent_priv); 606 } 607 608 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) 609 { 610 struct ib_mad_qp_info *qp_info; 611 unsigned long flags; 612 613 qp_info = mad_snoop_priv->qp_info; 614 spin_lock_irqsave(&qp_info->snoop_lock, flags); 615 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL; 616 atomic_dec(&qp_info->snoop_count); 617 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 618 619 deref_snoop_agent(mad_snoop_priv); 620 wait_for_completion(&mad_snoop_priv->comp); 621 622 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); 623 624 kfree(mad_snoop_priv); 625 } 626 627 /* 628 * ib_unregister_mad_agent - Unregisters a client from using MAD services 629 */ 630 void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent) 631 { 632 struct ib_mad_agent_private *mad_agent_priv; 633 struct ib_mad_snoop_private *mad_snoop_priv; 634 635 /* If the TID is zero, the agent can only snoop. 
*/ 636 if (mad_agent->hi_tid) { 637 mad_agent_priv = container_of(mad_agent, 638 struct ib_mad_agent_private, 639 agent); 640 unregister_mad_agent(mad_agent_priv); 641 } else { 642 mad_snoop_priv = container_of(mad_agent, 643 struct ib_mad_snoop_private, 644 agent); 645 unregister_mad_snoop(mad_snoop_priv); 646 } 647 } 648 EXPORT_SYMBOL(ib_unregister_mad_agent); 649 650 static void dequeue_mad(struct ib_mad_list_head *mad_list) 651 { 652 struct ib_mad_queue *mad_queue; 653 unsigned long flags; 654 655 BUG_ON(!mad_list->mad_queue); 656 mad_queue = mad_list->mad_queue; 657 spin_lock_irqsave(&mad_queue->lock, flags); 658 list_del(&mad_list->list); 659 mad_queue->count--; 660 spin_unlock_irqrestore(&mad_queue->lock, flags); 661 } 662 663 static void snoop_send(struct ib_mad_qp_info *qp_info, 664 struct ib_mad_send_buf *send_buf, 665 struct ib_mad_send_wc *mad_send_wc, 666 int mad_snoop_flags) 667 { 668 struct ib_mad_snoop_private *mad_snoop_priv; 669 unsigned long flags; 670 int i; 671 672 spin_lock_irqsave(&qp_info->snoop_lock, flags); 673 for (i = 0; i < qp_info->snoop_table_size; i++) { 674 mad_snoop_priv = qp_info->snoop_table[i]; 675 if (!mad_snoop_priv || 676 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags)) 677 continue; 678 679 atomic_inc(&mad_snoop_priv->refcount); 680 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 681 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, 682 send_buf, mad_send_wc); 683 deref_snoop_agent(mad_snoop_priv); 684 spin_lock_irqsave(&qp_info->snoop_lock, flags); 685 } 686 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 687 } 688 689 static void snoop_recv(struct ib_mad_qp_info *qp_info, 690 struct ib_mad_recv_wc *mad_recv_wc, 691 int mad_snoop_flags) 692 { 693 struct ib_mad_snoop_private *mad_snoop_priv; 694 unsigned long flags; 695 int i; 696 697 spin_lock_irqsave(&qp_info->snoop_lock, flags); 698 for (i = 0; i < qp_info->snoop_table_size; i++) { 699 mad_snoop_priv = qp_info->snoop_table[i]; 700 if (!mad_snoop_priv || 701 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags)) 702 continue; 703 704 atomic_inc(&mad_snoop_priv->refcount); 705 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 706 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL, 707 mad_recv_wc); 708 deref_snoop_agent(mad_snoop_priv); 709 spin_lock_irqsave(&qp_info->snoop_lock, flags); 710 } 711 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 712 } 713 714 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid, 715 u16 pkey_index, u8 port_num, struct ib_wc *wc) 716 { 717 memset(wc, 0, sizeof *wc); 718 wc->wr_cqe = cqe; 719 wc->status = IB_WC_SUCCESS; 720 wc->opcode = IB_WC_RECV; 721 wc->pkey_index = pkey_index; 722 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); 723 wc->src_qp = IB_QP0; 724 wc->qp = qp; 725 wc->slid = slid; 726 wc->sl = 0; 727 wc->dlid_path_bits = 0; 728 wc->port_num = port_num; 729 } 730 731 static size_t mad_priv_size(const struct ib_mad_private *mp) 732 { 733 return sizeof(struct ib_mad_private) + mp->mad_size; 734 } 735 736 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags) 737 { 738 size_t size = sizeof(struct ib_mad_private) + mad_size; 739 struct ib_mad_private *ret = kzalloc(size, flags); 740 741 if (ret) 742 ret->mad_size = mad_size; 743 744 return ret; 745 } 746 747 static size_t port_mad_size(const struct ib_mad_port_private *port_priv) 748 { 749 return rdma_max_mad_size(port_priv->device, port_priv->port_num); 750 } 751 752 static size_t mad_priv_dma_size(const 
struct ib_mad_private *mp) 753 { 754 return sizeof(struct ib_grh) + mp->mad_size; 755 } 756 757 /* 758 * Return 0 if SMP is to be sent 759 * Return 1 if SMP was consumed locally (whether or not solicited) 760 * Return < 0 if error 761 */ 762 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, 763 struct ib_mad_send_wr_private *mad_send_wr) 764 { 765 int ret = 0; 766 struct ib_smp *smp = mad_send_wr->send_buf.mad; 767 struct opa_smp *opa_smp = (struct opa_smp *)smp; 768 unsigned long flags; 769 struct ib_mad_local_private *local; 770 struct ib_mad_private *mad_priv; 771 struct ib_mad_port_private *port_priv; 772 struct ib_mad_agent_private *recv_mad_agent = NULL; 773 struct ib_device *device = mad_agent_priv->agent.device; 774 u8 port_num; 775 struct ib_wc mad_wc; 776 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr; 777 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); 778 u16 out_mad_pkey_index = 0; 779 u16 drslid; 780 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 781 mad_agent_priv->qp_info->port_priv->port_num); 782 783 if (rdma_cap_ib_switch(device) && 784 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 785 port_num = send_wr->port_num; 786 else 787 port_num = mad_agent_priv->agent.port_num; 788 789 /* 790 * Directed route handling starts if the initial LID routed part of 791 * a request or the ending LID routed part of a response is empty. 792 * If we are at the start of the LID routed part, don't update the 793 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. 794 */ 795 if (opa && smp->class_version == OPA_SM_CLASS_VERSION) { 796 u32 opa_drslid; 797 798 if ((opa_get_smp_direction(opa_smp) 799 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == 800 OPA_LID_PERMISSIVE && 801 opa_smi_handle_dr_smp_send(opa_smp, 802 rdma_cap_ib_switch(device), 803 port_num) == IB_SMI_DISCARD) { 804 ret = -EINVAL; 805 dev_err(&device->dev, "OPA Invalid directed route\n"); 806 goto out; 807 } 808 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); 809 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) && 810 opa_drslid & 0xffff0000) { 811 ret = -EINVAL; 812 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", 813 opa_drslid); 814 goto out; 815 } 816 drslid = (u16)(opa_drslid & 0x0000ffff); 817 818 /* Check to post send on QP or process locally */ 819 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD && 820 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD) 821 goto out; 822 } else { 823 if ((ib_get_smp_direction(smp) ? 
smp->dr_dlid : smp->dr_slid) == 824 IB_LID_PERMISSIVE && 825 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) == 826 IB_SMI_DISCARD) { 827 ret = -EINVAL; 828 dev_err(&device->dev, "Invalid directed route\n"); 829 goto out; 830 } 831 drslid = be16_to_cpu(smp->dr_slid); 832 833 /* Check to post send on QP or process locally */ 834 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD && 835 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD) 836 goto out; 837 } 838 839 local = kmalloc(sizeof *local, GFP_ATOMIC); 840 if (!local) { 841 ret = -ENOMEM; 842 goto out; 843 } 844 local->mad_priv = NULL; 845 local->recv_mad_agent = NULL; 846 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC); 847 if (!mad_priv) { 848 ret = -ENOMEM; 849 kfree(local); 850 goto out; 851 } 852 853 build_smp_wc(mad_agent_priv->agent.qp, 854 send_wr->wr.wr_cqe, drslid, 855 send_wr->pkey_index, 856 send_wr->port_num, &mad_wc); 857 858 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) { 859 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len 860 + mad_send_wr->send_buf.data_len 861 + sizeof(struct ib_grh); 862 } 863 864 /* No GRH for DR SMP */ 865 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL, 866 (const struct ib_mad_hdr *)smp, mad_size, 867 (struct ib_mad_hdr *)mad_priv->mad, 868 &mad_size, &out_mad_pkey_index); 869 switch (ret) 870 { 871 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: 872 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) && 873 mad_agent_priv->agent.recv_handler) { 874 local->mad_priv = mad_priv; 875 local->recv_mad_agent = mad_agent_priv; 876 /* 877 * Reference MAD agent until receive 878 * side of local completion handled 879 */ 880 atomic_inc(&mad_agent_priv->refcount); 881 } else 882 kfree(mad_priv); 883 break; 884 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: 885 kfree(mad_priv); 886 break; 887 case IB_MAD_RESULT_SUCCESS: 888 /* Treat like an incoming receive MAD */ 889 port_priv = ib_get_mad_port(mad_agent_priv->agent.device, 890 mad_agent_priv->agent.port_num); 891 if (port_priv) { 892 memcpy(mad_priv->mad, smp, mad_priv->mad_size); 893 recv_mad_agent = find_mad_agent(port_priv, 894 (const struct ib_mad_hdr *)mad_priv->mad); 895 } 896 if (!port_priv || !recv_mad_agent) { 897 /* 898 * No receiving agent so drop packet and 899 * generate send completion. 900 */ 901 kfree(mad_priv); 902 break; 903 } 904 local->mad_priv = mad_priv; 905 local->recv_mad_agent = recv_mad_agent; 906 break; 907 default: 908 kfree(mad_priv); 909 kfree(local); 910 ret = -EINVAL; 911 goto out; 912 } 913 914 local->mad_send_wr = mad_send_wr; 915 if (opa) { 916 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index; 917 local->return_wc_byte_len = mad_size; 918 } 919 /* Reference MAD agent until send side of local completion handled */ 920 atomic_inc(&mad_agent_priv->refcount); 921 /* Queue local completion to local list */ 922 spin_lock_irqsave(&mad_agent_priv->lock, flags); 923 list_add_tail(&local->completion_list, &mad_agent_priv->local_list); 924 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 925 queue_work(mad_agent_priv->qp_info->port_priv->wq, 926 &mad_agent_priv->local_work); 927 ret = 1; 928 out: 929 return ret; 930 } 931 932 static int get_pad_size(int hdr_len, int data_len, size_t mad_size) 933 { 934 int seg_size, pad; 935 936 seg_size = mad_size - hdr_len; 937 if (data_len && seg_size) { 938 pad = seg_size - data_len % seg_size; 939 return pad == seg_size ? 
0 : pad; 940 } else 941 return seg_size; 942 } 943 944 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr) 945 { 946 struct ib_rmpp_segment *s, *t; 947 948 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) { 949 list_del(&s->list); 950 kfree(s); 951 } 952 } 953 954 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, 955 size_t mad_size, gfp_t gfp_mask) 956 { 957 struct ib_mad_send_buf *send_buf = &send_wr->send_buf; 958 struct ib_rmpp_mad *rmpp_mad = send_buf->mad; 959 struct ib_rmpp_segment *seg = NULL; 960 int left, seg_size, pad; 961 962 send_buf->seg_size = mad_size - send_buf->hdr_len; 963 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR; 964 seg_size = send_buf->seg_size; 965 pad = send_wr->pad; 966 967 /* Allocate data segments. */ 968 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { 969 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); 970 if (!seg) { 971 free_send_rmpp_list(send_wr); 972 return -ENOMEM; 973 } 974 seg->num = ++send_buf->seg_count; 975 list_add_tail(&seg->list, &send_wr->rmpp_list); 976 } 977 978 /* Zero any padding */ 979 if (pad) 980 memset(seg->data + seg_size - pad, 0, pad); 981 982 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv-> 983 agent.rmpp_version; 984 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; 985 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); 986 987 send_wr->cur_seg = container_of(send_wr->rmpp_list.next, 988 struct ib_rmpp_segment, list); 989 send_wr->last_ack_seg = send_wr->cur_seg; 990 return 0; 991 } 992 993 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent) 994 { 995 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); 996 } 997 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent); 998 999 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, 1000 u32 remote_qpn, u16 pkey_index, 1001 int rmpp_active, 1002 int hdr_len, int data_len, 1003 gfp_t gfp_mask, 1004 u8 base_version) 1005 { 1006 struct ib_mad_agent_private *mad_agent_priv; 1007 struct ib_mad_send_wr_private *mad_send_wr; 1008 int pad, message_size, ret, size; 1009 void *buf; 1010 size_t mad_size; 1011 bool opa; 1012 1013 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 1014 agent); 1015 1016 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num); 1017 1018 if (opa && base_version == OPA_MGMT_BASE_VERSION) 1019 mad_size = sizeof(struct opa_mad); 1020 else 1021 mad_size = sizeof(struct ib_mad); 1022 1023 pad = get_pad_size(hdr_len, data_len, mad_size); 1024 message_size = hdr_len + data_len + pad; 1025 1026 if (ib_mad_kernel_rmpp_agent(mad_agent)) { 1027 if (!rmpp_active && message_size > mad_size) 1028 return ERR_PTR(-EINVAL); 1029 } else 1030 if (rmpp_active || message_size > mad_size) 1031 return ERR_PTR(-EINVAL); 1032 1033 size = rmpp_active ? 
hdr_len : mad_size; 1034 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask); 1035 if (!buf) 1036 return ERR_PTR(-ENOMEM); 1037 1038 mad_send_wr = buf + size; 1039 INIT_LIST_HEAD(&mad_send_wr->rmpp_list); 1040 mad_send_wr->send_buf.mad = buf; 1041 mad_send_wr->send_buf.hdr_len = hdr_len; 1042 mad_send_wr->send_buf.data_len = data_len; 1043 mad_send_wr->pad = pad; 1044 1045 mad_send_wr->mad_agent_priv = mad_agent_priv; 1046 mad_send_wr->sg_list[0].length = hdr_len; 1047 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey; 1048 1049 /* OPA MADs don't have to be the full 2048 bytes */ 1050 if (opa && base_version == OPA_MGMT_BASE_VERSION && 1051 data_len < mad_size - hdr_len) 1052 mad_send_wr->sg_list[1].length = data_len; 1053 else 1054 mad_send_wr->sg_list[1].length = mad_size - hdr_len; 1055 1056 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey; 1057 1058 mad_send_wr->mad_list.cqe.done = ib_mad_send_done; 1059 1060 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; 1061 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list; 1062 mad_send_wr->send_wr.wr.num_sge = 2; 1063 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND; 1064 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED; 1065 mad_send_wr->send_wr.remote_qpn = remote_qpn; 1066 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY; 1067 mad_send_wr->send_wr.pkey_index = pkey_index; 1068 1069 if (rmpp_active) { 1070 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask); 1071 if (ret) { 1072 kfree(buf); 1073 return ERR_PTR(ret); 1074 } 1075 } 1076 1077 mad_send_wr->send_buf.mad_agent = mad_agent; 1078 atomic_inc(&mad_agent_priv->refcount); 1079 return &mad_send_wr->send_buf; 1080 } 1081 EXPORT_SYMBOL(ib_create_send_mad); 1082 1083 int ib_get_mad_data_offset(u8 mgmt_class) 1084 { 1085 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 1086 return IB_MGMT_SA_HDR; 1087 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || 1088 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || 1089 (mgmt_class == IB_MGMT_CLASS_BIS)) 1090 return IB_MGMT_DEVICE_HDR; 1091 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 1092 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 1093 return IB_MGMT_VENDOR_HDR; 1094 else 1095 return IB_MGMT_MAD_HDR; 1096 } 1097 EXPORT_SYMBOL(ib_get_mad_data_offset); 1098 1099 int ib_is_mad_class_rmpp(u8 mgmt_class) 1100 { 1101 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) || 1102 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || 1103 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || 1104 (mgmt_class == IB_MGMT_CLASS_BIS) || 1105 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 1106 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))) 1107 return 1; 1108 return 0; 1109 } 1110 EXPORT_SYMBOL(ib_is_mad_class_rmpp); 1111 1112 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) 1113 { 1114 struct ib_mad_send_wr_private *mad_send_wr; 1115 struct list_head *list; 1116 1117 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, 1118 send_buf); 1119 list = &mad_send_wr->cur_seg->list; 1120 1121 if (mad_send_wr->cur_seg->num < seg_num) { 1122 list_for_each_entry(mad_send_wr->cur_seg, list, list) 1123 if (mad_send_wr->cur_seg->num == seg_num) 1124 break; 1125 } else if (mad_send_wr->cur_seg->num > seg_num) { 1126 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list) 1127 if (mad_send_wr->cur_seg->num == seg_num) 1128 break; 1129 } 1130 return mad_send_wr->cur_seg->data; 1131 } 1132 EXPORT_SYMBOL(ib_get_rmpp_segment); 1133 1134 static inline void *ib_get_payload(struct 
ib_mad_send_wr_private *mad_send_wr) 1135 { 1136 if (mad_send_wr->send_buf.seg_count) 1137 return ib_get_rmpp_segment(&mad_send_wr->send_buf, 1138 mad_send_wr->seg_num); 1139 else 1140 return mad_send_wr->send_buf.mad + 1141 mad_send_wr->send_buf.hdr_len; 1142 } 1143 1144 void ib_free_send_mad(struct ib_mad_send_buf *send_buf) 1145 { 1146 struct ib_mad_agent_private *mad_agent_priv; 1147 struct ib_mad_send_wr_private *mad_send_wr; 1148 1149 mad_agent_priv = container_of(send_buf->mad_agent, 1150 struct ib_mad_agent_private, agent); 1151 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, 1152 send_buf); 1153 1154 free_send_rmpp_list(mad_send_wr); 1155 kfree(send_buf->mad); 1156 deref_mad_agent(mad_agent_priv); 1157 } 1158 EXPORT_SYMBOL(ib_free_send_mad); 1159 1160 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) 1161 { 1162 struct ib_mad_qp_info *qp_info; 1163 struct list_head *list; 1164 struct ib_send_wr *bad_send_wr; 1165 struct ib_mad_agent *mad_agent; 1166 struct ib_sge *sge; 1167 unsigned long flags; 1168 int ret; 1169 1170 /* Set WR ID to find mad_send_wr upon completion */ 1171 qp_info = mad_send_wr->mad_agent_priv->qp_info; 1172 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; 1173 mad_send_wr->mad_list.cqe.done = ib_mad_send_done; 1174 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; 1175 1176 mad_agent = mad_send_wr->send_buf.mad_agent; 1177 sge = mad_send_wr->sg_list; 1178 sge[0].addr = ib_dma_map_single(mad_agent->device, 1179 mad_send_wr->send_buf.mad, 1180 sge[0].length, 1181 DMA_TO_DEVICE); 1182 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) 1183 return -ENOMEM; 1184 1185 mad_send_wr->header_mapping = sge[0].addr; 1186 1187 sge[1].addr = ib_dma_map_single(mad_agent->device, 1188 ib_get_payload(mad_send_wr), 1189 sge[1].length, 1190 DMA_TO_DEVICE); 1191 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { 1192 ib_dma_unmap_single(mad_agent->device, 1193 mad_send_wr->header_mapping, 1194 sge[0].length, DMA_TO_DEVICE); 1195 return -ENOMEM; 1196 } 1197 mad_send_wr->payload_mapping = sge[1].addr; 1198 1199 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 1200 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { 1201 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, 1202 &bad_send_wr); 1203 list = &qp_info->send_queue.list; 1204 } else { 1205 ret = 0; 1206 list = &qp_info->overflow_list; 1207 } 1208 1209 if (!ret) { 1210 qp_info->send_queue.count++; 1211 list_add_tail(&mad_send_wr->mad_list.list, list); 1212 } 1213 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 1214 if (ret) { 1215 ib_dma_unmap_single(mad_agent->device, 1216 mad_send_wr->header_mapping, 1217 sge[0].length, DMA_TO_DEVICE); 1218 ib_dma_unmap_single(mad_agent->device, 1219 mad_send_wr->payload_mapping, 1220 sge[1].length, DMA_TO_DEVICE); 1221 } 1222 return ret; 1223 } 1224 1225 /* 1226 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated 1227 * with the registered client 1228 */ 1229 int ib_post_send_mad(struct ib_mad_send_buf *send_buf, 1230 struct ib_mad_send_buf **bad_send_buf) 1231 { 1232 struct ib_mad_agent_private *mad_agent_priv; 1233 struct ib_mad_send_buf *next_send_buf; 1234 struct ib_mad_send_wr_private *mad_send_wr; 1235 unsigned long flags; 1236 int ret = -EINVAL; 1237 1238 /* Walk list of send WRs and post each on send list */ 1239 for (; send_buf; send_buf = next_send_buf) { 1240 mad_send_wr = container_of(send_buf, 1241 struct ib_mad_send_wr_private, 1242 
send_buf); 1243 mad_agent_priv = mad_send_wr->mad_agent_priv; 1244 1245 ret = ib_mad_enforce_security(mad_agent_priv, 1246 mad_send_wr->send_wr.pkey_index); 1247 if (ret) 1248 goto error; 1249 1250 if (!send_buf->mad_agent->send_handler || 1251 (send_buf->timeout_ms && 1252 !send_buf->mad_agent->recv_handler)) { 1253 ret = -EINVAL; 1254 goto error; 1255 } 1256 1257 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { 1258 if (mad_agent_priv->agent.rmpp_version) { 1259 ret = -EINVAL; 1260 goto error; 1261 } 1262 } 1263 1264 /* 1265 * Save pointer to next work request to post in case the 1266 * current one completes, and the user modifies the work 1267 * request associated with the completion 1268 */ 1269 next_send_buf = send_buf->next; 1270 mad_send_wr->send_wr.ah = send_buf->ah; 1271 1272 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == 1273 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 1274 ret = handle_outgoing_dr_smp(mad_agent_priv, 1275 mad_send_wr); 1276 if (ret < 0) /* error */ 1277 goto error; 1278 else if (ret == 1) /* locally consumed */ 1279 continue; 1280 } 1281 1282 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; 1283 /* Timeout will be updated after send completes */ 1284 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); 1285 mad_send_wr->max_retries = send_buf->retries; 1286 mad_send_wr->retries_left = send_buf->retries; 1287 send_buf->retries = 0; 1288 /* Reference for work request to QP + response */ 1289 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); 1290 mad_send_wr->status = IB_WC_SUCCESS; 1291 1292 /* Reference MAD agent until send completes */ 1293 atomic_inc(&mad_agent_priv->refcount); 1294 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1295 list_add_tail(&mad_send_wr->agent_list, 1296 &mad_agent_priv->send_list); 1297 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1298 1299 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 1300 ret = ib_send_rmpp_mad(mad_send_wr); 1301 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) 1302 ret = ib_send_mad(mad_send_wr); 1303 } else 1304 ret = ib_send_mad(mad_send_wr); 1305 if (ret < 0) { 1306 /* Fail send request */ 1307 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1308 list_del(&mad_send_wr->agent_list); 1309 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1310 atomic_dec(&mad_agent_priv->refcount); 1311 goto error; 1312 } 1313 } 1314 return 0; 1315 error: 1316 if (bad_send_buf) 1317 *bad_send_buf = send_buf; 1318 return ret; 1319 } 1320 EXPORT_SYMBOL(ib_post_send_mad); 1321 1322 /* 1323 * ib_free_recv_mad - Returns data buffers used to receive 1324 * a MAD to the access layer 1325 */ 1326 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) 1327 { 1328 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf; 1329 struct ib_mad_private_header *mad_priv_hdr; 1330 struct ib_mad_private *priv; 1331 struct list_head free_list; 1332 1333 INIT_LIST_HEAD(&free_list); 1334 list_splice_init(&mad_recv_wc->rmpp_list, &free_list); 1335 1336 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, 1337 &free_list, list) { 1338 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, 1339 recv_buf); 1340 mad_priv_hdr = container_of(mad_recv_wc, 1341 struct ib_mad_private_header, 1342 recv_wc); 1343 priv = container_of(mad_priv_hdr, struct ib_mad_private, 1344 header); 1345 kfree(priv); 1346 } 1347 } 1348 EXPORT_SYMBOL(ib_free_recv_mad); 1349 1350 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, 1351 u8 rmpp_version, 1352 ib_mad_send_handler 
send_handler, 1353 ib_mad_recv_handler recv_handler, 1354 void *context) 1355 { 1356 return ERR_PTR(-EINVAL); /* XXX: for now */ 1357 } 1358 EXPORT_SYMBOL(ib_redirect_mad_qp); 1359 1360 int ib_process_mad_wc(struct ib_mad_agent *mad_agent, 1361 struct ib_wc *wc) 1362 { 1363 dev_err(&mad_agent->device->dev, 1364 "ib_process_mad_wc() not implemented yet\n"); 1365 return 0; 1366 } 1367 EXPORT_SYMBOL(ib_process_mad_wc); 1368 1369 static int method_in_use(struct ib_mad_mgmt_method_table **method, 1370 struct ib_mad_reg_req *mad_reg_req) 1371 { 1372 int i; 1373 1374 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { 1375 if ((*method)->agent[i]) { 1376 pr_err("Method %d already in use\n", i); 1377 return -EINVAL; 1378 } 1379 } 1380 return 0; 1381 } 1382 1383 static int allocate_method_table(struct ib_mad_mgmt_method_table **method) 1384 { 1385 /* Allocate management method table */ 1386 *method = kzalloc(sizeof **method, GFP_ATOMIC); 1387 return (*method) ? 0 : (-ENOMEM); 1388 } 1389 1390 /* 1391 * Check to see if there are any methods still in use 1392 */ 1393 static int check_method_table(struct ib_mad_mgmt_method_table *method) 1394 { 1395 int i; 1396 1397 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) 1398 if (method->agent[i]) 1399 return 1; 1400 return 0; 1401 } 1402 1403 /* 1404 * Check to see if there are any method tables for this class still in use 1405 */ 1406 static int check_class_table(struct ib_mad_mgmt_class_table *class) 1407 { 1408 int i; 1409 1410 for (i = 0; i < MAX_MGMT_CLASS; i++) 1411 if (class->method_table[i]) 1412 return 1; 1413 return 0; 1414 } 1415 1416 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class) 1417 { 1418 int i; 1419 1420 for (i = 0; i < MAX_MGMT_OUI; i++) 1421 if (vendor_class->method_table[i]) 1422 return 1; 1423 return 0; 1424 } 1425 1426 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, 1427 const char *oui) 1428 { 1429 int i; 1430 1431 for (i = 0; i < MAX_MGMT_OUI; i++) 1432 /* Is there matching OUI for this vendor class ? 
*/ 1433 if (!memcmp(vendor_class->oui[i], oui, 3)) 1434 return i; 1435 1436 return -1; 1437 } 1438 1439 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor) 1440 { 1441 int i; 1442 1443 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++) 1444 if (vendor->vendor_class[i]) 1445 return 1; 1446 1447 return 0; 1448 } 1449 1450 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, 1451 struct ib_mad_agent_private *agent) 1452 { 1453 int i; 1454 1455 /* Remove any methods for this mad agent */ 1456 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) { 1457 if (method->agent[i] == agent) { 1458 method->agent[i] = NULL; 1459 } 1460 } 1461 } 1462 1463 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 1464 struct ib_mad_agent_private *agent_priv, 1465 u8 mgmt_class) 1466 { 1467 struct ib_mad_port_private *port_priv; 1468 struct ib_mad_mgmt_class_table **class; 1469 struct ib_mad_mgmt_method_table **method; 1470 int i, ret; 1471 1472 port_priv = agent_priv->qp_info->port_priv; 1473 class = &port_priv->version[mad_reg_req->mgmt_class_version].class; 1474 if (!*class) { 1475 /* Allocate management class table for "new" class version */ 1476 *class = kzalloc(sizeof **class, GFP_ATOMIC); 1477 if (!*class) { 1478 ret = -ENOMEM; 1479 goto error1; 1480 } 1481 1482 /* Allocate method table for this management class */ 1483 method = &(*class)->method_table[mgmt_class]; 1484 if ((ret = allocate_method_table(method))) 1485 goto error2; 1486 } else { 1487 method = &(*class)->method_table[mgmt_class]; 1488 if (!*method) { 1489 /* Allocate method table for this management class */ 1490 if ((ret = allocate_method_table(method))) 1491 goto error1; 1492 } 1493 } 1494 1495 /* Now, make sure methods are not already in use */ 1496 if (method_in_use(method, mad_reg_req)) 1497 goto error3; 1498 1499 /* Finally, add in methods being registered */ 1500 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) 1501 (*method)->agent[i] = agent_priv; 1502 1503 return 0; 1504 1505 error3: 1506 /* Remove any methods for this mad agent */ 1507 remove_methods_mad_agent(*method, agent_priv); 1508 /* Now, check to see if there are any methods in use */ 1509 if (!check_method_table(*method)) { 1510 /* If not, release management method table */ 1511 kfree(*method); 1512 *method = NULL; 1513 } 1514 ret = -EINVAL; 1515 goto error1; 1516 error2: 1517 kfree(*class); 1518 *class = NULL; 1519 error1: 1520 return ret; 1521 } 1522 1523 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, 1524 struct ib_mad_agent_private *agent_priv) 1525 { 1526 struct ib_mad_port_private *port_priv; 1527 struct ib_mad_mgmt_vendor_class_table **vendor_table; 1528 struct ib_mad_mgmt_vendor_class_table *vendor = NULL; 1529 struct ib_mad_mgmt_vendor_class *vendor_class = NULL; 1530 struct ib_mad_mgmt_method_table **method; 1531 int i, ret = -ENOMEM; 1532 u8 vclass; 1533 1534 /* "New" vendor (with OUI) class */ 1535 vclass = vendor_class_index(mad_reg_req->mgmt_class); 1536 port_priv = agent_priv->qp_info->port_priv; 1537 vendor_table = &port_priv->version[ 1538 mad_reg_req->mgmt_class_version].vendor; 1539 if (!*vendor_table) { 1540 /* Allocate mgmt vendor class table for "new" class version */ 1541 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); 1542 if (!vendor) 1543 goto error1; 1544 1545 *vendor_table = vendor; 1546 } 1547 if (!(*vendor_table)->vendor_class[vclass]) { 1548 /* Allocate table for this management vendor class */ 1549 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); 1550 if 
(!vendor_class) 1551 goto error2; 1552 1553 (*vendor_table)->vendor_class[vclass] = vendor_class; 1554 } 1555 for (i = 0; i < MAX_MGMT_OUI; i++) { 1556 /* Is there matching OUI for this vendor class ? */ 1557 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], 1558 mad_reg_req->oui, 3)) { 1559 method = &(*vendor_table)->vendor_class[ 1560 vclass]->method_table[i]; 1561 BUG_ON(!*method); 1562 goto check_in_use; 1563 } 1564 } 1565 for (i = 0; i < MAX_MGMT_OUI; i++) { 1566 /* OUI slot available ? */ 1567 if (!is_vendor_oui((*vendor_table)->vendor_class[ 1568 vclass]->oui[i])) { 1569 method = &(*vendor_table)->vendor_class[ 1570 vclass]->method_table[i]; 1571 BUG_ON(*method); 1572 /* Allocate method table for this OUI */ 1573 if ((ret = allocate_method_table(method))) 1574 goto error3; 1575 memcpy((*vendor_table)->vendor_class[vclass]->oui[i], 1576 mad_reg_req->oui, 3); 1577 goto check_in_use; 1578 } 1579 } 1580 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); 1581 goto error3; 1582 1583 check_in_use: 1584 /* Now, make sure methods are not already in use */ 1585 if (method_in_use(method, mad_reg_req)) 1586 goto error4; 1587 1588 /* Finally, add in methods being registered */ 1589 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) 1590 (*method)->agent[i] = agent_priv; 1591 1592 return 0; 1593 1594 error4: 1595 /* Remove any methods for this mad agent */ 1596 remove_methods_mad_agent(*method, agent_priv); 1597 /* Now, check to see if there are any methods in use */ 1598 if (!check_method_table(*method)) { 1599 /* If not, release management method table */ 1600 kfree(*method); 1601 *method = NULL; 1602 } 1603 ret = -EINVAL; 1604 error3: 1605 if (vendor_class) { 1606 (*vendor_table)->vendor_class[vclass] = NULL; 1607 kfree(vendor_class); 1608 } 1609 error2: 1610 if (vendor) { 1611 *vendor_table = NULL; 1612 kfree(vendor); 1613 } 1614 error1: 1615 return ret; 1616 } 1617 1618 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) 1619 { 1620 struct ib_mad_port_private *port_priv; 1621 struct ib_mad_mgmt_class_table *class; 1622 struct ib_mad_mgmt_method_table *method; 1623 struct ib_mad_mgmt_vendor_class_table *vendor; 1624 struct ib_mad_mgmt_vendor_class *vendor_class; 1625 int index; 1626 u8 mgmt_class; 1627 1628 /* 1629 * Was MAD registration request supplied 1630 * with original registration ? 1631 */ 1632 if (!agent_priv->reg_req) { 1633 goto out; 1634 } 1635 1636 port_priv = agent_priv->qp_info->port_priv; 1637 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); 1638 class = port_priv->version[ 1639 agent_priv->reg_req->mgmt_class_version].class; 1640 if (!class) 1641 goto vendor_check; 1642 1643 method = class->method_table[mgmt_class]; 1644 if (method) { 1645 /* Remove any methods for this mad agent */ 1646 remove_methods_mad_agent(method, agent_priv); 1647 /* Now, check to see if there are any methods still in use */ 1648 if (!check_method_table(method)) { 1649 /* If not, release management method table */ 1650 kfree(method); 1651 class->method_table[mgmt_class] = NULL; 1652 /* Any management classes left ? 
*/ 1653 if (!check_class_table(class)) { 1654 /* If not, release management class table */ 1655 kfree(class); 1656 port_priv->version[ 1657 agent_priv->reg_req-> 1658 mgmt_class_version].class = NULL; 1659 } 1660 } 1661 } 1662 1663 vendor_check: 1664 if (!is_vendor_class(mgmt_class)) 1665 goto out; 1666 1667 /* normalize mgmt_class to vendor range 2 */ 1668 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); 1669 vendor = port_priv->version[ 1670 agent_priv->reg_req->mgmt_class_version].vendor; 1671 1672 if (!vendor) 1673 goto out; 1674 1675 vendor_class = vendor->vendor_class[mgmt_class]; 1676 if (vendor_class) { 1677 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); 1678 if (index < 0) 1679 goto out; 1680 method = vendor_class->method_table[index]; 1681 if (method) { 1682 /* Remove any methods for this mad agent */ 1683 remove_methods_mad_agent(method, agent_priv); 1684 /* 1685 * Now, check to see if there are 1686 * any methods still in use 1687 */ 1688 if (!check_method_table(method)) { 1689 /* If not, release management method table */ 1690 kfree(method); 1691 vendor_class->method_table[index] = NULL; 1692 memset(vendor_class->oui[index], 0, 3); 1693 /* Any OUIs left ? */ 1694 if (!check_vendor_class(vendor_class)) { 1695 /* If not, release vendor class table */ 1696 kfree(vendor_class); 1697 vendor->vendor_class[mgmt_class] = NULL; 1698 /* Any other vendor classes left ? */ 1699 if (!check_vendor_table(vendor)) { 1700 kfree(vendor); 1701 port_priv->version[ 1702 agent_priv->reg_req-> 1703 mgmt_class_version]. 1704 vendor = NULL; 1705 } 1706 } 1707 } 1708 } 1709 } 1710 1711 out: 1712 return; 1713 } 1714 1715 static struct ib_mad_agent_private * 1716 find_mad_agent(struct ib_mad_port_private *port_priv, 1717 const struct ib_mad_hdr *mad_hdr) 1718 { 1719 struct ib_mad_agent_private *mad_agent = NULL; 1720 unsigned long flags; 1721 1722 spin_lock_irqsave(&port_priv->reg_lock, flags); 1723 if (ib_response_mad(mad_hdr)) { 1724 u32 hi_tid; 1725 struct ib_mad_agent_private *entry; 1726 1727 /* 1728 * Routing is based on high 32 bits of transaction ID 1729 * of MAD. 
1730 */ 1731 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; 1732 list_for_each_entry(entry, &port_priv->agent_list, agent_list) { 1733 if (entry->agent.hi_tid == hi_tid) { 1734 mad_agent = entry; 1735 break; 1736 } 1737 } 1738 } else { 1739 struct ib_mad_mgmt_class_table *class; 1740 struct ib_mad_mgmt_method_table *method; 1741 struct ib_mad_mgmt_vendor_class_table *vendor; 1742 struct ib_mad_mgmt_vendor_class *vendor_class; 1743 const struct ib_vendor_mad *vendor_mad; 1744 int index; 1745 1746 /* 1747 * Routing is based on version, class, and method 1748 * For "newer" vendor MADs, also based on OUI 1749 */ 1750 if (mad_hdr->class_version >= MAX_MGMT_VERSION) 1751 goto out; 1752 if (!is_vendor_class(mad_hdr->mgmt_class)) { 1753 class = port_priv->version[ 1754 mad_hdr->class_version].class; 1755 if (!class) 1756 goto out; 1757 if (convert_mgmt_class(mad_hdr->mgmt_class) >= 1758 ARRAY_SIZE(class->method_table)) 1759 goto out; 1760 method = class->method_table[convert_mgmt_class( 1761 mad_hdr->mgmt_class)]; 1762 if (method) 1763 mad_agent = method->agent[mad_hdr->method & 1764 ~IB_MGMT_METHOD_RESP]; 1765 } else { 1766 vendor = port_priv->version[ 1767 mad_hdr->class_version].vendor; 1768 if (!vendor) 1769 goto out; 1770 vendor_class = vendor->vendor_class[vendor_class_index( 1771 mad_hdr->mgmt_class)]; 1772 if (!vendor_class) 1773 goto out; 1774 /* Find matching OUI */ 1775 vendor_mad = (const struct ib_vendor_mad *)mad_hdr; 1776 index = find_vendor_oui(vendor_class, vendor_mad->oui); 1777 if (index == -1) 1778 goto out; 1779 method = vendor_class->method_table[index]; 1780 if (method) { 1781 mad_agent = method->agent[mad_hdr->method & 1782 ~IB_MGMT_METHOD_RESP]; 1783 } 1784 } 1785 } 1786 1787 if (mad_agent) { 1788 if (mad_agent->agent.recv_handler) 1789 atomic_inc(&mad_agent->refcount); 1790 else { 1791 dev_notice(&port_priv->device->dev, 1792 "No receive handler for client %p on port %d\n", 1793 &mad_agent->agent, port_priv->port_num); 1794 mad_agent = NULL; 1795 } 1796 } 1797 out: 1798 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 1799 1800 return mad_agent; 1801 } 1802 1803 static int validate_mad(const struct ib_mad_hdr *mad_hdr, 1804 const struct ib_mad_qp_info *qp_info, 1805 bool opa) 1806 { 1807 int valid = 0; 1808 u32 qp_num = qp_info->qp->qp_num; 1809 1810 /* Make sure MAD base version is understood */ 1811 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && 1812 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { 1813 pr_err("MAD received with unsupported base version %d %s\n", 1814 mad_hdr->base_version, opa ? 
"(opa)" : ""); 1815 goto out; 1816 } 1817 1818 /* Filter SMI packets sent to other than QP0 */ 1819 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || 1820 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { 1821 if (qp_num == 0) 1822 valid = 1; 1823 } else { 1824 /* CM attributes other than ClassPortInfo only use Send method */ 1825 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && 1826 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && 1827 (mad_hdr->method != IB_MGMT_METHOD_SEND)) 1828 goto out; 1829 /* Filter GSI packets sent to QP0 */ 1830 if (qp_num != 0) 1831 valid = 1; 1832 } 1833 1834 out: 1835 return valid; 1836 } 1837 1838 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, 1839 const struct ib_mad_hdr *mad_hdr) 1840 { 1841 struct ib_rmpp_mad *rmpp_mad; 1842 1843 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; 1844 return !mad_agent_priv->agent.rmpp_version || 1845 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || 1846 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 1847 IB_MGMT_RMPP_FLAG_ACTIVE) || 1848 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); 1849 } 1850 1851 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, 1852 const struct ib_mad_recv_wc *rwc) 1853 { 1854 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == 1855 rwc->recv_buf.mad->mad_hdr.mgmt_class; 1856 } 1857 1858 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, 1859 const struct ib_mad_send_wr_private *wr, 1860 const struct ib_mad_recv_wc *rwc ) 1861 { 1862 struct rdma_ah_attr attr; 1863 u8 send_resp, rcv_resp; 1864 union ib_gid sgid; 1865 struct ib_device *device = mad_agent_priv->agent.device; 1866 u8 port_num = mad_agent_priv->agent.port_num; 1867 u8 lmc; 1868 bool has_grh; 1869 1870 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); 1871 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); 1872 1873 if (send_resp == rcv_resp) 1874 /* both requests, or both responses. GIDs different */ 1875 return 0; 1876 1877 if (rdma_query_ah(wr->send_buf.ah, &attr)) 1878 /* Assume not equal, to avoid false positives. */ 1879 return 0; 1880 1881 has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH); 1882 if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH)) 1883 /* one has GID, other does not. Assume different */ 1884 return 0; 1885 1886 if (!send_resp && rcv_resp) { 1887 /* is request/response. 
*/ 1888 if (!has_grh) { 1889 if (ib_get_cached_lmc(device, port_num, &lmc)) 1890 return 0; 1891 return (!lmc || !((rdma_ah_get_path_bits(&attr) ^ 1892 rwc->wc->dlid_path_bits) & 1893 ((1 << lmc) - 1))); 1894 } else { 1895 const struct ib_global_route *grh = 1896 rdma_ah_read_grh(&attr); 1897 1898 if (ib_get_cached_gid(device, port_num, 1899 grh->sgid_index, &sgid, NULL)) 1900 return 0; 1901 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, 1902 16); 1903 } 1904 } 1905 1906 if (!has_grh) 1907 return rdma_ah_get_dlid(&attr) == rwc->wc->slid; 1908 else 1909 return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw, 1910 rwc->recv_buf.grh->sgid.raw, 1911 16); 1912 } 1913 1914 static inline int is_direct(u8 class) 1915 { 1916 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); 1917 } 1918 1919 struct ib_mad_send_wr_private* 1920 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, 1921 const struct ib_mad_recv_wc *wc) 1922 { 1923 struct ib_mad_send_wr_private *wr; 1924 const struct ib_mad_hdr *mad_hdr; 1925 1926 mad_hdr = &wc->recv_buf.mad->mad_hdr; 1927 1928 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { 1929 if ((wr->tid == mad_hdr->tid) && 1930 rcv_has_same_class(wr, wc) && 1931 /* 1932 * Don't check GID for direct routed MADs. 1933 * These might have permissive LIDs. 1934 */ 1935 (is_direct(mad_hdr->mgmt_class) || 1936 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1937 return (wr->status == IB_WC_SUCCESS) ? wr : NULL; 1938 } 1939 1940 /* 1941 * It's possible to receive the response before we've 1942 * been notified that the send has completed 1943 */ 1944 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { 1945 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && 1946 wr->tid == mad_hdr->tid && 1947 wr->timeout && 1948 rcv_has_same_class(wr, wc) && 1949 /* 1950 * Don't check GID for direct routed MADs. 1951 * These might have permissive LIDs. 1952 */ 1953 (is_direct(mad_hdr->mgmt_class) || 1954 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1955 /* Verify request has not been canceled */ 1956 return (wr->status == IB_WC_SUCCESS) ? 
wr : NULL; 1957 } 1958 return NULL; 1959 } 1960 1961 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) 1962 { 1963 mad_send_wr->timeout = 0; 1964 if (mad_send_wr->refcount == 1) 1965 list_move_tail(&mad_send_wr->agent_list, 1966 &mad_send_wr->mad_agent_priv->done_list); 1967 } 1968 1969 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, 1970 struct ib_mad_recv_wc *mad_recv_wc) 1971 { 1972 struct ib_mad_send_wr_private *mad_send_wr; 1973 struct ib_mad_send_wc mad_send_wc; 1974 unsigned long flags; 1975 int ret; 1976 1977 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 1978 ret = ib_mad_enforce_security(mad_agent_priv, 1979 mad_recv_wc->wc->pkey_index); 1980 if (ret) { 1981 ib_free_recv_mad(mad_recv_wc); 1982 deref_mad_agent(mad_agent_priv); 1983 return; 1984 } 1985 1986 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); 1987 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 1988 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, 1989 mad_recv_wc); 1990 if (!mad_recv_wc) { 1991 deref_mad_agent(mad_agent_priv); 1992 return; 1993 } 1994 } 1995 1996 /* Complete corresponding request */ 1997 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { 1998 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1999 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); 2000 if (!mad_send_wr) { 2001 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2002 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) 2003 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) 2004 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) 2005 & IB_MGMT_RMPP_FLAG_ACTIVE)) { 2006 /* user rmpp is in effect 2007 * and this is an active RMPP MAD 2008 */ 2009 mad_agent_priv->agent.recv_handler( 2010 &mad_agent_priv->agent, NULL, 2011 mad_recv_wc); 2012 atomic_dec(&mad_agent_priv->refcount); 2013 } else { 2014 /* not user rmpp, revert to normal behavior and 2015 * drop the mad */ 2016 ib_free_recv_mad(mad_recv_wc); 2017 deref_mad_agent(mad_agent_priv); 2018 return; 2019 } 2020 } else { 2021 ib_mark_mad_done(mad_send_wr); 2022 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2023 2024 /* Defined behavior is to complete response before request */ 2025 mad_agent_priv->agent.recv_handler( 2026 &mad_agent_priv->agent, 2027 &mad_send_wr->send_buf, 2028 mad_recv_wc); 2029 atomic_dec(&mad_agent_priv->refcount); 2030 2031 mad_send_wc.status = IB_WC_SUCCESS; 2032 mad_send_wc.vendor_err = 0; 2033 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2034 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2035 } 2036 } else { 2037 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL, 2038 mad_recv_wc); 2039 deref_mad_agent(mad_agent_priv); 2040 } 2041 2042 return; 2043 } 2044 2045 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, 2046 const struct ib_mad_qp_info *qp_info, 2047 const struct ib_wc *wc, 2048 int port_num, 2049 struct ib_mad_private *recv, 2050 struct ib_mad_private *response) 2051 { 2052 enum smi_forward_action retsmi; 2053 struct ib_smp *smp = (struct ib_smp *)recv->mad; 2054 2055 if (smi_handle_dr_smp_recv(smp, 2056 rdma_cap_ib_switch(port_priv->device), 2057 port_num, 2058 port_priv->device->phys_port_cnt) == 2059 IB_SMI_DISCARD) 2060 return IB_SMI_DISCARD; 2061 2062 retsmi = smi_check_forward_dr_smp(smp); 2063 if (retsmi == IB_SMI_LOCAL) 2064 return IB_SMI_HANDLE; 2065 2066 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2067 if (smi_handle_dr_smp_send(smp, 2068 
rdma_cap_ib_switch(port_priv->device), 2069 port_num) == IB_SMI_DISCARD) 2070 return IB_SMI_DISCARD; 2071 2072 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) 2073 return IB_SMI_DISCARD; 2074 } else if (rdma_cap_ib_switch(port_priv->device)) { 2075 /* forward case for switches */ 2076 memcpy(response, recv, mad_priv_size(response)); 2077 response->header.recv_wc.wc = &response->header.wc; 2078 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2079 response->header.recv_wc.recv_buf.grh = &response->grh; 2080 2081 agent_send_response((const struct ib_mad_hdr *)response->mad, 2082 &response->grh, wc, 2083 port_priv->device, 2084 smi_get_fwd_port(smp), 2085 qp_info->qp->qp_num, 2086 response->mad_size, 2087 false); 2088 2089 return IB_SMI_DISCARD; 2090 } 2091 return IB_SMI_HANDLE; 2092 } 2093 2094 static bool generate_unmatched_resp(const struct ib_mad_private *recv, 2095 struct ib_mad_private *response, 2096 size_t *resp_len, bool opa) 2097 { 2098 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; 2099 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; 2100 2101 if (recv_hdr->method == IB_MGMT_METHOD_GET || 2102 recv_hdr->method == IB_MGMT_METHOD_SET) { 2103 memcpy(response, recv, mad_priv_size(response)); 2104 response->header.recv_wc.wc = &response->header.wc; 2105 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2106 response->header.recv_wc.recv_buf.grh = &response->grh; 2107 resp_hdr->method = IB_MGMT_METHOD_GET_RESP; 2108 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); 2109 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2110 resp_hdr->status |= IB_SMP_DIRECTION; 2111 2112 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { 2113 if (recv_hdr->mgmt_class == 2114 IB_MGMT_CLASS_SUBN_LID_ROUTED || 2115 recv_hdr->mgmt_class == 2116 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2117 *resp_len = opa_get_smp_header_size( 2118 (struct opa_smp *)recv->mad); 2119 else 2120 *resp_len = sizeof(struct ib_mad_hdr); 2121 } 2122 2123 return true; 2124 } else { 2125 return false; 2126 } 2127 } 2128 2129 static enum smi_action 2130 handle_opa_smi(struct ib_mad_port_private *port_priv, 2131 struct ib_mad_qp_info *qp_info, 2132 struct ib_wc *wc, 2133 int port_num, 2134 struct ib_mad_private *recv, 2135 struct ib_mad_private *response) 2136 { 2137 enum smi_forward_action retsmi; 2138 struct opa_smp *smp = (struct opa_smp *)recv->mad; 2139 2140 if (opa_smi_handle_dr_smp_recv(smp, 2141 rdma_cap_ib_switch(port_priv->device), 2142 port_num, 2143 port_priv->device->phys_port_cnt) == 2144 IB_SMI_DISCARD) 2145 return IB_SMI_DISCARD; 2146 2147 retsmi = opa_smi_check_forward_dr_smp(smp); 2148 if (retsmi == IB_SMI_LOCAL) 2149 return IB_SMI_HANDLE; 2150 2151 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2152 if (opa_smi_handle_dr_smp_send(smp, 2153 rdma_cap_ib_switch(port_priv->device), 2154 port_num) == IB_SMI_DISCARD) 2155 return IB_SMI_DISCARD; 2156 2157 if (opa_smi_check_local_smp(smp, port_priv->device) == 2158 IB_SMI_DISCARD) 2159 return IB_SMI_DISCARD; 2160 2161 } else if (rdma_cap_ib_switch(port_priv->device)) { 2162 /* forward case for switches */ 2163 memcpy(response, recv, mad_priv_size(response)); 2164 response->header.recv_wc.wc = &response->header.wc; 2165 response->header.recv_wc.recv_buf.opa_mad = 2166 (struct opa_mad *)response->mad; 2167 response->header.recv_wc.recv_buf.grh = &response->grh; 2168 2169 agent_send_response((const struct ib_mad_hdr 
*)response->mad, 2170 &response->grh, wc, 2171 port_priv->device, 2172 opa_smi_get_fwd_port(smp), 2173 qp_info->qp->qp_num, 2174 recv->header.wc.byte_len, 2175 true); 2176 2177 return IB_SMI_DISCARD; 2178 } 2179 2180 return IB_SMI_HANDLE; 2181 } 2182 2183 static enum smi_action 2184 handle_smi(struct ib_mad_port_private *port_priv, 2185 struct ib_mad_qp_info *qp_info, 2186 struct ib_wc *wc, 2187 int port_num, 2188 struct ib_mad_private *recv, 2189 struct ib_mad_private *response, 2190 bool opa) 2191 { 2192 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; 2193 2194 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && 2195 mad_hdr->class_version == OPA_SM_CLASS_VERSION) 2196 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, 2197 response); 2198 2199 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); 2200 } 2201 2202 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) 2203 { 2204 struct ib_mad_port_private *port_priv = cq->cq_context; 2205 struct ib_mad_list_head *mad_list = 2206 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2207 struct ib_mad_qp_info *qp_info; 2208 struct ib_mad_private_header *mad_priv_hdr; 2209 struct ib_mad_private *recv, *response = NULL; 2210 struct ib_mad_agent_private *mad_agent; 2211 int port_num; 2212 int ret = IB_MAD_RESULT_SUCCESS; 2213 size_t mad_size; 2214 u16 resp_mad_pkey_index = 0; 2215 bool opa; 2216 2217 if (list_empty_careful(&port_priv->port_list)) 2218 return; 2219 2220 if (wc->status != IB_WC_SUCCESS) { 2221 /* 2222 * Receive errors indicate that the QP has entered the error 2223 * state - error handling/shutdown code will cleanup 2224 */ 2225 return; 2226 } 2227 2228 qp_info = mad_list->mad_queue->qp_info; 2229 dequeue_mad(mad_list); 2230 2231 opa = rdma_cap_opa_mad(qp_info->port_priv->device, 2232 qp_info->port_priv->port_num); 2233 2234 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, 2235 mad_list); 2236 recv = container_of(mad_priv_hdr, struct ib_mad_private, header); 2237 ib_dma_unmap_single(port_priv->device, 2238 recv->header.mapping, 2239 mad_priv_dma_size(recv), 2240 DMA_FROM_DEVICE); 2241 2242 /* Setup MAD receive work completion from "normal" work completion */ 2243 recv->header.wc = *wc; 2244 recv->header.recv_wc.wc = &recv->header.wc; 2245 2246 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { 2247 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); 2248 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2249 } else { 2250 recv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2251 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2252 } 2253 2254 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; 2255 recv->header.recv_wc.recv_buf.grh = &recv->grh; 2256 2257 if (atomic_read(&qp_info->snoop_count)) 2258 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); 2259 2260 /* Validate MAD */ 2261 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) 2262 goto out; 2263 2264 mad_size = recv->mad_size; 2265 response = alloc_mad_private(mad_size, GFP_KERNEL); 2266 if (!response) 2267 goto out; 2268 2269 if (rdma_cap_ib_switch(port_priv->device)) 2270 port_num = wc->port_num; 2271 else 2272 port_num = port_priv->port_num; 2273 2274 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == 2275 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 2276 if (handle_smi(port_priv, qp_info, wc, port_num, recv, 2277 response, opa) 2278 == IB_SMI_DISCARD) 2279 goto out; 2280 } 
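        /*
         * Directed-route SMPs that handle_smi() dropped or already forwarded
         * never get past the check above; everything else continues down the
         * normal receive path: the driver hook, agent lookup, and (for
         * unmatched Get/Set requests) an automatically generated error
         * response.
         */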
2281 2282 /* Give driver "right of first refusal" on incoming MAD */ 2283 if (port_priv->device->process_mad) { 2284 ret = port_priv->device->process_mad(port_priv->device, 0, 2285 port_priv->port_num, 2286 wc, &recv->grh, 2287 (const struct ib_mad_hdr *)recv->mad, 2288 recv->mad_size, 2289 (struct ib_mad_hdr *)response->mad, 2290 &mad_size, &resp_mad_pkey_index); 2291 2292 if (opa) 2293 wc->pkey_index = resp_mad_pkey_index; 2294 2295 if (ret & IB_MAD_RESULT_SUCCESS) { 2296 if (ret & IB_MAD_RESULT_CONSUMED) 2297 goto out; 2298 if (ret & IB_MAD_RESULT_REPLY) { 2299 agent_send_response((const struct ib_mad_hdr *)response->mad, 2300 &recv->grh, wc, 2301 port_priv->device, 2302 port_num, 2303 qp_info->qp->qp_num, 2304 mad_size, opa); 2305 goto out; 2306 } 2307 } 2308 } 2309 2310 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); 2311 if (mad_agent) { 2312 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); 2313 /* 2314 * recv is freed up in error cases in ib_mad_complete_recv 2315 * or via recv_handler in ib_mad_complete_recv() 2316 */ 2317 recv = NULL; 2318 } else if ((ret & IB_MAD_RESULT_SUCCESS) && 2319 generate_unmatched_resp(recv, response, &mad_size, opa)) { 2320 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, 2321 port_priv->device, port_num, 2322 qp_info->qp->qp_num, mad_size, opa); 2323 } 2324 2325 out: 2326 /* Post another receive request for this QP */ 2327 if (response) { 2328 ib_mad_post_receive_mads(qp_info, response); 2329 kfree(recv); 2330 } else 2331 ib_mad_post_receive_mads(qp_info, recv); 2332 } 2333 2334 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) 2335 { 2336 struct ib_mad_send_wr_private *mad_send_wr; 2337 unsigned long delay; 2338 2339 if (list_empty(&mad_agent_priv->wait_list)) { 2340 cancel_delayed_work(&mad_agent_priv->timed_work); 2341 } else { 2342 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2343 struct ib_mad_send_wr_private, 2344 agent_list); 2345 2346 if (time_after(mad_agent_priv->timeout, 2347 mad_send_wr->timeout)) { 2348 mad_agent_priv->timeout = mad_send_wr->timeout; 2349 delay = mad_send_wr->timeout - jiffies; 2350 if ((long)delay <= 0) 2351 delay = 1; 2352 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2353 &mad_agent_priv->timed_work, delay); 2354 } 2355 } 2356 } 2357 2358 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) 2359 { 2360 struct ib_mad_agent_private *mad_agent_priv; 2361 struct ib_mad_send_wr_private *temp_mad_send_wr; 2362 struct list_head *list_item; 2363 unsigned long delay; 2364 2365 mad_agent_priv = mad_send_wr->mad_agent_priv; 2366 list_del(&mad_send_wr->agent_list); 2367 2368 delay = mad_send_wr->timeout; 2369 mad_send_wr->timeout += jiffies; 2370 2371 if (delay) { 2372 list_for_each_prev(list_item, &mad_agent_priv->wait_list) { 2373 temp_mad_send_wr = list_entry(list_item, 2374 struct ib_mad_send_wr_private, 2375 agent_list); 2376 if (time_after(mad_send_wr->timeout, 2377 temp_mad_send_wr->timeout)) 2378 break; 2379 } 2380 } 2381 else 2382 list_item = &mad_agent_priv->wait_list; 2383 list_add(&mad_send_wr->agent_list, list_item); 2384 2385 /* Reschedule a work item if we have a shorter timeout */ 2386 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) 2387 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2388 &mad_agent_priv->timed_work, delay); 2389 } 2390 2391 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, 2392 int timeout_ms) 2393 { 2394 mad_send_wr->timeout 
= msecs_to_jiffies(timeout_ms); 2395 wait_for_response(mad_send_wr); 2396 } 2397 2398 /* 2399 * Process a send work completion 2400 */ 2401 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 2402 struct ib_mad_send_wc *mad_send_wc) 2403 { 2404 struct ib_mad_agent_private *mad_agent_priv; 2405 unsigned long flags; 2406 int ret; 2407 2408 mad_agent_priv = mad_send_wr->mad_agent_priv; 2409 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2410 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 2411 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); 2412 if (ret == IB_RMPP_RESULT_CONSUMED) 2413 goto done; 2414 } else 2415 ret = IB_RMPP_RESULT_UNHANDLED; 2416 2417 if (mad_send_wc->status != IB_WC_SUCCESS && 2418 mad_send_wr->status == IB_WC_SUCCESS) { 2419 mad_send_wr->status = mad_send_wc->status; 2420 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2421 } 2422 2423 if (--mad_send_wr->refcount > 0) { 2424 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && 2425 mad_send_wr->status == IB_WC_SUCCESS) { 2426 wait_for_response(mad_send_wr); 2427 } 2428 goto done; 2429 } 2430 2431 /* Remove send from MAD agent and notify client of completion */ 2432 list_del(&mad_send_wr->agent_list); 2433 adjust_timeout(mad_agent_priv); 2434 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2435 2436 if (mad_send_wr->status != IB_WC_SUCCESS ) 2437 mad_send_wc->status = mad_send_wr->status; 2438 if (ret == IB_RMPP_RESULT_INTERNAL) 2439 ib_rmpp_send_handler(mad_send_wc); 2440 else 2441 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2442 mad_send_wc); 2443 2444 /* Release reference on agent taken when sending */ 2445 deref_mad_agent(mad_agent_priv); 2446 return; 2447 done: 2448 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2449 } 2450 2451 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) 2452 { 2453 struct ib_mad_port_private *port_priv = cq->cq_context; 2454 struct ib_mad_list_head *mad_list = 2455 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2456 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; 2457 struct ib_mad_qp_info *qp_info; 2458 struct ib_mad_queue *send_queue; 2459 struct ib_send_wr *bad_send_wr; 2460 struct ib_mad_send_wc mad_send_wc; 2461 unsigned long flags; 2462 int ret; 2463 2464 if (list_empty_careful(&port_priv->port_list)) 2465 return; 2466 2467 if (wc->status != IB_WC_SUCCESS) { 2468 if (!ib_mad_send_error(port_priv, wc)) 2469 return; 2470 } 2471 2472 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2473 mad_list); 2474 send_queue = mad_list->mad_queue; 2475 qp_info = send_queue->qp_info; 2476 2477 retry: 2478 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2479 mad_send_wr->header_mapping, 2480 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2481 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2482 mad_send_wr->payload_mapping, 2483 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2484 queued_send_wr = NULL; 2485 spin_lock_irqsave(&send_queue->lock, flags); 2486 list_del(&mad_list->list); 2487 2488 /* Move queued send to the send queue */ 2489 if (send_queue->count-- > send_queue->max_active) { 2490 mad_list = container_of(qp_info->overflow_list.next, 2491 struct ib_mad_list_head, list); 2492 queued_send_wr = container_of(mad_list, 2493 struct ib_mad_send_wr_private, 2494 mad_list); 2495 list_move_tail(&mad_list->list, &send_queue->list); 2496 } 2497 spin_unlock_irqrestore(&send_queue->lock, flags); 2498 2499 mad_send_wc.send_buf = 
&mad_send_wr->send_buf; 2500 mad_send_wc.status = wc->status; 2501 mad_send_wc.vendor_err = wc->vendor_err; 2502 if (atomic_read(&qp_info->snoop_count)) 2503 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, 2504 IB_MAD_SNOOP_SEND_COMPLETIONS); 2505 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2506 2507 if (queued_send_wr) { 2508 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, 2509 &bad_send_wr); 2510 if (ret) { 2511 dev_err(&port_priv->device->dev, 2512 "ib_post_send failed: %d\n", ret); 2513 mad_send_wr = queued_send_wr; 2514 wc->status = IB_WC_LOC_QP_OP_ERR; 2515 goto retry; 2516 } 2517 } 2518 } 2519 2520 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) 2521 { 2522 struct ib_mad_send_wr_private *mad_send_wr; 2523 struct ib_mad_list_head *mad_list; 2524 unsigned long flags; 2525 2526 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 2527 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { 2528 mad_send_wr = container_of(mad_list, 2529 struct ib_mad_send_wr_private, 2530 mad_list); 2531 mad_send_wr->retry = 1; 2532 } 2533 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 2534 } 2535 2536 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, 2537 struct ib_wc *wc) 2538 { 2539 struct ib_mad_list_head *mad_list = 2540 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2541 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; 2542 struct ib_mad_send_wr_private *mad_send_wr; 2543 int ret; 2544 2545 /* 2546 * Send errors will transition the QP to SQE - move 2547 * QP to RTS and repost flushed work requests 2548 */ 2549 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2550 mad_list); 2551 if (wc->status == IB_WC_WR_FLUSH_ERR) { 2552 if (mad_send_wr->retry) { 2553 /* Repost send */ 2554 struct ib_send_wr *bad_send_wr; 2555 2556 mad_send_wr->retry = 0; 2557 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, 2558 &bad_send_wr); 2559 if (!ret) 2560 return false; 2561 } 2562 } else { 2563 struct ib_qp_attr *attr; 2564 2565 /* Transition QP to RTS and fail offending send */ 2566 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2567 if (attr) { 2568 attr->qp_state = IB_QPS_RTS; 2569 attr->cur_qp_state = IB_QPS_SQE; 2570 ret = ib_modify_qp(qp_info->qp, attr, 2571 IB_QP_STATE | IB_QP_CUR_STATE); 2572 kfree(attr); 2573 if (ret) 2574 dev_err(&port_priv->device->dev, 2575 "%s - ib_modify_qp to RTS: %d\n", 2576 __func__, ret); 2577 else 2578 mark_sends_for_retry(qp_info); 2579 } 2580 } 2581 2582 return true; 2583 } 2584 2585 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) 2586 { 2587 unsigned long flags; 2588 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; 2589 struct ib_mad_send_wc mad_send_wc; 2590 struct list_head cancel_list; 2591 2592 INIT_LIST_HEAD(&cancel_list); 2593 2594 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2595 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2596 &mad_agent_priv->send_list, agent_list) { 2597 if (mad_send_wr->status == IB_WC_SUCCESS) { 2598 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2599 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2600 } 2601 } 2602 2603 /* Empty wait list to prevent receives from finding a request */ 2604 list_splice_init(&mad_agent_priv->wait_list, &cancel_list); 2605 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2606 2607 /* Report all cancelled requests */ 2608 mad_send_wc.status = IB_WC_WR_FLUSH_ERR; 2609 mad_send_wc.vendor_err = 0; 2610 2611 
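        /*
         * The agent lock is no longer held here: each canceled request is
         * reported to the client's send_handler with IB_WC_WR_FLUSH_ERR and
         * the reference it held on the agent is dropped.
         */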
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2612 &cancel_list, agent_list) { 2613 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2614 list_del(&mad_send_wr->agent_list); 2615 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2616 &mad_send_wc); 2617 atomic_dec(&mad_agent_priv->refcount); 2618 } 2619 } 2620 2621 static struct ib_mad_send_wr_private* 2622 find_send_wr(struct ib_mad_agent_private *mad_agent_priv, 2623 struct ib_mad_send_buf *send_buf) 2624 { 2625 struct ib_mad_send_wr_private *mad_send_wr; 2626 2627 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 2628 agent_list) { 2629 if (&mad_send_wr->send_buf == send_buf) 2630 return mad_send_wr; 2631 } 2632 2633 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 2634 agent_list) { 2635 if (is_rmpp_data_mad(mad_agent_priv, 2636 mad_send_wr->send_buf.mad) && 2637 &mad_send_wr->send_buf == send_buf) 2638 return mad_send_wr; 2639 } 2640 return NULL; 2641 } 2642 2643 int ib_modify_mad(struct ib_mad_agent *mad_agent, 2644 struct ib_mad_send_buf *send_buf, u32 timeout_ms) 2645 { 2646 struct ib_mad_agent_private *mad_agent_priv; 2647 struct ib_mad_send_wr_private *mad_send_wr; 2648 unsigned long flags; 2649 int active; 2650 2651 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 2652 agent); 2653 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2654 mad_send_wr = find_send_wr(mad_agent_priv, send_buf); 2655 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { 2656 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2657 return -EINVAL; 2658 } 2659 2660 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); 2661 if (!timeout_ms) { 2662 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2663 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2664 } 2665 2666 mad_send_wr->send_buf.timeout_ms = timeout_ms; 2667 if (active) 2668 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2669 else 2670 ib_reset_mad_timeout(mad_send_wr, timeout_ms); 2671 2672 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2673 return 0; 2674 } 2675 EXPORT_SYMBOL(ib_modify_mad); 2676 2677 void ib_cancel_mad(struct ib_mad_agent *mad_agent, 2678 struct ib_mad_send_buf *send_buf) 2679 { 2680 ib_modify_mad(mad_agent, send_buf, 0); 2681 } 2682 EXPORT_SYMBOL(ib_cancel_mad); 2683 2684 static void local_completions(struct work_struct *work) 2685 { 2686 struct ib_mad_agent_private *mad_agent_priv; 2687 struct ib_mad_local_private *local; 2688 struct ib_mad_agent_private *recv_mad_agent; 2689 unsigned long flags; 2690 int free_mad; 2691 struct ib_wc wc; 2692 struct ib_mad_send_wc mad_send_wc; 2693 bool opa; 2694 2695 mad_agent_priv = 2696 container_of(work, struct ib_mad_agent_private, local_work); 2697 2698 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 2699 mad_agent_priv->qp_info->port_priv->port_num); 2700 2701 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2702 while (!list_empty(&mad_agent_priv->local_list)) { 2703 local = list_entry(mad_agent_priv->local_list.next, 2704 struct ib_mad_local_private, 2705 completion_list); 2706 list_del(&local->completion_list); 2707 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2708 free_mad = 0; 2709 if (local->mad_priv) { 2710 u8 base_version; 2711 recv_mad_agent = local->recv_mad_agent; 2712 if (!recv_mad_agent) { 2713 dev_err(&mad_agent_priv->agent.device->dev, 2714 "No receive MAD agent for local completion\n"); 2715 free_mad = 1; 2716 goto local_send_completion; 2717 } 2718 2719 /* 2720 * Defined behavior is to complete response 
2721 * before request 2722 */ 2723 build_smp_wc(recv_mad_agent->agent.qp, 2724 local->mad_send_wr->send_wr.wr.wr_cqe, 2725 be16_to_cpu(IB_LID_PERMISSIVE), 2726 local->mad_send_wr->send_wr.pkey_index, 2727 recv_mad_agent->agent.port_num, &wc); 2728 2729 local->mad_priv->header.recv_wc.wc = &wc; 2730 2731 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; 2732 if (opa && base_version == OPA_MGMT_BASE_VERSION) { 2733 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; 2734 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2735 } else { 2736 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2737 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2738 } 2739 2740 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); 2741 list_add(&local->mad_priv->header.recv_wc.recv_buf.list, 2742 &local->mad_priv->header.recv_wc.rmpp_list); 2743 local->mad_priv->header.recv_wc.recv_buf.grh = NULL; 2744 local->mad_priv->header.recv_wc.recv_buf.mad = 2745 (struct ib_mad *)local->mad_priv->mad; 2746 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) 2747 snoop_recv(recv_mad_agent->qp_info, 2748 &local->mad_priv->header.recv_wc, 2749 IB_MAD_SNOOP_RECVS); 2750 recv_mad_agent->agent.recv_handler( 2751 &recv_mad_agent->agent, 2752 &local->mad_send_wr->send_buf, 2753 &local->mad_priv->header.recv_wc); 2754 spin_lock_irqsave(&recv_mad_agent->lock, flags); 2755 atomic_dec(&recv_mad_agent->refcount); 2756 spin_unlock_irqrestore(&recv_mad_agent->lock, flags); 2757 } 2758 2759 local_send_completion: 2760 /* Complete send */ 2761 mad_send_wc.status = IB_WC_SUCCESS; 2762 mad_send_wc.vendor_err = 0; 2763 mad_send_wc.send_buf = &local->mad_send_wr->send_buf; 2764 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) 2765 snoop_send(mad_agent_priv->qp_info, 2766 &local->mad_send_wr->send_buf, 2767 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); 2768 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2769 &mad_send_wc); 2770 2771 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2772 atomic_dec(&mad_agent_priv->refcount); 2773 if (free_mad) 2774 kfree(local->mad_priv); 2775 kfree(local); 2776 } 2777 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2778 } 2779 2780 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) 2781 { 2782 int ret; 2783 2784 if (!mad_send_wr->retries_left) 2785 return -ETIMEDOUT; 2786 2787 mad_send_wr->retries_left--; 2788 mad_send_wr->send_buf.retries++; 2789 2790 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); 2791 2792 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { 2793 ret = ib_retry_rmpp(mad_send_wr); 2794 switch (ret) { 2795 case IB_RMPP_RESULT_UNHANDLED: 2796 ret = ib_send_mad(mad_send_wr); 2797 break; 2798 case IB_RMPP_RESULT_CONSUMED: 2799 ret = 0; 2800 break; 2801 default: 2802 ret = -ECOMM; 2803 break; 2804 } 2805 } else 2806 ret = ib_send_mad(mad_send_wr); 2807 2808 if (!ret) { 2809 mad_send_wr->refcount++; 2810 list_add_tail(&mad_send_wr->agent_list, 2811 &mad_send_wr->mad_agent_priv->send_list); 2812 } 2813 return ret; 2814 } 2815 2816 static void timeout_sends(struct work_struct *work) 2817 { 2818 struct ib_mad_agent_private *mad_agent_priv; 2819 struct ib_mad_send_wr_private *mad_send_wr; 2820 struct ib_mad_send_wc mad_send_wc; 2821 unsigned long flags, delay; 2822 2823 mad_agent_priv = container_of(work, struct ib_mad_agent_private, 2824 timed_work.work); 2825 mad_send_wc.vendor_err = 0; 2826 2827 
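        /*
         * Walk the wait list in expiry order: if the head entry has not timed
         * out yet, re-arm the delayed work and stop.  Expired sends are
         * retried while retries remain; otherwise the client is notified with
         * IB_WC_RESP_TIMEOUT_ERR (or the recorded error status).
         */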
spin_lock_irqsave(&mad_agent_priv->lock, flags); 2828 while (!list_empty(&mad_agent_priv->wait_list)) { 2829 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2830 struct ib_mad_send_wr_private, 2831 agent_list); 2832 2833 if (time_after(mad_send_wr->timeout, jiffies)) { 2834 delay = mad_send_wr->timeout - jiffies; 2835 if ((long)delay <= 0) 2836 delay = 1; 2837 queue_delayed_work(mad_agent_priv->qp_info-> 2838 port_priv->wq, 2839 &mad_agent_priv->timed_work, delay); 2840 break; 2841 } 2842 2843 list_del(&mad_send_wr->agent_list); 2844 if (mad_send_wr->status == IB_WC_SUCCESS && 2845 !retry_send(mad_send_wr)) 2846 continue; 2847 2848 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2849 2850 if (mad_send_wr->status == IB_WC_SUCCESS) 2851 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; 2852 else 2853 mad_send_wc.status = mad_send_wr->status; 2854 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2855 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2856 &mad_send_wc); 2857 2858 atomic_dec(&mad_agent_priv->refcount); 2859 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2860 } 2861 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2862 } 2863 2864 /* 2865 * Allocate receive MADs and post receive WRs for them 2866 */ 2867 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 2868 struct ib_mad_private *mad) 2869 { 2870 unsigned long flags; 2871 int post, ret; 2872 struct ib_mad_private *mad_priv; 2873 struct ib_sge sg_list; 2874 struct ib_recv_wr recv_wr, *bad_recv_wr; 2875 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; 2876 2877 /* Initialize common scatter list fields */ 2878 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; 2879 2880 /* Initialize common receive WR fields */ 2881 recv_wr.next = NULL; 2882 recv_wr.sg_list = &sg_list; 2883 recv_wr.num_sge = 1; 2884 2885 do { 2886 /* Allocate and map receive buffer */ 2887 if (mad) { 2888 mad_priv = mad; 2889 mad = NULL; 2890 } else { 2891 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), 2892 GFP_ATOMIC); 2893 if (!mad_priv) { 2894 ret = -ENOMEM; 2895 break; 2896 } 2897 } 2898 sg_list.length = mad_priv_dma_size(mad_priv); 2899 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, 2900 &mad_priv->grh, 2901 mad_priv_dma_size(mad_priv), 2902 DMA_FROM_DEVICE); 2903 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, 2904 sg_list.addr))) { 2905 ret = -ENOMEM; 2906 break; 2907 } 2908 mad_priv->header.mapping = sg_list.addr; 2909 mad_priv->header.mad_list.mad_queue = recv_queue; 2910 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; 2911 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; 2912 2913 /* Post receive WR */ 2914 spin_lock_irqsave(&recv_queue->lock, flags); 2915 post = (++recv_queue->count < recv_queue->max_active); 2916 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); 2917 spin_unlock_irqrestore(&recv_queue->lock, flags); 2918 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); 2919 if (ret) { 2920 spin_lock_irqsave(&recv_queue->lock, flags); 2921 list_del(&mad_priv->header.mad_list.list); 2922 recv_queue->count--; 2923 spin_unlock_irqrestore(&recv_queue->lock, flags); 2924 ib_dma_unmap_single(qp_info->port_priv->device, 2925 mad_priv->header.mapping, 2926 mad_priv_dma_size(mad_priv), 2927 DMA_FROM_DEVICE); 2928 kfree(mad_priv); 2929 dev_err(&qp_info->port_priv->device->dev, 2930 "ib_post_recv failed: %d\n", ret); 2931 break; 2932 } 2933 } while (post); 2934 2935 return ret; 2936 } 2937 2938 /* 2939 * Return all the posted receive 
MADs 2940 */ 2941 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) 2942 { 2943 struct ib_mad_private_header *mad_priv_hdr; 2944 struct ib_mad_private *recv; 2945 struct ib_mad_list_head *mad_list; 2946 2947 if (!qp_info->qp) 2948 return; 2949 2950 while (!list_empty(&qp_info->recv_queue.list)) { 2951 2952 mad_list = list_entry(qp_info->recv_queue.list.next, 2953 struct ib_mad_list_head, list); 2954 mad_priv_hdr = container_of(mad_list, 2955 struct ib_mad_private_header, 2956 mad_list); 2957 recv = container_of(mad_priv_hdr, struct ib_mad_private, 2958 header); 2959 2960 /* Remove from posted receive MAD list */ 2961 list_del(&mad_list->list); 2962 2963 ib_dma_unmap_single(qp_info->port_priv->device, 2964 recv->header.mapping, 2965 mad_priv_dma_size(recv), 2966 DMA_FROM_DEVICE); 2967 kfree(recv); 2968 } 2969 2970 qp_info->recv_queue.count = 0; 2971 } 2972 2973 /* 2974 * Start the port 2975 */ 2976 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) 2977 { 2978 int ret, i; 2979 struct ib_qp_attr *attr; 2980 struct ib_qp *qp; 2981 u16 pkey_index; 2982 2983 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2984 if (!attr) 2985 return -ENOMEM; 2986 2987 ret = ib_find_pkey(port_priv->device, port_priv->port_num, 2988 IB_DEFAULT_PKEY_FULL, &pkey_index); 2989 if (ret) 2990 pkey_index = 0; 2991 2992 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 2993 qp = port_priv->qp_info[i].qp; 2994 if (!qp) 2995 continue; 2996 2997 /* 2998 * PKey index for QP1 is irrelevant but 2999 * one is needed for the Reset to Init transition 3000 */ 3001 attr->qp_state = IB_QPS_INIT; 3002 attr->pkey_index = pkey_index; 3003 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; 3004 ret = ib_modify_qp(qp, attr, IB_QP_STATE | 3005 IB_QP_PKEY_INDEX | IB_QP_QKEY); 3006 if (ret) { 3007 dev_err(&port_priv->device->dev, 3008 "Couldn't change QP%d state to INIT: %d\n", 3009 i, ret); 3010 goto out; 3011 } 3012 3013 attr->qp_state = IB_QPS_RTR; 3014 ret = ib_modify_qp(qp, attr, IB_QP_STATE); 3015 if (ret) { 3016 dev_err(&port_priv->device->dev, 3017 "Couldn't change QP%d state to RTR: %d\n", 3018 i, ret); 3019 goto out; 3020 } 3021 3022 attr->qp_state = IB_QPS_RTS; 3023 attr->sq_psn = IB_MAD_SEND_Q_PSN; 3024 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); 3025 if (ret) { 3026 dev_err(&port_priv->device->dev, 3027 "Couldn't change QP%d state to RTS: %d\n", 3028 i, ret); 3029 goto out; 3030 } 3031 } 3032 3033 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 3034 if (ret) { 3035 dev_err(&port_priv->device->dev, 3036 "Failed to request completion notification: %d\n", 3037 ret); 3038 goto out; 3039 } 3040 3041 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 3042 if (!port_priv->qp_info[i].qp) 3043 continue; 3044 3045 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); 3046 if (ret) { 3047 dev_err(&port_priv->device->dev, 3048 "Couldn't post receive WRs\n"); 3049 goto out; 3050 } 3051 } 3052 out: 3053 kfree(attr); 3054 return ret; 3055 } 3056 3057 static void qp_event_handler(struct ib_event *event, void *qp_context) 3058 { 3059 struct ib_mad_qp_info *qp_info = qp_context; 3060 3061 /* It's worse than that! He's dead, Jim! 
*/ 3062 dev_err(&qp_info->port_priv->device->dev, 3063 "Fatal error (%d) on MAD QP (%d)\n", 3064 event->event, qp_info->qp->qp_num); 3065 } 3066 3067 static void init_mad_queue(struct ib_mad_qp_info *qp_info, 3068 struct ib_mad_queue *mad_queue) 3069 { 3070 mad_queue->qp_info = qp_info; 3071 mad_queue->count = 0; 3072 spin_lock_init(&mad_queue->lock); 3073 INIT_LIST_HEAD(&mad_queue->list); 3074 } 3075 3076 static void init_mad_qp(struct ib_mad_port_private *port_priv, 3077 struct ib_mad_qp_info *qp_info) 3078 { 3079 qp_info->port_priv = port_priv; 3080 init_mad_queue(qp_info, &qp_info->send_queue); 3081 init_mad_queue(qp_info, &qp_info->recv_queue); 3082 INIT_LIST_HEAD(&qp_info->overflow_list); 3083 spin_lock_init(&qp_info->snoop_lock); 3084 qp_info->snoop_table = NULL; 3085 qp_info->snoop_table_size = 0; 3086 atomic_set(&qp_info->snoop_count, 0); 3087 } 3088 3089 static int create_mad_qp(struct ib_mad_qp_info *qp_info, 3090 enum ib_qp_type qp_type) 3091 { 3092 struct ib_qp_init_attr qp_init_attr; 3093 int ret; 3094 3095 memset(&qp_init_attr, 0, sizeof qp_init_attr); 3096 qp_init_attr.send_cq = qp_info->port_priv->cq; 3097 qp_init_attr.recv_cq = qp_info->port_priv->cq; 3098 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 3099 qp_init_attr.cap.max_send_wr = mad_sendq_size; 3100 qp_init_attr.cap.max_recv_wr = mad_recvq_size; 3101 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; 3102 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; 3103 qp_init_attr.qp_type = qp_type; 3104 qp_init_attr.port_num = qp_info->port_priv->port_num; 3105 qp_init_attr.qp_context = qp_info; 3106 qp_init_attr.event_handler = qp_event_handler; 3107 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); 3108 if (IS_ERR(qp_info->qp)) { 3109 dev_err(&qp_info->port_priv->device->dev, 3110 "Couldn't create ib_mad QP%d\n", 3111 get_spl_qp_index(qp_type)); 3112 ret = PTR_ERR(qp_info->qp); 3113 goto error; 3114 } 3115 /* Use minimum queue sizes unless the CQ is resized */ 3116 qp_info->send_queue.max_active = mad_sendq_size; 3117 qp_info->recv_queue.max_active = mad_recvq_size; 3118 return 0; 3119 3120 error: 3121 return ret; 3122 } 3123 3124 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) 3125 { 3126 if (!qp_info->qp) 3127 return; 3128 3129 ib_destroy_qp(qp_info->qp); 3130 kfree(qp_info->snoop_table); 3131 } 3132 3133 /* 3134 * Open the port 3135 * Create the QP, PD, MR, and CQ if needed 3136 */ 3137 static int ib_mad_port_open(struct ib_device *device, 3138 int port_num) 3139 { 3140 int ret, cq_size; 3141 struct ib_mad_port_private *port_priv; 3142 unsigned long flags; 3143 char name[sizeof "ib_mad123"]; 3144 int has_smi; 3145 3146 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) 3147 return -EFAULT; 3148 3149 if (WARN_ON(rdma_cap_opa_mad(device, port_num) && 3150 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) 3151 return -EFAULT; 3152 3153 /* Create new device info */ 3154 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); 3155 if (!port_priv) 3156 return -ENOMEM; 3157 3158 port_priv->device = device; 3159 port_priv->port_num = port_num; 3160 spin_lock_init(&port_priv->reg_lock); 3161 INIT_LIST_HEAD(&port_priv->agent_list); 3162 init_mad_qp(port_priv, &port_priv->qp_info[0]); 3163 init_mad_qp(port_priv, &port_priv->qp_info[1]); 3164 3165 cq_size = mad_sendq_size + mad_recvq_size; 3166 has_smi = rdma_cap_ib_smi(device, port_num); 3167 if (has_smi) 3168 cq_size *= 2; 3169 3170 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, 3171 
IB_POLL_WORKQUEUE); 3172 if (IS_ERR(port_priv->cq)) { 3173 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); 3174 ret = PTR_ERR(port_priv->cq); 3175 goto error3; 3176 } 3177 3178 port_priv->pd = ib_alloc_pd(device, 0); 3179 if (IS_ERR(port_priv->pd)) { 3180 dev_err(&device->dev, "Couldn't create ib_mad PD\n"); 3181 ret = PTR_ERR(port_priv->pd); 3182 goto error4; 3183 } 3184 3185 if (has_smi) { 3186 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); 3187 if (ret) 3188 goto error6; 3189 } 3190 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); 3191 if (ret) 3192 goto error7; 3193 3194 snprintf(name, sizeof name, "ib_mad%d", port_num); 3195 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 3196 if (!port_priv->wq) { 3197 ret = -ENOMEM; 3198 goto error8; 3199 } 3200 3201 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3202 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 3203 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3204 3205 ret = ib_mad_port_start(port_priv); 3206 if (ret) { 3207 dev_err(&device->dev, "Couldn't start port\n"); 3208 goto error9; 3209 } 3210 3211 return 0; 3212 3213 error9: 3214 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3215 list_del_init(&port_priv->port_list); 3216 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3217 3218 destroy_workqueue(port_priv->wq); 3219 error8: 3220 destroy_mad_qp(&port_priv->qp_info[1]); 3221 error7: 3222 destroy_mad_qp(&port_priv->qp_info[0]); 3223 error6: 3224 ib_dealloc_pd(port_priv->pd); 3225 error4: 3226 ib_free_cq(port_priv->cq); 3227 cleanup_recv_queue(&port_priv->qp_info[1]); 3228 cleanup_recv_queue(&port_priv->qp_info[0]); 3229 error3: 3230 kfree(port_priv); 3231 3232 return ret; 3233 } 3234 3235 /* 3236 * Close the port 3237 * If there are no classes using the port, free the port 3238 * resources (CQ, MR, PD, QP) and remove the port's info structure 3239 */ 3240 static int ib_mad_port_close(struct ib_device *device, int port_num) 3241 { 3242 struct ib_mad_port_private *port_priv; 3243 unsigned long flags; 3244 3245 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3246 port_priv = __ib_get_mad_port(device, port_num); 3247 if (port_priv == NULL) { 3248 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3249 dev_err(&device->dev, "Port %d not found\n", port_num); 3250 return -ENODEV; 3251 } 3252 list_del_init(&port_priv->port_list); 3253 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3254 3255 destroy_workqueue(port_priv->wq); 3256 destroy_mad_qp(&port_priv->qp_info[1]); 3257 destroy_mad_qp(&port_priv->qp_info[0]); 3258 ib_dealloc_pd(port_priv->pd); 3259 ib_free_cq(port_priv->cq); 3260 cleanup_recv_queue(&port_priv->qp_info[1]); 3261 cleanup_recv_queue(&port_priv->qp_info[0]); 3262 /* XXX: Handle deallocation of MAD registration tables */ 3263 3264 kfree(port_priv); 3265 3266 return 0; 3267 } 3268 3269 static void ib_mad_init_device(struct ib_device *device) 3270 { 3271 int start, i; 3272 3273 start = rdma_start_port(device); 3274 3275 for (i = start; i <= rdma_end_port(device); i++) { 3276 if (!rdma_cap_ib_mad(device, i)) 3277 continue; 3278 3279 if (ib_mad_port_open(device, i)) { 3280 dev_err(&device->dev, "Couldn't open port %d\n", i); 3281 goto error; 3282 } 3283 if (ib_agent_port_open(device, i)) { 3284 dev_err(&device->dev, 3285 "Couldn't open port %d for agents\n", i); 3286 goto error_agent; 3287 } 3288 } 3289 return; 3290 3291 error_agent: 3292 if (ib_mad_port_close(device, i)) 3293 dev_err(&device->dev, "Couldn't close port %d\n", i); 3294 3295 error: 
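        /* Unwind every port opened so far: agents first, then the MAD port itself. */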
        while (--i >= start) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
                                "Couldn't close port %d for agents\n", i);
                if (ib_mad_port_close(device, i))
                        dev_err(&device->dev, "Couldn't close port %d\n", i);
        }
}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
        int i;

        for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
                                "Couldn't close port %d for agents\n", i);
                if (ib_mad_port_close(device, i))
                        dev_err(&device->dev, "Couldn't close port %d\n", i);
        }
}

static struct ib_client mad_client = {
        .name   = "mad",
        .add    = ib_mad_init_device,
        .remove = ib_mad_remove_device
};

int ib_mad_init(void)
{
        mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
        mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

        mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
        mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

        INIT_LIST_HEAD(&ib_mad_port_list);

        if (ib_register_client(&mad_client)) {
                pr_err("Couldn't register ib_mad client\n");
                return -EINVAL;
        }

        return 0;
}

void ib_mad_cleanup(void)
{
        ib_unregister_client(&mad_client);
}
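/*
 * Illustrative usage sketch (not part of the in-tree code): a client that
 * earlier obtained "agent" from ib_register_mad_agent() and posted
 * "send_buf" via ib_create_send_mad()/ib_post_send_mad() could use the
 * helpers exported above as follows; the variable names are hypothetical.
 *
 *      // Reset the response timeout on the outstanding request to 5 seconds.
 *      if (ib_modify_mad(agent, send_buf, 5000))
 *              pr_debug("request already completed or was not found\n");
 *
 *      // Or cancel it outright: the send then completes through the agent's
 *      // send_handler with status IB_WC_WR_FLUSH_ERR.
 *      ib_cancel_mad(agent, send_buf);
 */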