1 /* 2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. 3 * Copyright (c) 2005 Intel Corporation. All rights reserved. 4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. 5 * Copyright (c) 2009 HNR Consulting. All rights reserved. 6 * Copyright (c) 2014 Intel Corporation. All rights reserved. 7 * 8 * This software is available to you under a choice of one of two 9 * licenses. You may choose to be licensed under the terms of the GNU 10 * General Public License (GPL) Version 2, available from the file 11 * COPYING in the main directory of this source tree, or the 12 * OpenIB.org BSD license below: 13 * 14 * Redistribution and use in source and binary forms, with or 15 * without modification, are permitted provided that the following 16 * conditions are met: 17 * 18 * - Redistributions of source code must retain the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer. 21 * 22 * - Redistributions in binary form must reproduce the above 23 * copyright notice, this list of conditions and the following 24 * disclaimer in the documentation and/or other materials 25 * provided with the distribution. 26 * 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34 * SOFTWARE. 35 * 36 */ 37 38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 39 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/module.h> 43 #include <rdma/ib_cache.h> 44 45 #include "mad_priv.h" 46 #include "mad_rmpp.h" 47 #include "smi.h" 48 #include "opa_smi.h" 49 #include "agent.h" 50 51 MODULE_LICENSE("Dual BSD/GPL"); 52 MODULE_DESCRIPTION("kernel IB MAD API"); 53 MODULE_AUTHOR("Hal Rosenstock"); 54 MODULE_AUTHOR("Sean Hefty"); 55 56 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE; 57 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE; 58 59 module_param_named(send_queue_size, mad_sendq_size, int, 0444); 60 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests"); 61 module_param_named(recv_queue_size, mad_recvq_size, int, 0444); 62 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); 63 64 static struct list_head ib_mad_port_list; 65 static u32 ib_mad_client_id = 0; 66 67 /* Port list lock */ 68 static DEFINE_SPINLOCK(ib_mad_port_list_lock); 69 70 /* Forward declarations */ 71 static int method_in_use(struct ib_mad_mgmt_method_table **method, 72 struct ib_mad_reg_req *mad_reg_req); 73 static void remove_mad_reg_req(struct ib_mad_agent_private *priv); 74 static struct ib_mad_agent_private *find_mad_agent( 75 struct ib_mad_port_private *port_priv, 76 const struct ib_mad_hdr *mad); 77 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 78 struct ib_mad_private *mad); 79 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); 80 static void timeout_sends(struct work_struct *work); 81 static void local_completions(struct work_struct *work); 82 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 83 struct ib_mad_agent_private *agent_priv, 84 u8 mgmt_class); 85 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, 86 
struct ib_mad_agent_private *agent_priv); 87 88 /* 89 * Returns a ib_mad_port_private structure or NULL for a device/port 90 * Assumes ib_mad_port_list_lock is being held 91 */ 92 static inline struct ib_mad_port_private * 93 __ib_get_mad_port(struct ib_device *device, int port_num) 94 { 95 struct ib_mad_port_private *entry; 96 97 list_for_each_entry(entry, &ib_mad_port_list, port_list) { 98 if (entry->device == device && entry->port_num == port_num) 99 return entry; 100 } 101 return NULL; 102 } 103 104 /* 105 * Wrapper function to return a ib_mad_port_private structure or NULL 106 * for a device/port 107 */ 108 static inline struct ib_mad_port_private * 109 ib_get_mad_port(struct ib_device *device, int port_num) 110 { 111 struct ib_mad_port_private *entry; 112 unsigned long flags; 113 114 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 115 entry = __ib_get_mad_port(device, port_num); 116 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 117 118 return entry; 119 } 120 121 static inline u8 convert_mgmt_class(u8 mgmt_class) 122 { 123 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */ 124 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ? 125 0 : mgmt_class; 126 } 127 128 static int get_spl_qp_index(enum ib_qp_type qp_type) 129 { 130 switch (qp_type) 131 { 132 case IB_QPT_SMI: 133 return 0; 134 case IB_QPT_GSI: 135 return 1; 136 default: 137 return -1; 138 } 139 } 140 141 static int vendor_class_index(u8 mgmt_class) 142 { 143 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START; 144 } 145 146 static int is_vendor_class(u8 mgmt_class) 147 { 148 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) || 149 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END)) 150 return 0; 151 return 1; 152 } 153 154 static int is_vendor_oui(char *oui) 155 { 156 if (oui[0] || oui[1] || oui[2]) 157 return 1; 158 return 0; 159 } 160 161 static int is_vendor_method_in_use( 162 struct ib_mad_mgmt_vendor_class *vendor_class, 163 struct ib_mad_reg_req *mad_reg_req) 164 { 165 struct ib_mad_mgmt_method_table *method; 166 int i; 167 168 for (i = 0; i < MAX_MGMT_OUI; i++) { 169 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) { 170 method = vendor_class->method_table[i]; 171 if (method) { 172 if (method_in_use(&method, mad_reg_req)) 173 return 1; 174 else 175 break; 176 } 177 } 178 } 179 return 0; 180 } 181 182 int ib_response_mad(const struct ib_mad_hdr *hdr) 183 { 184 return ((hdr->method & IB_MGMT_METHOD_RESP) || 185 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) || 186 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) && 187 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP))); 188 } 189 EXPORT_SYMBOL(ib_response_mad); 190 191 /* 192 * ib_register_mad_agent - Register to send/receive MADs 193 */ 194 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, 195 u8 port_num, 196 enum ib_qp_type qp_type, 197 struct ib_mad_reg_req *mad_reg_req, 198 u8 rmpp_version, 199 ib_mad_send_handler send_handler, 200 ib_mad_recv_handler recv_handler, 201 void *context, 202 u32 registration_flags) 203 { 204 struct ib_mad_port_private *port_priv; 205 struct ib_mad_agent *ret = ERR_PTR(-EINVAL); 206 struct ib_mad_agent_private *mad_agent_priv; 207 struct ib_mad_reg_req *reg_req = NULL; 208 struct ib_mad_mgmt_class_table *class; 209 struct ib_mad_mgmt_vendor_class_table *vendor; 210 struct ib_mad_mgmt_vendor_class *vendor_class; 211 struct ib_mad_mgmt_method_table *method; 212 int ret2, qpn; 213 unsigned long flags; 214 u8 mgmt_class, vclass; 215 216 /* Validate parameters */ 217 qpn = get_spl_qp_index(qp_type); 218 if (qpn == -1) { 219 
dev_notice(&device->dev, 220 "ib_register_mad_agent: invalid QP Type %d\n", 221 qp_type); 222 goto error1; 223 } 224 225 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) { 226 dev_notice(&device->dev, 227 "ib_register_mad_agent: invalid RMPP Version %u\n", 228 rmpp_version); 229 goto error1; 230 } 231 232 /* Validate MAD registration request if supplied */ 233 if (mad_reg_req) { 234 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) { 235 dev_notice(&device->dev, 236 "ib_register_mad_agent: invalid Class Version %u\n", 237 mad_reg_req->mgmt_class_version); 238 goto error1; 239 } 240 if (!recv_handler) { 241 dev_notice(&device->dev, 242 "ib_register_mad_agent: no recv_handler\n"); 243 goto error1; 244 } 245 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { 246 /* 247 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only 248 * one in this range currently allowed 249 */ 250 if (mad_reg_req->mgmt_class != 251 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 252 dev_notice(&device->dev, 253 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n", 254 mad_reg_req->mgmt_class); 255 goto error1; 256 } 257 } else if (mad_reg_req->mgmt_class == 0) { 258 /* 259 * Class 0 is reserved in IBA and is used for 260 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 261 */ 262 dev_notice(&device->dev, 263 "ib_register_mad_agent: Invalid Mgmt Class 0\n"); 264 goto error1; 265 } else if (is_vendor_class(mad_reg_req->mgmt_class)) { 266 /* 267 * If class is in "new" vendor range, 268 * ensure supplied OUI is not zero 269 */ 270 if (!is_vendor_oui(mad_reg_req->oui)) { 271 dev_notice(&device->dev, 272 "ib_register_mad_agent: No OUI specified for class 0x%x\n", 273 mad_reg_req->mgmt_class); 274 goto error1; 275 } 276 } 277 /* Make sure class supplied is consistent with RMPP */ 278 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { 279 if (rmpp_version) { 280 dev_notice(&device->dev, 281 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n", 282 mad_reg_req->mgmt_class); 283 goto error1; 284 } 285 } 286 287 /* Make sure class supplied is consistent with QP type */ 288 if (qp_type == IB_QPT_SMI) { 289 if ((mad_reg_req->mgmt_class != 290 IB_MGMT_CLASS_SUBN_LID_ROUTED) && 291 (mad_reg_req->mgmt_class != 292 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { 293 dev_notice(&device->dev, 294 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n", 295 mad_reg_req->mgmt_class); 296 goto error1; 297 } 298 } else { 299 if ((mad_reg_req->mgmt_class == 300 IB_MGMT_CLASS_SUBN_LID_ROUTED) || 301 (mad_reg_req->mgmt_class == 302 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { 303 dev_notice(&device->dev, 304 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n", 305 mad_reg_req->mgmt_class); 306 goto error1; 307 } 308 } 309 } else { 310 /* No registration request supplied */ 311 if (!send_handler) 312 goto error1; 313 if (registration_flags & IB_MAD_USER_RMPP) 314 goto error1; 315 } 316 317 /* Validate device and port */ 318 port_priv = ib_get_mad_port(device, port_num); 319 if (!port_priv) { 320 dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n"); 321 ret = ERR_PTR(-ENODEV); 322 goto error1; 323 } 324 325 /* Verify the QP requested is supported. 
For example, Ethernet devices 326 * will not have QP0 */ 327 if (!port_priv->qp_info[qpn].qp) { 328 dev_notice(&device->dev, 329 "ib_register_mad_agent: QP %d not supported\n", qpn); 330 ret = ERR_PTR(-EPROTONOSUPPORT); 331 goto error1; 332 } 333 334 /* Allocate structures */ 335 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); 336 if (!mad_agent_priv) { 337 ret = ERR_PTR(-ENOMEM); 338 goto error1; 339 } 340 341 if (mad_reg_req) { 342 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL); 343 if (!reg_req) { 344 ret = ERR_PTR(-ENOMEM); 345 goto error3; 346 } 347 } 348 349 /* Now, fill in the various structures */ 350 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; 351 mad_agent_priv->reg_req = reg_req; 352 mad_agent_priv->agent.rmpp_version = rmpp_version; 353 mad_agent_priv->agent.device = device; 354 mad_agent_priv->agent.recv_handler = recv_handler; 355 mad_agent_priv->agent.send_handler = send_handler; 356 mad_agent_priv->agent.context = context; 357 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; 358 mad_agent_priv->agent.port_num = port_num; 359 mad_agent_priv->agent.flags = registration_flags; 360 spin_lock_init(&mad_agent_priv->lock); 361 INIT_LIST_HEAD(&mad_agent_priv->send_list); 362 INIT_LIST_HEAD(&mad_agent_priv->wait_list); 363 INIT_LIST_HEAD(&mad_agent_priv->done_list); 364 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); 365 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); 366 INIT_LIST_HEAD(&mad_agent_priv->local_list); 367 INIT_WORK(&mad_agent_priv->local_work, local_completions); 368 atomic_set(&mad_agent_priv->refcount, 1); 369 init_completion(&mad_agent_priv->comp); 370 371 spin_lock_irqsave(&port_priv->reg_lock, flags); 372 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; 373 374 /* 375 * Make sure MAD registration (if supplied) 376 * is non overlapping with any existing ones 377 */ 378 if (mad_reg_req) { 379 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class); 380 if (!is_vendor_class(mgmt_class)) { 381 class = port_priv->version[mad_reg_req-> 382 mgmt_class_version].class; 383 if (class) { 384 method = class->method_table[mgmt_class]; 385 if (method) { 386 if (method_in_use(&method, 387 mad_reg_req)) 388 goto error4; 389 } 390 } 391 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, 392 mgmt_class); 393 } else { 394 /* "New" vendor class range */ 395 vendor = port_priv->version[mad_reg_req-> 396 mgmt_class_version].vendor; 397 if (vendor) { 398 vclass = vendor_class_index(mgmt_class); 399 vendor_class = vendor->vendor_class[vclass]; 400 if (vendor_class) { 401 if (is_vendor_method_in_use( 402 vendor_class, 403 mad_reg_req)) 404 goto error4; 405 } 406 } 407 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); 408 } 409 if (ret2) { 410 ret = ERR_PTR(ret2); 411 goto error4; 412 } 413 } 414 415 /* Add mad agent into port's agent list */ 416 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list); 417 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 418 419 return &mad_agent_priv->agent; 420 421 error4: 422 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 423 kfree(reg_req); 424 error3: 425 kfree(mad_agent_priv); 426 error1: 427 return ret; 428 } 429 EXPORT_SYMBOL(ib_register_mad_agent); 430 431 static inline int is_snooping_sends(int mad_snoop_flags) 432 { 433 return (mad_snoop_flags & 434 (/*IB_MAD_SNOOP_POSTED_SENDS | 435 IB_MAD_SNOOP_RMPP_SENDS |*/ 436 IB_MAD_SNOOP_SEND_COMPLETIONS /*| 437 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/)); 438 } 439 440 static inline int is_snooping_recvs(int mad_snoop_flags) 
441 { 442 return (mad_snoop_flags & 443 (IB_MAD_SNOOP_RECVS /*| 444 IB_MAD_SNOOP_RMPP_RECVS*/)); 445 } 446 447 static int register_snoop_agent(struct ib_mad_qp_info *qp_info, 448 struct ib_mad_snoop_private *mad_snoop_priv) 449 { 450 struct ib_mad_snoop_private **new_snoop_table; 451 unsigned long flags; 452 int i; 453 454 spin_lock_irqsave(&qp_info->snoop_lock, flags); 455 /* Check for empty slot in array. */ 456 for (i = 0; i < qp_info->snoop_table_size; i++) 457 if (!qp_info->snoop_table[i]) 458 break; 459 460 if (i == qp_info->snoop_table_size) { 461 /* Grow table. */ 462 new_snoop_table = krealloc(qp_info->snoop_table, 463 sizeof mad_snoop_priv * 464 (qp_info->snoop_table_size + 1), 465 GFP_ATOMIC); 466 if (!new_snoop_table) { 467 i = -ENOMEM; 468 goto out; 469 } 470 471 qp_info->snoop_table = new_snoop_table; 472 qp_info->snoop_table_size++; 473 } 474 qp_info->snoop_table[i] = mad_snoop_priv; 475 atomic_inc(&qp_info->snoop_count); 476 out: 477 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 478 return i; 479 } 480 481 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, 482 u8 port_num, 483 enum ib_qp_type qp_type, 484 int mad_snoop_flags, 485 ib_mad_snoop_handler snoop_handler, 486 ib_mad_recv_handler recv_handler, 487 void *context) 488 { 489 struct ib_mad_port_private *port_priv; 490 struct ib_mad_agent *ret; 491 struct ib_mad_snoop_private *mad_snoop_priv; 492 int qpn; 493 494 /* Validate parameters */ 495 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) || 496 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) { 497 ret = ERR_PTR(-EINVAL); 498 goto error1; 499 } 500 qpn = get_spl_qp_index(qp_type); 501 if (qpn == -1) { 502 ret = ERR_PTR(-EINVAL); 503 goto error1; 504 } 505 port_priv = ib_get_mad_port(device, port_num); 506 if (!port_priv) { 507 ret = ERR_PTR(-ENODEV); 508 goto error1; 509 } 510 /* Allocate structures */ 511 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL); 512 if (!mad_snoop_priv) { 513 ret = ERR_PTR(-ENOMEM); 514 goto error1; 515 } 516 517 /* Now, fill in the various structures */ 518 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn]; 519 mad_snoop_priv->agent.device = device; 520 mad_snoop_priv->agent.recv_handler = recv_handler; 521 mad_snoop_priv->agent.snoop_handler = snoop_handler; 522 mad_snoop_priv->agent.context = context; 523 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; 524 mad_snoop_priv->agent.port_num = port_num; 525 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; 526 init_completion(&mad_snoop_priv->comp); 527 mad_snoop_priv->snoop_index = register_snoop_agent( 528 &port_priv->qp_info[qpn], 529 mad_snoop_priv); 530 if (mad_snoop_priv->snoop_index < 0) { 531 ret = ERR_PTR(mad_snoop_priv->snoop_index); 532 goto error2; 533 } 534 535 atomic_set(&mad_snoop_priv->refcount, 1); 536 return &mad_snoop_priv->agent; 537 538 error2: 539 kfree(mad_snoop_priv); 540 error1: 541 return ret; 542 } 543 EXPORT_SYMBOL(ib_register_mad_snoop); 544 545 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv) 546 { 547 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 548 complete(&mad_agent_priv->comp); 549 } 550 551 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv) 552 { 553 if (atomic_dec_and_test(&mad_snoop_priv->refcount)) 554 complete(&mad_snoop_priv->comp); 555 } 556 557 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) 558 { 559 struct ib_mad_port_private *port_priv; 560 unsigned long flags; 561 562 /* Note that we 
could still be handling received MADs */ 563 564 /* 565 * Canceling all sends results in dropping received response 566 * MADs, preventing us from queuing additional work 567 */ 568 cancel_mads(mad_agent_priv); 569 port_priv = mad_agent_priv->qp_info->port_priv; 570 cancel_delayed_work(&mad_agent_priv->timed_work); 571 572 spin_lock_irqsave(&port_priv->reg_lock, flags); 573 remove_mad_reg_req(mad_agent_priv); 574 list_del(&mad_agent_priv->agent_list); 575 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 576 577 flush_workqueue(port_priv->wq); 578 ib_cancel_rmpp_recvs(mad_agent_priv); 579 580 deref_mad_agent(mad_agent_priv); 581 wait_for_completion(&mad_agent_priv->comp); 582 583 kfree(mad_agent_priv->reg_req); 584 kfree(mad_agent_priv); 585 } 586 587 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) 588 { 589 struct ib_mad_qp_info *qp_info; 590 unsigned long flags; 591 592 qp_info = mad_snoop_priv->qp_info; 593 spin_lock_irqsave(&qp_info->snoop_lock, flags); 594 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL; 595 atomic_dec(&qp_info->snoop_count); 596 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 597 598 deref_snoop_agent(mad_snoop_priv); 599 wait_for_completion(&mad_snoop_priv->comp); 600 601 kfree(mad_snoop_priv); 602 } 603 604 /* 605 * ib_unregister_mad_agent - Unregisters a client from using MAD services 606 */ 607 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent) 608 { 609 struct ib_mad_agent_private *mad_agent_priv; 610 struct ib_mad_snoop_private *mad_snoop_priv; 611 612 /* If the TID is zero, the agent can only snoop. */ 613 if (mad_agent->hi_tid) { 614 mad_agent_priv = container_of(mad_agent, 615 struct ib_mad_agent_private, 616 agent); 617 unregister_mad_agent(mad_agent_priv); 618 } else { 619 mad_snoop_priv = container_of(mad_agent, 620 struct ib_mad_snoop_private, 621 agent); 622 unregister_mad_snoop(mad_snoop_priv); 623 } 624 return 0; 625 } 626 EXPORT_SYMBOL(ib_unregister_mad_agent); 627 628 static void dequeue_mad(struct ib_mad_list_head *mad_list) 629 { 630 struct ib_mad_queue *mad_queue; 631 unsigned long flags; 632 633 BUG_ON(!mad_list->mad_queue); 634 mad_queue = mad_list->mad_queue; 635 spin_lock_irqsave(&mad_queue->lock, flags); 636 list_del(&mad_list->list); 637 mad_queue->count--; 638 spin_unlock_irqrestore(&mad_queue->lock, flags); 639 } 640 641 static void snoop_send(struct ib_mad_qp_info *qp_info, 642 struct ib_mad_send_buf *send_buf, 643 struct ib_mad_send_wc *mad_send_wc, 644 int mad_snoop_flags) 645 { 646 struct ib_mad_snoop_private *mad_snoop_priv; 647 unsigned long flags; 648 int i; 649 650 spin_lock_irqsave(&qp_info->snoop_lock, flags); 651 for (i = 0; i < qp_info->snoop_table_size; i++) { 652 mad_snoop_priv = qp_info->snoop_table[i]; 653 if (!mad_snoop_priv || 654 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags)) 655 continue; 656 657 atomic_inc(&mad_snoop_priv->refcount); 658 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 659 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, 660 send_buf, mad_send_wc); 661 deref_snoop_agent(mad_snoop_priv); 662 spin_lock_irqsave(&qp_info->snoop_lock, flags); 663 } 664 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 665 } 666 667 static void snoop_recv(struct ib_mad_qp_info *qp_info, 668 struct ib_mad_recv_wc *mad_recv_wc, 669 int mad_snoop_flags) 670 { 671 struct ib_mad_snoop_private *mad_snoop_priv; 672 unsigned long flags; 673 int i; 674 675 spin_lock_irqsave(&qp_info->snoop_lock, flags); 676 for (i = 0; i < qp_info->snoop_table_size; 
i++) { 677 mad_snoop_priv = qp_info->snoop_table[i]; 678 if (!mad_snoop_priv || 679 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags)) 680 continue; 681 682 atomic_inc(&mad_snoop_priv->refcount); 683 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 684 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, 685 mad_recv_wc); 686 deref_snoop_agent(mad_snoop_priv); 687 spin_lock_irqsave(&qp_info->snoop_lock, flags); 688 } 689 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 690 } 691 692 static void build_smp_wc(struct ib_qp *qp, 693 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num, 694 struct ib_wc *wc) 695 { 696 memset(wc, 0, sizeof *wc); 697 wc->wr_id = wr_id; 698 wc->status = IB_WC_SUCCESS; 699 wc->opcode = IB_WC_RECV; 700 wc->pkey_index = pkey_index; 701 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); 702 wc->src_qp = IB_QP0; 703 wc->qp = qp; 704 wc->slid = slid; 705 wc->sl = 0; 706 wc->dlid_path_bits = 0; 707 wc->port_num = port_num; 708 } 709 710 static size_t mad_priv_size(const struct ib_mad_private *mp) 711 { 712 return sizeof(struct ib_mad_private) + mp->mad_size; 713 } 714 715 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags) 716 { 717 size_t size = sizeof(struct ib_mad_private) + mad_size; 718 struct ib_mad_private *ret = kzalloc(size, flags); 719 720 if (ret) 721 ret->mad_size = mad_size; 722 723 return ret; 724 } 725 726 static size_t port_mad_size(const struct ib_mad_port_private *port_priv) 727 { 728 return rdma_max_mad_size(port_priv->device, port_priv->port_num); 729 } 730 731 static size_t mad_priv_dma_size(const struct ib_mad_private *mp) 732 { 733 return sizeof(struct ib_grh) + mp->mad_size; 734 } 735 736 /* 737 * Return 0 if SMP is to be sent 738 * Return 1 if SMP was consumed locally (whether or not solicited) 739 * Return < 0 if error 740 */ 741 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, 742 struct ib_mad_send_wr_private *mad_send_wr) 743 { 744 int ret = 0; 745 struct ib_smp *smp = mad_send_wr->send_buf.mad; 746 struct opa_smp *opa_smp = (struct opa_smp *)smp; 747 unsigned long flags; 748 struct ib_mad_local_private *local; 749 struct ib_mad_private *mad_priv; 750 struct ib_mad_port_private *port_priv; 751 struct ib_mad_agent_private *recv_mad_agent = NULL; 752 struct ib_device *device = mad_agent_priv->agent.device; 753 u8 port_num; 754 struct ib_wc mad_wc; 755 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr; 756 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); 757 u16 out_mad_pkey_index = 0; 758 u16 drslid; 759 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 760 mad_agent_priv->qp_info->port_priv->port_num); 761 762 if (rdma_cap_ib_switch(device) && 763 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 764 port_num = send_wr->port_num; 765 else 766 port_num = mad_agent_priv->agent.port_num; 767 768 /* 769 * Directed route handling starts if the initial LID routed part of 770 * a request or the ending LID routed part of a response is empty. 771 * If we are at the start of the LID routed part, don't update the 772 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. 773 */ 774 if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) { 775 u32 opa_drslid; 776 777 if ((opa_get_smp_direction(opa_smp) 778 ? 
opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == 779 OPA_LID_PERMISSIVE && 780 opa_smi_handle_dr_smp_send(opa_smp, 781 rdma_cap_ib_switch(device), 782 port_num) == IB_SMI_DISCARD) { 783 ret = -EINVAL; 784 dev_err(&device->dev, "OPA Invalid directed route\n"); 785 goto out; 786 } 787 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); 788 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) && 789 opa_drslid & 0xffff0000) { 790 ret = -EINVAL; 791 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", 792 opa_drslid); 793 goto out; 794 } 795 drslid = (u16)(opa_drslid & 0x0000ffff); 796 797 /* Check to post send on QP or process locally */ 798 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD && 799 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD) 800 goto out; 801 } else { 802 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == 803 IB_LID_PERMISSIVE && 804 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) == 805 IB_SMI_DISCARD) { 806 ret = -EINVAL; 807 dev_err(&device->dev, "Invalid directed route\n"); 808 goto out; 809 } 810 drslid = be16_to_cpu(smp->dr_slid); 811 812 /* Check to post send on QP or process locally */ 813 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD && 814 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD) 815 goto out; 816 } 817 818 local = kmalloc(sizeof *local, GFP_ATOMIC); 819 if (!local) { 820 ret = -ENOMEM; 821 dev_err(&device->dev, "No memory for ib_mad_local_private\n"); 822 goto out; 823 } 824 local->mad_priv = NULL; 825 local->recv_mad_agent = NULL; 826 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC); 827 if (!mad_priv) { 828 ret = -ENOMEM; 829 dev_err(&device->dev, "No memory for local response MAD\n"); 830 kfree(local); 831 goto out; 832 } 833 834 build_smp_wc(mad_agent_priv->agent.qp, 835 send_wr->wr.wr_id, drslid, 836 send_wr->pkey_index, 837 send_wr->port_num, &mad_wc); 838 839 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) { 840 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len 841 + mad_send_wr->send_buf.data_len 842 + sizeof(struct ib_grh); 843 } 844 845 /* No GRH for DR SMP */ 846 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL, 847 (const struct ib_mad_hdr *)smp, mad_size, 848 (struct ib_mad_hdr *)mad_priv->mad, 849 &mad_size, &out_mad_pkey_index); 850 switch (ret) 851 { 852 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: 853 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) && 854 mad_agent_priv->agent.recv_handler) { 855 local->mad_priv = mad_priv; 856 local->recv_mad_agent = mad_agent_priv; 857 /* 858 * Reference MAD agent until receive 859 * side of local completion handled 860 */ 861 atomic_inc(&mad_agent_priv->refcount); 862 } else 863 kfree(mad_priv); 864 break; 865 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: 866 kfree(mad_priv); 867 break; 868 case IB_MAD_RESULT_SUCCESS: 869 /* Treat like an incoming receive MAD */ 870 port_priv = ib_get_mad_port(mad_agent_priv->agent.device, 871 mad_agent_priv->agent.port_num); 872 if (port_priv) { 873 memcpy(mad_priv->mad, smp, mad_priv->mad_size); 874 recv_mad_agent = find_mad_agent(port_priv, 875 (const struct ib_mad_hdr *)mad_priv->mad); 876 } 877 if (!port_priv || !recv_mad_agent) { 878 /* 879 * No receiving agent so drop packet and 880 * generate send completion. 
881 */ 882 kfree(mad_priv); 883 break; 884 } 885 local->mad_priv = mad_priv; 886 local->recv_mad_agent = recv_mad_agent; 887 break; 888 default: 889 kfree(mad_priv); 890 kfree(local); 891 ret = -EINVAL; 892 goto out; 893 } 894 895 local->mad_send_wr = mad_send_wr; 896 if (opa) { 897 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index; 898 local->return_wc_byte_len = mad_size; 899 } 900 /* Reference MAD agent until send side of local completion handled */ 901 atomic_inc(&mad_agent_priv->refcount); 902 /* Queue local completion to local list */ 903 spin_lock_irqsave(&mad_agent_priv->lock, flags); 904 list_add_tail(&local->completion_list, &mad_agent_priv->local_list); 905 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 906 queue_work(mad_agent_priv->qp_info->port_priv->wq, 907 &mad_agent_priv->local_work); 908 ret = 1; 909 out: 910 return ret; 911 } 912 913 static int get_pad_size(int hdr_len, int data_len, size_t mad_size) 914 { 915 int seg_size, pad; 916 917 seg_size = mad_size - hdr_len; 918 if (data_len && seg_size) { 919 pad = seg_size - data_len % seg_size; 920 return pad == seg_size ? 0 : pad; 921 } else 922 return seg_size; 923 } 924 925 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr) 926 { 927 struct ib_rmpp_segment *s, *t; 928 929 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) { 930 list_del(&s->list); 931 kfree(s); 932 } 933 } 934 935 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, 936 size_t mad_size, gfp_t gfp_mask) 937 { 938 struct ib_mad_send_buf *send_buf = &send_wr->send_buf; 939 struct ib_rmpp_mad *rmpp_mad = send_buf->mad; 940 struct ib_rmpp_segment *seg = NULL; 941 int left, seg_size, pad; 942 943 send_buf->seg_size = mad_size - send_buf->hdr_len; 944 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR; 945 seg_size = send_buf->seg_size; 946 pad = send_wr->pad; 947 948 /* Allocate data segments. 
*/ 949 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { 950 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); 951 if (!seg) { 952 dev_err(&send_buf->mad_agent->device->dev, 953 "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n", 954 sizeof (*seg) + seg_size, gfp_mask); 955 free_send_rmpp_list(send_wr); 956 return -ENOMEM; 957 } 958 seg->num = ++send_buf->seg_count; 959 list_add_tail(&seg->list, &send_wr->rmpp_list); 960 } 961 962 /* Zero any padding */ 963 if (pad) 964 memset(seg->data + seg_size - pad, 0, pad); 965 966 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv-> 967 agent.rmpp_version; 968 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; 969 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); 970 971 send_wr->cur_seg = container_of(send_wr->rmpp_list.next, 972 struct ib_rmpp_segment, list); 973 send_wr->last_ack_seg = send_wr->cur_seg; 974 return 0; 975 } 976 977 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent) 978 { 979 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); 980 } 981 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent); 982 983 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, 984 u32 remote_qpn, u16 pkey_index, 985 int rmpp_active, 986 int hdr_len, int data_len, 987 gfp_t gfp_mask, 988 u8 base_version) 989 { 990 struct ib_mad_agent_private *mad_agent_priv; 991 struct ib_mad_send_wr_private *mad_send_wr; 992 int pad, message_size, ret, size; 993 void *buf; 994 size_t mad_size; 995 bool opa; 996 997 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 998 agent); 999 1000 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num); 1001 1002 if (opa && base_version == OPA_MGMT_BASE_VERSION) 1003 mad_size = sizeof(struct opa_mad); 1004 else 1005 mad_size = sizeof(struct ib_mad); 1006 1007 pad = get_pad_size(hdr_len, data_len, mad_size); 1008 message_size = hdr_len + data_len + pad; 1009 1010 if (ib_mad_kernel_rmpp_agent(mad_agent)) { 1011 if (!rmpp_active && message_size > mad_size) 1012 return ERR_PTR(-EINVAL); 1013 } else 1014 if (rmpp_active || message_size > mad_size) 1015 return ERR_PTR(-EINVAL); 1016 1017 size = rmpp_active ? 
hdr_len : mad_size; 1018 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask); 1019 if (!buf) 1020 return ERR_PTR(-ENOMEM); 1021 1022 mad_send_wr = buf + size; 1023 INIT_LIST_HEAD(&mad_send_wr->rmpp_list); 1024 mad_send_wr->send_buf.mad = buf; 1025 mad_send_wr->send_buf.hdr_len = hdr_len; 1026 mad_send_wr->send_buf.data_len = data_len; 1027 mad_send_wr->pad = pad; 1028 1029 mad_send_wr->mad_agent_priv = mad_agent_priv; 1030 mad_send_wr->sg_list[0].length = hdr_len; 1031 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey; 1032 1033 /* OPA MADs don't have to be the full 2048 bytes */ 1034 if (opa && base_version == OPA_MGMT_BASE_VERSION && 1035 data_len < mad_size - hdr_len) 1036 mad_send_wr->sg_list[1].length = data_len; 1037 else 1038 mad_send_wr->sg_list[1].length = mad_size - hdr_len; 1039 1040 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey; 1041 1042 mad_send_wr->send_wr.wr.wr_id = (unsigned long) mad_send_wr; 1043 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list; 1044 mad_send_wr->send_wr.wr.num_sge = 2; 1045 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND; 1046 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED; 1047 mad_send_wr->send_wr.remote_qpn = remote_qpn; 1048 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY; 1049 mad_send_wr->send_wr.pkey_index = pkey_index; 1050 1051 if (rmpp_active) { 1052 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask); 1053 if (ret) { 1054 kfree(buf); 1055 return ERR_PTR(ret); 1056 } 1057 } 1058 1059 mad_send_wr->send_buf.mad_agent = mad_agent; 1060 atomic_inc(&mad_agent_priv->refcount); 1061 return &mad_send_wr->send_buf; 1062 } 1063 EXPORT_SYMBOL(ib_create_send_mad); 1064 1065 int ib_get_mad_data_offset(u8 mgmt_class) 1066 { 1067 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 1068 return IB_MGMT_SA_HDR; 1069 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || 1070 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || 1071 (mgmt_class == IB_MGMT_CLASS_BIS)) 1072 return IB_MGMT_DEVICE_HDR; 1073 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 1074 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 1075 return IB_MGMT_VENDOR_HDR; 1076 else 1077 return IB_MGMT_MAD_HDR; 1078 } 1079 EXPORT_SYMBOL(ib_get_mad_data_offset); 1080 1081 int ib_is_mad_class_rmpp(u8 mgmt_class) 1082 { 1083 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) || 1084 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || 1085 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || 1086 (mgmt_class == IB_MGMT_CLASS_BIS) || 1087 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 1088 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))) 1089 return 1; 1090 return 0; 1091 } 1092 EXPORT_SYMBOL(ib_is_mad_class_rmpp); 1093 1094 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) 1095 { 1096 struct ib_mad_send_wr_private *mad_send_wr; 1097 struct list_head *list; 1098 1099 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, 1100 send_buf); 1101 list = &mad_send_wr->cur_seg->list; 1102 1103 if (mad_send_wr->cur_seg->num < seg_num) { 1104 list_for_each_entry(mad_send_wr->cur_seg, list, list) 1105 if (mad_send_wr->cur_seg->num == seg_num) 1106 break; 1107 } else if (mad_send_wr->cur_seg->num > seg_num) { 1108 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list) 1109 if (mad_send_wr->cur_seg->num == seg_num) 1110 break; 1111 } 1112 return mad_send_wr->cur_seg->data; 1113 } 1114 EXPORT_SYMBOL(ib_get_rmpp_segment); 1115 1116 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr) 1117 { 1118 if 
(mad_send_wr->send_buf.seg_count) 1119 return ib_get_rmpp_segment(&mad_send_wr->send_buf, 1120 mad_send_wr->seg_num); 1121 else 1122 return mad_send_wr->send_buf.mad + 1123 mad_send_wr->send_buf.hdr_len; 1124 } 1125 1126 void ib_free_send_mad(struct ib_mad_send_buf *send_buf) 1127 { 1128 struct ib_mad_agent_private *mad_agent_priv; 1129 struct ib_mad_send_wr_private *mad_send_wr; 1130 1131 mad_agent_priv = container_of(send_buf->mad_agent, 1132 struct ib_mad_agent_private, agent); 1133 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, 1134 send_buf); 1135 1136 free_send_rmpp_list(mad_send_wr); 1137 kfree(send_buf->mad); 1138 deref_mad_agent(mad_agent_priv); 1139 } 1140 EXPORT_SYMBOL(ib_free_send_mad); 1141 1142 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) 1143 { 1144 struct ib_mad_qp_info *qp_info; 1145 struct list_head *list; 1146 struct ib_send_wr *bad_send_wr; 1147 struct ib_mad_agent *mad_agent; 1148 struct ib_sge *sge; 1149 unsigned long flags; 1150 int ret; 1151 1152 /* Set WR ID to find mad_send_wr upon completion */ 1153 qp_info = mad_send_wr->mad_agent_priv->qp_info; 1154 mad_send_wr->send_wr.wr.wr_id = (unsigned long)&mad_send_wr->mad_list; 1155 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; 1156 1157 mad_agent = mad_send_wr->send_buf.mad_agent; 1158 sge = mad_send_wr->sg_list; 1159 sge[0].addr = ib_dma_map_single(mad_agent->device, 1160 mad_send_wr->send_buf.mad, 1161 sge[0].length, 1162 DMA_TO_DEVICE); 1163 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) 1164 return -ENOMEM; 1165 1166 mad_send_wr->header_mapping = sge[0].addr; 1167 1168 sge[1].addr = ib_dma_map_single(mad_agent->device, 1169 ib_get_payload(mad_send_wr), 1170 sge[1].length, 1171 DMA_TO_DEVICE); 1172 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { 1173 ib_dma_unmap_single(mad_agent->device, 1174 mad_send_wr->header_mapping, 1175 sge[0].length, DMA_TO_DEVICE); 1176 return -ENOMEM; 1177 } 1178 mad_send_wr->payload_mapping = sge[1].addr; 1179 1180 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 1181 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { 1182 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, 1183 &bad_send_wr); 1184 list = &qp_info->send_queue.list; 1185 } else { 1186 ret = 0; 1187 list = &qp_info->overflow_list; 1188 } 1189 1190 if (!ret) { 1191 qp_info->send_queue.count++; 1192 list_add_tail(&mad_send_wr->mad_list.list, list); 1193 } 1194 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 1195 if (ret) { 1196 ib_dma_unmap_single(mad_agent->device, 1197 mad_send_wr->header_mapping, 1198 sge[0].length, DMA_TO_DEVICE); 1199 ib_dma_unmap_single(mad_agent->device, 1200 mad_send_wr->payload_mapping, 1201 sge[1].length, DMA_TO_DEVICE); 1202 } 1203 return ret; 1204 } 1205 1206 /* 1207 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated 1208 * with the registered client 1209 */ 1210 int ib_post_send_mad(struct ib_mad_send_buf *send_buf, 1211 struct ib_mad_send_buf **bad_send_buf) 1212 { 1213 struct ib_mad_agent_private *mad_agent_priv; 1214 struct ib_mad_send_buf *next_send_buf; 1215 struct ib_mad_send_wr_private *mad_send_wr; 1216 unsigned long flags; 1217 int ret = -EINVAL; 1218 1219 /* Walk list of send WRs and post each on send list */ 1220 for (; send_buf; send_buf = next_send_buf) { 1221 1222 mad_send_wr = container_of(send_buf, 1223 struct ib_mad_send_wr_private, 1224 send_buf); 1225 mad_agent_priv = mad_send_wr->mad_agent_priv; 1226 1227 if 
(!send_buf->mad_agent->send_handler || 1228 (send_buf->timeout_ms && 1229 !send_buf->mad_agent->recv_handler)) { 1230 ret = -EINVAL; 1231 goto error; 1232 } 1233 1234 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { 1235 if (mad_agent_priv->agent.rmpp_version) { 1236 ret = -EINVAL; 1237 goto error; 1238 } 1239 } 1240 1241 /* 1242 * Save pointer to next work request to post in case the 1243 * current one completes, and the user modifies the work 1244 * request associated with the completion 1245 */ 1246 next_send_buf = send_buf->next; 1247 mad_send_wr->send_wr.ah = send_buf->ah; 1248 1249 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == 1250 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 1251 ret = handle_outgoing_dr_smp(mad_agent_priv, 1252 mad_send_wr); 1253 if (ret < 0) /* error */ 1254 goto error; 1255 else if (ret == 1) /* locally consumed */ 1256 continue; 1257 } 1258 1259 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; 1260 /* Timeout will be updated after send completes */ 1261 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); 1262 mad_send_wr->max_retries = send_buf->retries; 1263 mad_send_wr->retries_left = send_buf->retries; 1264 send_buf->retries = 0; 1265 /* Reference for work request to QP + response */ 1266 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); 1267 mad_send_wr->status = IB_WC_SUCCESS; 1268 1269 /* Reference MAD agent until send completes */ 1270 atomic_inc(&mad_agent_priv->refcount); 1271 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1272 list_add_tail(&mad_send_wr->agent_list, 1273 &mad_agent_priv->send_list); 1274 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1275 1276 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 1277 ret = ib_send_rmpp_mad(mad_send_wr); 1278 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) 1279 ret = ib_send_mad(mad_send_wr); 1280 } else 1281 ret = ib_send_mad(mad_send_wr); 1282 if (ret < 0) { 1283 /* Fail send request */ 1284 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1285 list_del(&mad_send_wr->agent_list); 1286 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1287 atomic_dec(&mad_agent_priv->refcount); 1288 goto error; 1289 } 1290 } 1291 return 0; 1292 error: 1293 if (bad_send_buf) 1294 *bad_send_buf = send_buf; 1295 return ret; 1296 } 1297 EXPORT_SYMBOL(ib_post_send_mad); 1298 1299 /* 1300 * ib_free_recv_mad - Returns data buffers used to receive 1301 * a MAD to the access layer 1302 */ 1303 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) 1304 { 1305 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf; 1306 struct ib_mad_private_header *mad_priv_hdr; 1307 struct ib_mad_private *priv; 1308 struct list_head free_list; 1309 1310 INIT_LIST_HEAD(&free_list); 1311 list_splice_init(&mad_recv_wc->rmpp_list, &free_list); 1312 1313 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, 1314 &free_list, list) { 1315 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, 1316 recv_buf); 1317 mad_priv_hdr = container_of(mad_recv_wc, 1318 struct ib_mad_private_header, 1319 recv_wc); 1320 priv = container_of(mad_priv_hdr, struct ib_mad_private, 1321 header); 1322 kfree(priv); 1323 } 1324 } 1325 EXPORT_SYMBOL(ib_free_recv_mad); 1326 1327 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, 1328 u8 rmpp_version, 1329 ib_mad_send_handler send_handler, 1330 ib_mad_recv_handler recv_handler, 1331 void *context) 1332 { 1333 return ERR_PTR(-EINVAL); /* XXX: for now */ 1334 } 1335 EXPORT_SYMBOL(ib_redirect_mad_qp); 1336 1337 int 
ib_process_mad_wc(struct ib_mad_agent *mad_agent, 1338 struct ib_wc *wc) 1339 { 1340 dev_err(&mad_agent->device->dev, 1341 "ib_process_mad_wc() not implemented yet\n"); 1342 return 0; 1343 } 1344 EXPORT_SYMBOL(ib_process_mad_wc); 1345 1346 static int method_in_use(struct ib_mad_mgmt_method_table **method, 1347 struct ib_mad_reg_req *mad_reg_req) 1348 { 1349 int i; 1350 1351 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { 1352 if ((*method)->agent[i]) { 1353 pr_err("Method %d already in use\n", i); 1354 return -EINVAL; 1355 } 1356 } 1357 return 0; 1358 } 1359 1360 static int allocate_method_table(struct ib_mad_mgmt_method_table **method) 1361 { 1362 /* Allocate management method table */ 1363 *method = kzalloc(sizeof **method, GFP_ATOMIC); 1364 if (!*method) { 1365 pr_err("No memory for ib_mad_mgmt_method_table\n"); 1366 return -ENOMEM; 1367 } 1368 1369 return 0; 1370 } 1371 1372 /* 1373 * Check to see if there are any methods still in use 1374 */ 1375 static int check_method_table(struct ib_mad_mgmt_method_table *method) 1376 { 1377 int i; 1378 1379 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) 1380 if (method->agent[i]) 1381 return 1; 1382 return 0; 1383 } 1384 1385 /* 1386 * Check to see if there are any method tables for this class still in use 1387 */ 1388 static int check_class_table(struct ib_mad_mgmt_class_table *class) 1389 { 1390 int i; 1391 1392 for (i = 0; i < MAX_MGMT_CLASS; i++) 1393 if (class->method_table[i]) 1394 return 1; 1395 return 0; 1396 } 1397 1398 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class) 1399 { 1400 int i; 1401 1402 for (i = 0; i < MAX_MGMT_OUI; i++) 1403 if (vendor_class->method_table[i]) 1404 return 1; 1405 return 0; 1406 } 1407 1408 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, 1409 const char *oui) 1410 { 1411 int i; 1412 1413 for (i = 0; i < MAX_MGMT_OUI; i++) 1414 /* Is there matching OUI for this vendor class ? 
*/ 1415 if (!memcmp(vendor_class->oui[i], oui, 3)) 1416 return i; 1417 1418 return -1; 1419 } 1420 1421 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor) 1422 { 1423 int i; 1424 1425 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++) 1426 if (vendor->vendor_class[i]) 1427 return 1; 1428 1429 return 0; 1430 } 1431 1432 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, 1433 struct ib_mad_agent_private *agent) 1434 { 1435 int i; 1436 1437 /* Remove any methods for this mad agent */ 1438 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) { 1439 if (method->agent[i] == agent) { 1440 method->agent[i] = NULL; 1441 } 1442 } 1443 } 1444 1445 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 1446 struct ib_mad_agent_private *agent_priv, 1447 u8 mgmt_class) 1448 { 1449 struct ib_mad_port_private *port_priv; 1450 struct ib_mad_mgmt_class_table **class; 1451 struct ib_mad_mgmt_method_table **method; 1452 int i, ret; 1453 1454 port_priv = agent_priv->qp_info->port_priv; 1455 class = &port_priv->version[mad_reg_req->mgmt_class_version].class; 1456 if (!*class) { 1457 /* Allocate management class table for "new" class version */ 1458 *class = kzalloc(sizeof **class, GFP_ATOMIC); 1459 if (!*class) { 1460 dev_err(&agent_priv->agent.device->dev, 1461 "No memory for ib_mad_mgmt_class_table\n"); 1462 ret = -ENOMEM; 1463 goto error1; 1464 } 1465 1466 /* Allocate method table for this management class */ 1467 method = &(*class)->method_table[mgmt_class]; 1468 if ((ret = allocate_method_table(method))) 1469 goto error2; 1470 } else { 1471 method = &(*class)->method_table[mgmt_class]; 1472 if (!*method) { 1473 /* Allocate method table for this management class */ 1474 if ((ret = allocate_method_table(method))) 1475 goto error1; 1476 } 1477 } 1478 1479 /* Now, make sure methods are not already in use */ 1480 if (method_in_use(method, mad_reg_req)) 1481 goto error3; 1482 1483 /* Finally, add in methods being registered */ 1484 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) 1485 (*method)->agent[i] = agent_priv; 1486 1487 return 0; 1488 1489 error3: 1490 /* Remove any methods for this mad agent */ 1491 remove_methods_mad_agent(*method, agent_priv); 1492 /* Now, check to see if there are any methods in use */ 1493 if (!check_method_table(*method)) { 1494 /* If not, release management method table */ 1495 kfree(*method); 1496 *method = NULL; 1497 } 1498 ret = -EINVAL; 1499 goto error1; 1500 error2: 1501 kfree(*class); 1502 *class = NULL; 1503 error1: 1504 return ret; 1505 } 1506 1507 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, 1508 struct ib_mad_agent_private *agent_priv) 1509 { 1510 struct ib_mad_port_private *port_priv; 1511 struct ib_mad_mgmt_vendor_class_table **vendor_table; 1512 struct ib_mad_mgmt_vendor_class_table *vendor = NULL; 1513 struct ib_mad_mgmt_vendor_class *vendor_class = NULL; 1514 struct ib_mad_mgmt_method_table **method; 1515 int i, ret = -ENOMEM; 1516 u8 vclass; 1517 1518 /* "New" vendor (with OUI) class */ 1519 vclass = vendor_class_index(mad_reg_req->mgmt_class); 1520 port_priv = agent_priv->qp_info->port_priv; 1521 vendor_table = &port_priv->version[ 1522 mad_reg_req->mgmt_class_version].vendor; 1523 if (!*vendor_table) { 1524 /* Allocate mgmt vendor class table for "new" class version */ 1525 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); 1526 if (!vendor) { 1527 dev_err(&agent_priv->agent.device->dev, 1528 "No memory for ib_mad_mgmt_vendor_class_table\n"); 1529 goto error1; 1530 } 1531 1532 
*vendor_table = vendor; 1533 } 1534 if (!(*vendor_table)->vendor_class[vclass]) { 1535 /* Allocate table for this management vendor class */ 1536 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); 1537 if (!vendor_class) { 1538 dev_err(&agent_priv->agent.device->dev, 1539 "No memory for ib_mad_mgmt_vendor_class\n"); 1540 goto error2; 1541 } 1542 1543 (*vendor_table)->vendor_class[vclass] = vendor_class; 1544 } 1545 for (i = 0; i < MAX_MGMT_OUI; i++) { 1546 /* Is there matching OUI for this vendor class ? */ 1547 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], 1548 mad_reg_req->oui, 3)) { 1549 method = &(*vendor_table)->vendor_class[ 1550 vclass]->method_table[i]; 1551 BUG_ON(!*method); 1552 goto check_in_use; 1553 } 1554 } 1555 for (i = 0; i < MAX_MGMT_OUI; i++) { 1556 /* OUI slot available ? */ 1557 if (!is_vendor_oui((*vendor_table)->vendor_class[ 1558 vclass]->oui[i])) { 1559 method = &(*vendor_table)->vendor_class[ 1560 vclass]->method_table[i]; 1561 BUG_ON(*method); 1562 /* Allocate method table for this OUI */ 1563 if ((ret = allocate_method_table(method))) 1564 goto error3; 1565 memcpy((*vendor_table)->vendor_class[vclass]->oui[i], 1566 mad_reg_req->oui, 3); 1567 goto check_in_use; 1568 } 1569 } 1570 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); 1571 goto error3; 1572 1573 check_in_use: 1574 /* Now, make sure methods are not already in use */ 1575 if (method_in_use(method, mad_reg_req)) 1576 goto error4; 1577 1578 /* Finally, add in methods being registered */ 1579 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) 1580 (*method)->agent[i] = agent_priv; 1581 1582 return 0; 1583 1584 error4: 1585 /* Remove any methods for this mad agent */ 1586 remove_methods_mad_agent(*method, agent_priv); 1587 /* Now, check to see if there are any methods in use */ 1588 if (!check_method_table(*method)) { 1589 /* If not, release management method table */ 1590 kfree(*method); 1591 *method = NULL; 1592 } 1593 ret = -EINVAL; 1594 error3: 1595 if (vendor_class) { 1596 (*vendor_table)->vendor_class[vclass] = NULL; 1597 kfree(vendor_class); 1598 } 1599 error2: 1600 if (vendor) { 1601 *vendor_table = NULL; 1602 kfree(vendor); 1603 } 1604 error1: 1605 return ret; 1606 } 1607 1608 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) 1609 { 1610 struct ib_mad_port_private *port_priv; 1611 struct ib_mad_mgmt_class_table *class; 1612 struct ib_mad_mgmt_method_table *method; 1613 struct ib_mad_mgmt_vendor_class_table *vendor; 1614 struct ib_mad_mgmt_vendor_class *vendor_class; 1615 int index; 1616 u8 mgmt_class; 1617 1618 /* 1619 * Was MAD registration request supplied 1620 * with original registration ? 1621 */ 1622 if (!agent_priv->reg_req) { 1623 goto out; 1624 } 1625 1626 port_priv = agent_priv->qp_info->port_priv; 1627 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); 1628 class = port_priv->version[ 1629 agent_priv->reg_req->mgmt_class_version].class; 1630 if (!class) 1631 goto vendor_check; 1632 1633 method = class->method_table[mgmt_class]; 1634 if (method) { 1635 /* Remove any methods for this mad agent */ 1636 remove_methods_mad_agent(method, agent_priv); 1637 /* Now, check to see if there are any methods still in use */ 1638 if (!check_method_table(method)) { 1639 /* If not, release management method table */ 1640 kfree(method); 1641 class->method_table[mgmt_class] = NULL; 1642 /* Any management classes left ? 
*/ 1643 if (!check_class_table(class)) { 1644 /* If not, release management class table */ 1645 kfree(class); 1646 port_priv->version[ 1647 agent_priv->reg_req-> 1648 mgmt_class_version].class = NULL; 1649 } 1650 } 1651 } 1652 1653 vendor_check: 1654 if (!is_vendor_class(mgmt_class)) 1655 goto out; 1656 1657 /* normalize mgmt_class to vendor range 2 */ 1658 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); 1659 vendor = port_priv->version[ 1660 agent_priv->reg_req->mgmt_class_version].vendor; 1661 1662 if (!vendor) 1663 goto out; 1664 1665 vendor_class = vendor->vendor_class[mgmt_class]; 1666 if (vendor_class) { 1667 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); 1668 if (index < 0) 1669 goto out; 1670 method = vendor_class->method_table[index]; 1671 if (method) { 1672 /* Remove any methods for this mad agent */ 1673 remove_methods_mad_agent(method, agent_priv); 1674 /* 1675 * Now, check to see if there are 1676 * any methods still in use 1677 */ 1678 if (!check_method_table(method)) { 1679 /* If not, release management method table */ 1680 kfree(method); 1681 vendor_class->method_table[index] = NULL; 1682 memset(vendor_class->oui[index], 0, 3); 1683 /* Any OUIs left ? */ 1684 if (!check_vendor_class(vendor_class)) { 1685 /* If not, release vendor class table */ 1686 kfree(vendor_class); 1687 vendor->vendor_class[mgmt_class] = NULL; 1688 /* Any other vendor classes left ? */ 1689 if (!check_vendor_table(vendor)) { 1690 kfree(vendor); 1691 port_priv->version[ 1692 agent_priv->reg_req-> 1693 mgmt_class_version]. 1694 vendor = NULL; 1695 } 1696 } 1697 } 1698 } 1699 } 1700 1701 out: 1702 return; 1703 } 1704 1705 static struct ib_mad_agent_private * 1706 find_mad_agent(struct ib_mad_port_private *port_priv, 1707 const struct ib_mad_hdr *mad_hdr) 1708 { 1709 struct ib_mad_agent_private *mad_agent = NULL; 1710 unsigned long flags; 1711 1712 spin_lock_irqsave(&port_priv->reg_lock, flags); 1713 if (ib_response_mad(mad_hdr)) { 1714 u32 hi_tid; 1715 struct ib_mad_agent_private *entry; 1716 1717 /* 1718 * Routing is based on high 32 bits of transaction ID 1719 * of MAD. 
1720 */ 1721 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; 1722 list_for_each_entry(entry, &port_priv->agent_list, agent_list) { 1723 if (entry->agent.hi_tid == hi_tid) { 1724 mad_agent = entry; 1725 break; 1726 } 1727 } 1728 } else { 1729 struct ib_mad_mgmt_class_table *class; 1730 struct ib_mad_mgmt_method_table *method; 1731 struct ib_mad_mgmt_vendor_class_table *vendor; 1732 struct ib_mad_mgmt_vendor_class *vendor_class; 1733 const struct ib_vendor_mad *vendor_mad; 1734 int index; 1735 1736 /* 1737 * Routing is based on version, class, and method 1738 * For "newer" vendor MADs, also based on OUI 1739 */ 1740 if (mad_hdr->class_version >= MAX_MGMT_VERSION) 1741 goto out; 1742 if (!is_vendor_class(mad_hdr->mgmt_class)) { 1743 class = port_priv->version[ 1744 mad_hdr->class_version].class; 1745 if (!class) 1746 goto out; 1747 if (convert_mgmt_class(mad_hdr->mgmt_class) >= 1748 IB_MGMT_MAX_METHODS) 1749 goto out; 1750 method = class->method_table[convert_mgmt_class( 1751 mad_hdr->mgmt_class)]; 1752 if (method) 1753 mad_agent = method->agent[mad_hdr->method & 1754 ~IB_MGMT_METHOD_RESP]; 1755 } else { 1756 vendor = port_priv->version[ 1757 mad_hdr->class_version].vendor; 1758 if (!vendor) 1759 goto out; 1760 vendor_class = vendor->vendor_class[vendor_class_index( 1761 mad_hdr->mgmt_class)]; 1762 if (!vendor_class) 1763 goto out; 1764 /* Find matching OUI */ 1765 vendor_mad = (const struct ib_vendor_mad *)mad_hdr; 1766 index = find_vendor_oui(vendor_class, vendor_mad->oui); 1767 if (index == -1) 1768 goto out; 1769 method = vendor_class->method_table[index]; 1770 if (method) { 1771 mad_agent = method->agent[mad_hdr->method & 1772 ~IB_MGMT_METHOD_RESP]; 1773 } 1774 } 1775 } 1776 1777 if (mad_agent) { 1778 if (mad_agent->agent.recv_handler) 1779 atomic_inc(&mad_agent->refcount); 1780 else { 1781 dev_notice(&port_priv->device->dev, 1782 "No receive handler for client %p on port %d\n", 1783 &mad_agent->agent, port_priv->port_num); 1784 mad_agent = NULL; 1785 } 1786 } 1787 out: 1788 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 1789 1790 return mad_agent; 1791 } 1792 1793 static int validate_mad(const struct ib_mad_hdr *mad_hdr, 1794 const struct ib_mad_qp_info *qp_info, 1795 bool opa) 1796 { 1797 int valid = 0; 1798 u32 qp_num = qp_info->qp->qp_num; 1799 1800 /* Make sure MAD base version is understood */ 1801 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && 1802 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { 1803 pr_err("MAD received with unsupported base version %d %s\n", 1804 mad_hdr->base_version, opa ? 
"(opa)" : ""); 1805 goto out; 1806 } 1807 1808 /* Filter SMI packets sent to other than QP0 */ 1809 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || 1810 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { 1811 if (qp_num == 0) 1812 valid = 1; 1813 } else { 1814 /* CM attributes other than ClassPortInfo only use Send method */ 1815 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && 1816 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && 1817 (mad_hdr->method != IB_MGMT_METHOD_SEND)) 1818 goto out; 1819 /* Filter GSI packets sent to QP0 */ 1820 if (qp_num != 0) 1821 valid = 1; 1822 } 1823 1824 out: 1825 return valid; 1826 } 1827 1828 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, 1829 const struct ib_mad_hdr *mad_hdr) 1830 { 1831 struct ib_rmpp_mad *rmpp_mad; 1832 1833 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; 1834 return !mad_agent_priv->agent.rmpp_version || 1835 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || 1836 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 1837 IB_MGMT_RMPP_FLAG_ACTIVE) || 1838 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); 1839 } 1840 1841 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, 1842 const struct ib_mad_recv_wc *rwc) 1843 { 1844 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == 1845 rwc->recv_buf.mad->mad_hdr.mgmt_class; 1846 } 1847 1848 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, 1849 const struct ib_mad_send_wr_private *wr, 1850 const struct ib_mad_recv_wc *rwc ) 1851 { 1852 struct ib_ah_attr attr; 1853 u8 send_resp, rcv_resp; 1854 union ib_gid sgid; 1855 struct ib_device *device = mad_agent_priv->agent.device; 1856 u8 port_num = mad_agent_priv->agent.port_num; 1857 u8 lmc; 1858 1859 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); 1860 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); 1861 1862 if (send_resp == rcv_resp) 1863 /* both requests, or both responses. GIDs different */ 1864 return 0; 1865 1866 if (ib_query_ah(wr->send_buf.ah, &attr)) 1867 /* Assume not equal, to avoid false positives. */ 1868 return 0; 1869 1870 if (!!(attr.ah_flags & IB_AH_GRH) != 1871 !!(rwc->wc->wc_flags & IB_WC_GRH)) 1872 /* one has GID, other does not. Assume different */ 1873 return 0; 1874 1875 if (!send_resp && rcv_resp) { 1876 /* is request/response. 
*/ 1877 if (!(attr.ah_flags & IB_AH_GRH)) { 1878 if (ib_get_cached_lmc(device, port_num, &lmc)) 1879 return 0; 1880 return (!lmc || !((attr.src_path_bits ^ 1881 rwc->wc->dlid_path_bits) & 1882 ((1 << lmc) - 1))); 1883 } else { 1884 if (ib_get_cached_gid(device, port_num, 1885 attr.grh.sgid_index, &sgid, NULL)) 1886 return 0; 1887 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, 1888 16); 1889 } 1890 } 1891 1892 if (!(attr.ah_flags & IB_AH_GRH)) 1893 return attr.dlid == rwc->wc->slid; 1894 else 1895 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw, 1896 16); 1897 } 1898 1899 static inline int is_direct(u8 class) 1900 { 1901 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); 1902 } 1903 1904 struct ib_mad_send_wr_private* 1905 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, 1906 const struct ib_mad_recv_wc *wc) 1907 { 1908 struct ib_mad_send_wr_private *wr; 1909 const struct ib_mad_hdr *mad_hdr; 1910 1911 mad_hdr = &wc->recv_buf.mad->mad_hdr; 1912 1913 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { 1914 if ((wr->tid == mad_hdr->tid) && 1915 rcv_has_same_class(wr, wc) && 1916 /* 1917 * Don't check GID for direct routed MADs. 1918 * These might have permissive LIDs. 1919 */ 1920 (is_direct(mad_hdr->mgmt_class) || 1921 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1922 return (wr->status == IB_WC_SUCCESS) ? wr : NULL; 1923 } 1924 1925 /* 1926 * It's possible to receive the response before we've 1927 * been notified that the send has completed 1928 */ 1929 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { 1930 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && 1931 wr->tid == mad_hdr->tid && 1932 wr->timeout && 1933 rcv_has_same_class(wr, wc) && 1934 /* 1935 * Don't check GID for direct routed MADs. 1936 * These might have permissive LIDs. 1937 */ 1938 (is_direct(mad_hdr->mgmt_class) || 1939 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1940 /* Verify request has not been canceled */ 1941 return (wr->status == IB_WC_SUCCESS) ? 
wr : NULL; 1942 } 1943 return NULL; 1944 } 1945 1946 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) 1947 { 1948 mad_send_wr->timeout = 0; 1949 if (mad_send_wr->refcount == 1) 1950 list_move_tail(&mad_send_wr->agent_list, 1951 &mad_send_wr->mad_agent_priv->done_list); 1952 } 1953 1954 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, 1955 struct ib_mad_recv_wc *mad_recv_wc) 1956 { 1957 struct ib_mad_send_wr_private *mad_send_wr; 1958 struct ib_mad_send_wc mad_send_wc; 1959 unsigned long flags; 1960 1961 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 1962 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); 1963 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 1964 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, 1965 mad_recv_wc); 1966 if (!mad_recv_wc) { 1967 deref_mad_agent(mad_agent_priv); 1968 return; 1969 } 1970 } 1971 1972 /* Complete corresponding request */ 1973 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { 1974 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1975 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); 1976 if (!mad_send_wr) { 1977 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1978 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) 1979 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) 1980 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) 1981 & IB_MGMT_RMPP_FLAG_ACTIVE)) { 1982 /* user rmpp is in effect 1983 * and this is an active RMPP MAD 1984 */ 1985 mad_recv_wc->wc->wr_id = 0; 1986 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, 1987 mad_recv_wc); 1988 atomic_dec(&mad_agent_priv->refcount); 1989 } else { 1990 /* not user rmpp, revert to normal behavior and 1991 * drop the mad */ 1992 ib_free_recv_mad(mad_recv_wc); 1993 deref_mad_agent(mad_agent_priv); 1994 return; 1995 } 1996 } else { 1997 ib_mark_mad_done(mad_send_wr); 1998 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1999 2000 /* Defined behavior is to complete response before request */ 2001 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf; 2002 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, 2003 mad_recv_wc); 2004 atomic_dec(&mad_agent_priv->refcount); 2005 2006 mad_send_wc.status = IB_WC_SUCCESS; 2007 mad_send_wc.vendor_err = 0; 2008 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2009 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2010 } 2011 } else { 2012 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, 2013 mad_recv_wc); 2014 deref_mad_agent(mad_agent_priv); 2015 } 2016 } 2017 2018 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, 2019 const struct ib_mad_qp_info *qp_info, 2020 const struct ib_wc *wc, 2021 int port_num, 2022 struct ib_mad_private *recv, 2023 struct ib_mad_private *response) 2024 { 2025 enum smi_forward_action retsmi; 2026 struct ib_smp *smp = (struct ib_smp *)recv->mad; 2027 2028 if (smi_handle_dr_smp_recv(smp, 2029 rdma_cap_ib_switch(port_priv->device), 2030 port_num, 2031 port_priv->device->phys_port_cnt) == 2032 IB_SMI_DISCARD) 2033 return IB_SMI_DISCARD; 2034 2035 retsmi = smi_check_forward_dr_smp(smp); 2036 if (retsmi == IB_SMI_LOCAL) 2037 return IB_SMI_HANDLE; 2038 2039 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2040 if (smi_handle_dr_smp_send(smp, 2041 rdma_cap_ib_switch(port_priv->device), 2042 port_num) == IB_SMI_DISCARD) 2043 return IB_SMI_DISCARD; 2044 2045 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) 2046 
return IB_SMI_DISCARD; 2047 } else if (rdma_cap_ib_switch(port_priv->device)) { 2048 /* forward case for switches */ 2049 memcpy(response, recv, mad_priv_size(response)); 2050 response->header.recv_wc.wc = &response->header.wc; 2051 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2052 response->header.recv_wc.recv_buf.grh = &response->grh; 2053 2054 agent_send_response((const struct ib_mad_hdr *)response->mad, 2055 &response->grh, wc, 2056 port_priv->device, 2057 smi_get_fwd_port(smp), 2058 qp_info->qp->qp_num, 2059 response->mad_size, 2060 false); 2061 2062 return IB_SMI_DISCARD; 2063 } 2064 return IB_SMI_HANDLE; 2065 } 2066 2067 static bool generate_unmatched_resp(const struct ib_mad_private *recv, 2068 struct ib_mad_private *response, 2069 size_t *resp_len, bool opa) 2070 { 2071 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; 2072 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; 2073 2074 if (recv_hdr->method == IB_MGMT_METHOD_GET || 2075 recv_hdr->method == IB_MGMT_METHOD_SET) { 2076 memcpy(response, recv, mad_priv_size(response)); 2077 response->header.recv_wc.wc = &response->header.wc; 2078 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2079 response->header.recv_wc.recv_buf.grh = &response->grh; 2080 resp_hdr->method = IB_MGMT_METHOD_GET_RESP; 2081 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); 2082 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2083 resp_hdr->status |= IB_SMP_DIRECTION; 2084 2085 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { 2086 if (recv_hdr->mgmt_class == 2087 IB_MGMT_CLASS_SUBN_LID_ROUTED || 2088 recv_hdr->mgmt_class == 2089 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2090 *resp_len = opa_get_smp_header_size( 2091 (struct opa_smp *)recv->mad); 2092 else 2093 *resp_len = sizeof(struct ib_mad_hdr); 2094 } 2095 2096 return true; 2097 } else { 2098 return false; 2099 } 2100 } 2101 2102 static enum smi_action 2103 handle_opa_smi(struct ib_mad_port_private *port_priv, 2104 struct ib_mad_qp_info *qp_info, 2105 struct ib_wc *wc, 2106 int port_num, 2107 struct ib_mad_private *recv, 2108 struct ib_mad_private *response) 2109 { 2110 enum smi_forward_action retsmi; 2111 struct opa_smp *smp = (struct opa_smp *)recv->mad; 2112 2113 if (opa_smi_handle_dr_smp_recv(smp, 2114 rdma_cap_ib_switch(port_priv->device), 2115 port_num, 2116 port_priv->device->phys_port_cnt) == 2117 IB_SMI_DISCARD) 2118 return IB_SMI_DISCARD; 2119 2120 retsmi = opa_smi_check_forward_dr_smp(smp); 2121 if (retsmi == IB_SMI_LOCAL) 2122 return IB_SMI_HANDLE; 2123 2124 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2125 if (opa_smi_handle_dr_smp_send(smp, 2126 rdma_cap_ib_switch(port_priv->device), 2127 port_num) == IB_SMI_DISCARD) 2128 return IB_SMI_DISCARD; 2129 2130 if (opa_smi_check_local_smp(smp, port_priv->device) == 2131 IB_SMI_DISCARD) 2132 return IB_SMI_DISCARD; 2133 2134 } else if (rdma_cap_ib_switch(port_priv->device)) { 2135 /* forward case for switches */ 2136 memcpy(response, recv, mad_priv_size(response)); 2137 response->header.recv_wc.wc = &response->header.wc; 2138 response->header.recv_wc.recv_buf.opa_mad = 2139 (struct opa_mad *)response->mad; 2140 response->header.recv_wc.recv_buf.grh = &response->grh; 2141 2142 agent_send_response((const struct ib_mad_hdr *)response->mad, 2143 &response->grh, wc, 2144 port_priv->device, 2145 opa_smi_get_fwd_port(smp), 2146 qp_info->qp->qp_num, 2147 recv->header.wc.byte_len, 2148 true); 2149 2150 return 
IB_SMI_DISCARD; 2151 } 2152 2153 return IB_SMI_HANDLE; 2154 } 2155 2156 static enum smi_action 2157 handle_smi(struct ib_mad_port_private *port_priv, 2158 struct ib_mad_qp_info *qp_info, 2159 struct ib_wc *wc, 2160 int port_num, 2161 struct ib_mad_private *recv, 2162 struct ib_mad_private *response, 2163 bool opa) 2164 { 2165 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; 2166 2167 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && 2168 mad_hdr->class_version == OPA_SMI_CLASS_VERSION) 2169 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, 2170 response); 2171 2172 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); 2173 } 2174 2175 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, 2176 struct ib_wc *wc) 2177 { 2178 struct ib_mad_qp_info *qp_info; 2179 struct ib_mad_private_header *mad_priv_hdr; 2180 struct ib_mad_private *recv, *response = NULL; 2181 struct ib_mad_list_head *mad_list; 2182 struct ib_mad_agent_private *mad_agent; 2183 int port_num; 2184 int ret = IB_MAD_RESULT_SUCCESS; 2185 size_t mad_size; 2186 u16 resp_mad_pkey_index = 0; 2187 bool opa; 2188 2189 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; 2190 qp_info = mad_list->mad_queue->qp_info; 2191 dequeue_mad(mad_list); 2192 2193 opa = rdma_cap_opa_mad(qp_info->port_priv->device, 2194 qp_info->port_priv->port_num); 2195 2196 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, 2197 mad_list); 2198 recv = container_of(mad_priv_hdr, struct ib_mad_private, header); 2199 ib_dma_unmap_single(port_priv->device, 2200 recv->header.mapping, 2201 mad_priv_dma_size(recv), 2202 DMA_FROM_DEVICE); 2203 2204 /* Setup MAD receive work completion from "normal" work completion */ 2205 recv->header.wc = *wc; 2206 recv->header.recv_wc.wc = &recv->header.wc; 2207 2208 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { 2209 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); 2210 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2211 } else { 2212 recv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2213 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2214 } 2215 2216 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; 2217 recv->header.recv_wc.recv_buf.grh = &recv->grh; 2218 2219 if (atomic_read(&qp_info->snoop_count)) 2220 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); 2221 2222 /* Validate MAD */ 2223 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) 2224 goto out; 2225 2226 mad_size = recv->mad_size; 2227 response = alloc_mad_private(mad_size, GFP_KERNEL); 2228 if (!response) { 2229 dev_err(&port_priv->device->dev, 2230 "ib_mad_recv_done_handler no memory for response buffer\n"); 2231 goto out; 2232 } 2233 2234 if (rdma_cap_ib_switch(port_priv->device)) 2235 port_num = wc->port_num; 2236 else 2237 port_num = port_priv->port_num; 2238 2239 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == 2240 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 2241 if (handle_smi(port_priv, qp_info, wc, port_num, recv, 2242 response, opa) 2243 == IB_SMI_DISCARD) 2244 goto out; 2245 } 2246 2247 /* Give driver "right of first refusal" on incoming MAD */ 2248 if (port_priv->device->process_mad) { 2249 ret = port_priv->device->process_mad(port_priv->device, 0, 2250 port_priv->port_num, 2251 wc, &recv->grh, 2252 (const struct ib_mad_hdr *)recv->mad, 2253 recv->mad_size, 2254 (struct ib_mad_hdr *)response->mad, 2255 &mad_size, 
&resp_mad_pkey_index); 2256 2257 if (opa) 2258 wc->pkey_index = resp_mad_pkey_index; 2259 2260 if (ret & IB_MAD_RESULT_SUCCESS) { 2261 if (ret & IB_MAD_RESULT_CONSUMED) 2262 goto out; 2263 if (ret & IB_MAD_RESULT_REPLY) { 2264 agent_send_response((const struct ib_mad_hdr *)response->mad, 2265 &recv->grh, wc, 2266 port_priv->device, 2267 port_num, 2268 qp_info->qp->qp_num, 2269 mad_size, opa); 2270 goto out; 2271 } 2272 } 2273 } 2274 2275 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); 2276 if (mad_agent) { 2277 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); 2278 /* 2279 * recv is freed up in error cases in ib_mad_complete_recv 2280 * or via recv_handler in ib_mad_complete_recv() 2281 */ 2282 recv = NULL; 2283 } else if ((ret & IB_MAD_RESULT_SUCCESS) && 2284 generate_unmatched_resp(recv, response, &mad_size, opa)) { 2285 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, 2286 port_priv->device, port_num, 2287 qp_info->qp->qp_num, mad_size, opa); 2288 } 2289 2290 out: 2291 /* Post another receive request for this QP */ 2292 if (response) { 2293 ib_mad_post_receive_mads(qp_info, response); 2294 kfree(recv); 2295 } else 2296 ib_mad_post_receive_mads(qp_info, recv); 2297 } 2298 2299 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) 2300 { 2301 struct ib_mad_send_wr_private *mad_send_wr; 2302 unsigned long delay; 2303 2304 if (list_empty(&mad_agent_priv->wait_list)) { 2305 cancel_delayed_work(&mad_agent_priv->timed_work); 2306 } else { 2307 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2308 struct ib_mad_send_wr_private, 2309 agent_list); 2310 2311 if (time_after(mad_agent_priv->timeout, 2312 mad_send_wr->timeout)) { 2313 mad_agent_priv->timeout = mad_send_wr->timeout; 2314 delay = mad_send_wr->timeout - jiffies; 2315 if ((long)delay <= 0) 2316 delay = 1; 2317 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2318 &mad_agent_priv->timed_work, delay); 2319 } 2320 } 2321 } 2322 2323 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) 2324 { 2325 struct ib_mad_agent_private *mad_agent_priv; 2326 struct ib_mad_send_wr_private *temp_mad_send_wr; 2327 struct list_head *list_item; 2328 unsigned long delay; 2329 2330 mad_agent_priv = mad_send_wr->mad_agent_priv; 2331 list_del(&mad_send_wr->agent_list); 2332 2333 delay = mad_send_wr->timeout; 2334 mad_send_wr->timeout += jiffies; 2335 2336 if (delay) { 2337 list_for_each_prev(list_item, &mad_agent_priv->wait_list) { 2338 temp_mad_send_wr = list_entry(list_item, 2339 struct ib_mad_send_wr_private, 2340 agent_list); 2341 if (time_after(mad_send_wr->timeout, 2342 temp_mad_send_wr->timeout)) 2343 break; 2344 } 2345 } 2346 else 2347 list_item = &mad_agent_priv->wait_list; 2348 list_add(&mad_send_wr->agent_list, list_item); 2349 2350 /* Reschedule a work item if we have a shorter timeout */ 2351 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) 2352 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2353 &mad_agent_priv->timed_work, delay); 2354 } 2355 2356 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, 2357 int timeout_ms) 2358 { 2359 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2360 wait_for_response(mad_send_wr); 2361 } 2362 2363 /* 2364 * Process a send work completion 2365 */ 2366 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 2367 struct ib_mad_send_wc *mad_send_wc) 2368 { 2369 struct ib_mad_agent_private *mad_agent_priv; 2370 unsigned long flags; 2371 
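	/* ret records how the RMPP layer disposed of this send completion */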
int ret; 2372 2373 mad_agent_priv = mad_send_wr->mad_agent_priv; 2374 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2375 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 2376 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); 2377 if (ret == IB_RMPP_RESULT_CONSUMED) 2378 goto done; 2379 } else 2380 ret = IB_RMPP_RESULT_UNHANDLED; 2381 2382 if (mad_send_wc->status != IB_WC_SUCCESS && 2383 mad_send_wr->status == IB_WC_SUCCESS) { 2384 mad_send_wr->status = mad_send_wc->status; 2385 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2386 } 2387 2388 if (--mad_send_wr->refcount > 0) { 2389 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && 2390 mad_send_wr->status == IB_WC_SUCCESS) { 2391 wait_for_response(mad_send_wr); 2392 } 2393 goto done; 2394 } 2395 2396 /* Remove send from MAD agent and notify client of completion */ 2397 list_del(&mad_send_wr->agent_list); 2398 adjust_timeout(mad_agent_priv); 2399 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2400 2401 if (mad_send_wr->status != IB_WC_SUCCESS ) 2402 mad_send_wc->status = mad_send_wr->status; 2403 if (ret == IB_RMPP_RESULT_INTERNAL) 2404 ib_rmpp_send_handler(mad_send_wc); 2405 else 2406 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2407 mad_send_wc); 2408 2409 /* Release reference on agent taken when sending */ 2410 deref_mad_agent(mad_agent_priv); 2411 return; 2412 done: 2413 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2414 } 2415 2416 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, 2417 struct ib_wc *wc) 2418 { 2419 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; 2420 struct ib_mad_list_head *mad_list; 2421 struct ib_mad_qp_info *qp_info; 2422 struct ib_mad_queue *send_queue; 2423 struct ib_send_wr *bad_send_wr; 2424 struct ib_mad_send_wc mad_send_wc; 2425 unsigned long flags; 2426 int ret; 2427 2428 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; 2429 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2430 mad_list); 2431 send_queue = mad_list->mad_queue; 2432 qp_info = send_queue->qp_info; 2433 2434 retry: 2435 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2436 mad_send_wr->header_mapping, 2437 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2438 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2439 mad_send_wr->payload_mapping, 2440 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2441 queued_send_wr = NULL; 2442 spin_lock_irqsave(&send_queue->lock, flags); 2443 list_del(&mad_list->list); 2444 2445 /* Move queued send to the send queue */ 2446 if (send_queue->count-- > send_queue->max_active) { 2447 mad_list = container_of(qp_info->overflow_list.next, 2448 struct ib_mad_list_head, list); 2449 queued_send_wr = container_of(mad_list, 2450 struct ib_mad_send_wr_private, 2451 mad_list); 2452 list_move_tail(&mad_list->list, &send_queue->list); 2453 } 2454 spin_unlock_irqrestore(&send_queue->lock, flags); 2455 2456 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2457 mad_send_wc.status = wc->status; 2458 mad_send_wc.vendor_err = wc->vendor_err; 2459 if (atomic_read(&qp_info->snoop_count)) 2460 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, 2461 IB_MAD_SNOOP_SEND_COMPLETIONS); 2462 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2463 2464 if (queued_send_wr) { 2465 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, 2466 &bad_send_wr); 2467 if (ret) { 2468 dev_err(&port_priv->device->dev, 2469 "ib_post_send failed: %d\n", ret); 2470 mad_send_wr = queued_send_wr; 
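	/*
	 * Could not post the queued send: report it as a local QP
	 * operation error and loop back so it goes through the normal
	 * send completion path as well.
	 */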
2471 wc->status = IB_WC_LOC_QP_OP_ERR; 2472 goto retry; 2473 } 2474 } 2475 } 2476 2477 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) 2478 { 2479 struct ib_mad_send_wr_private *mad_send_wr; 2480 struct ib_mad_list_head *mad_list; 2481 unsigned long flags; 2482 2483 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 2484 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { 2485 mad_send_wr = container_of(mad_list, 2486 struct ib_mad_send_wr_private, 2487 mad_list); 2488 mad_send_wr->retry = 1; 2489 } 2490 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 2491 } 2492 2493 static void mad_error_handler(struct ib_mad_port_private *port_priv, 2494 struct ib_wc *wc) 2495 { 2496 struct ib_mad_list_head *mad_list; 2497 struct ib_mad_qp_info *qp_info; 2498 struct ib_mad_send_wr_private *mad_send_wr; 2499 int ret; 2500 2501 /* Determine if failure was a send or receive */ 2502 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; 2503 qp_info = mad_list->mad_queue->qp_info; 2504 if (mad_list->mad_queue == &qp_info->recv_queue) 2505 /* 2506 * Receive errors indicate that the QP has entered the error 2507 * state - error handling/shutdown code will cleanup 2508 */ 2509 return; 2510 2511 /* 2512 * Send errors will transition the QP to SQE - move 2513 * QP to RTS and repost flushed work requests 2514 */ 2515 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2516 mad_list); 2517 if (wc->status == IB_WC_WR_FLUSH_ERR) { 2518 if (mad_send_wr->retry) { 2519 /* Repost send */ 2520 struct ib_send_wr *bad_send_wr; 2521 2522 mad_send_wr->retry = 0; 2523 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, 2524 &bad_send_wr); 2525 if (ret) 2526 ib_mad_send_done_handler(port_priv, wc); 2527 } else 2528 ib_mad_send_done_handler(port_priv, wc); 2529 } else { 2530 struct ib_qp_attr *attr; 2531 2532 /* Transition QP to RTS and fail offending send */ 2533 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2534 if (attr) { 2535 attr->qp_state = IB_QPS_RTS; 2536 attr->cur_qp_state = IB_QPS_SQE; 2537 ret = ib_modify_qp(qp_info->qp, attr, 2538 IB_QP_STATE | IB_QP_CUR_STATE); 2539 kfree(attr); 2540 if (ret) 2541 dev_err(&port_priv->device->dev, 2542 "mad_error_handler - ib_modify_qp to RTS : %d\n", 2543 ret); 2544 else 2545 mark_sends_for_retry(qp_info); 2546 } 2547 ib_mad_send_done_handler(port_priv, wc); 2548 } 2549 } 2550 2551 /* 2552 * IB MAD completion callback 2553 */ 2554 static void ib_mad_completion_handler(struct work_struct *work) 2555 { 2556 struct ib_mad_port_private *port_priv; 2557 struct ib_wc wc; 2558 2559 port_priv = container_of(work, struct ib_mad_port_private, work); 2560 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 2561 2562 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { 2563 if (wc.status == IB_WC_SUCCESS) { 2564 switch (wc.opcode) { 2565 case IB_WC_SEND: 2566 ib_mad_send_done_handler(port_priv, &wc); 2567 break; 2568 case IB_WC_RECV: 2569 ib_mad_recv_done_handler(port_priv, &wc); 2570 break; 2571 default: 2572 BUG_ON(1); 2573 break; 2574 } 2575 } else 2576 mad_error_handler(port_priv, &wc); 2577 } 2578 } 2579 2580 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) 2581 { 2582 unsigned long flags; 2583 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; 2584 struct ib_mad_send_wc mad_send_wc; 2585 struct list_head cancel_list; 2586 2587 INIT_LIST_HEAD(&cancel_list); 2588 2589 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2590 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2591 
&mad_agent_priv->send_list, agent_list) { 2592 if (mad_send_wr->status == IB_WC_SUCCESS) { 2593 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2594 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2595 } 2596 } 2597 2598 /* Empty wait list to prevent receives from finding a request */ 2599 list_splice_init(&mad_agent_priv->wait_list, &cancel_list); 2600 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2601 2602 /* Report all cancelled requests */ 2603 mad_send_wc.status = IB_WC_WR_FLUSH_ERR; 2604 mad_send_wc.vendor_err = 0; 2605 2606 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2607 &cancel_list, agent_list) { 2608 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2609 list_del(&mad_send_wr->agent_list); 2610 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2611 &mad_send_wc); 2612 atomic_dec(&mad_agent_priv->refcount); 2613 } 2614 } 2615 2616 static struct ib_mad_send_wr_private* 2617 find_send_wr(struct ib_mad_agent_private *mad_agent_priv, 2618 struct ib_mad_send_buf *send_buf) 2619 { 2620 struct ib_mad_send_wr_private *mad_send_wr; 2621 2622 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 2623 agent_list) { 2624 if (&mad_send_wr->send_buf == send_buf) 2625 return mad_send_wr; 2626 } 2627 2628 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 2629 agent_list) { 2630 if (is_rmpp_data_mad(mad_agent_priv, 2631 mad_send_wr->send_buf.mad) && 2632 &mad_send_wr->send_buf == send_buf) 2633 return mad_send_wr; 2634 } 2635 return NULL; 2636 } 2637 2638 int ib_modify_mad(struct ib_mad_agent *mad_agent, 2639 struct ib_mad_send_buf *send_buf, u32 timeout_ms) 2640 { 2641 struct ib_mad_agent_private *mad_agent_priv; 2642 struct ib_mad_send_wr_private *mad_send_wr; 2643 unsigned long flags; 2644 int active; 2645 2646 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 2647 agent); 2648 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2649 mad_send_wr = find_send_wr(mad_agent_priv, send_buf); 2650 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { 2651 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2652 return -EINVAL; 2653 } 2654 2655 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); 2656 if (!timeout_ms) { 2657 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2658 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2659 } 2660 2661 mad_send_wr->send_buf.timeout_ms = timeout_ms; 2662 if (active) 2663 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2664 else 2665 ib_reset_mad_timeout(mad_send_wr, timeout_ms); 2666 2667 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2668 return 0; 2669 } 2670 EXPORT_SYMBOL(ib_modify_mad); 2671 2672 void ib_cancel_mad(struct ib_mad_agent *mad_agent, 2673 struct ib_mad_send_buf *send_buf) 2674 { 2675 ib_modify_mad(mad_agent, send_buf, 0); 2676 } 2677 EXPORT_SYMBOL(ib_cancel_mad); 2678 2679 static void local_completions(struct work_struct *work) 2680 { 2681 struct ib_mad_agent_private *mad_agent_priv; 2682 struct ib_mad_local_private *local; 2683 struct ib_mad_agent_private *recv_mad_agent; 2684 unsigned long flags; 2685 int free_mad; 2686 struct ib_wc wc; 2687 struct ib_mad_send_wc mad_send_wc; 2688 bool opa; 2689 2690 mad_agent_priv = 2691 container_of(work, struct ib_mad_agent_private, local_work); 2692 2693 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 2694 mad_agent_priv->qp_info->port_priv->port_num); 2695 2696 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2697 while (!list_empty(&mad_agent_priv->local_list)) { 2698 local = 
list_entry(mad_agent_priv->local_list.next, 2699 struct ib_mad_local_private, 2700 completion_list); 2701 list_del(&local->completion_list); 2702 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2703 free_mad = 0; 2704 if (local->mad_priv) { 2705 u8 base_version; 2706 recv_mad_agent = local->recv_mad_agent; 2707 if (!recv_mad_agent) { 2708 dev_err(&mad_agent_priv->agent.device->dev, 2709 "No receive MAD agent for local completion\n"); 2710 free_mad = 1; 2711 goto local_send_completion; 2712 } 2713 2714 /* 2715 * Defined behavior is to complete response 2716 * before request 2717 */ 2718 build_smp_wc(recv_mad_agent->agent.qp, 2719 (unsigned long) local->mad_send_wr, 2720 be16_to_cpu(IB_LID_PERMISSIVE), 2721 local->mad_send_wr->send_wr.pkey_index, 2722 recv_mad_agent->agent.port_num, &wc); 2723 2724 local->mad_priv->header.recv_wc.wc = &wc; 2725 2726 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; 2727 if (opa && base_version == OPA_MGMT_BASE_VERSION) { 2728 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; 2729 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2730 } else { 2731 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2732 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2733 } 2734 2735 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); 2736 list_add(&local->mad_priv->header.recv_wc.recv_buf.list, 2737 &local->mad_priv->header.recv_wc.rmpp_list); 2738 local->mad_priv->header.recv_wc.recv_buf.grh = NULL; 2739 local->mad_priv->header.recv_wc.recv_buf.mad = 2740 (struct ib_mad *)local->mad_priv->mad; 2741 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) 2742 snoop_recv(recv_mad_agent->qp_info, 2743 &local->mad_priv->header.recv_wc, 2744 IB_MAD_SNOOP_RECVS); 2745 recv_mad_agent->agent.recv_handler( 2746 &recv_mad_agent->agent, 2747 &local->mad_priv->header.recv_wc); 2748 spin_lock_irqsave(&recv_mad_agent->lock, flags); 2749 atomic_dec(&recv_mad_agent->refcount); 2750 spin_unlock_irqrestore(&recv_mad_agent->lock, flags); 2751 } 2752 2753 local_send_completion: 2754 /* Complete send */ 2755 mad_send_wc.status = IB_WC_SUCCESS; 2756 mad_send_wc.vendor_err = 0; 2757 mad_send_wc.send_buf = &local->mad_send_wr->send_buf; 2758 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) 2759 snoop_send(mad_agent_priv->qp_info, 2760 &local->mad_send_wr->send_buf, 2761 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); 2762 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2763 &mad_send_wc); 2764 2765 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2766 atomic_dec(&mad_agent_priv->refcount); 2767 if (free_mad) 2768 kfree(local->mad_priv); 2769 kfree(local); 2770 } 2771 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2772 } 2773 2774 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) 2775 { 2776 int ret; 2777 2778 if (!mad_send_wr->retries_left) 2779 return -ETIMEDOUT; 2780 2781 mad_send_wr->retries_left--; 2782 mad_send_wr->send_buf.retries++; 2783 2784 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); 2785 2786 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { 2787 ret = ib_retry_rmpp(mad_send_wr); 2788 switch (ret) { 2789 case IB_RMPP_RESULT_UNHANDLED: 2790 ret = ib_send_mad(mad_send_wr); 2791 break; 2792 case IB_RMPP_RESULT_CONSUMED: 2793 ret = 0; 2794 break; 2795 default: 2796 ret = -ECOMM; 2797 break; 2798 } 2799 } else 2800 ret = ib_send_mad(mad_send_wr); 2801 2802 if (!ret) { 2803 
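	/*
	 * The retry was accepted (sent, or consumed by RMPP): take
	 * another reference on the work request and put it back on the
	 * agent's active send list to wait for its completion.
	 */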
mad_send_wr->refcount++; 2804 list_add_tail(&mad_send_wr->agent_list, 2805 &mad_send_wr->mad_agent_priv->send_list); 2806 } 2807 return ret; 2808 } 2809 2810 static void timeout_sends(struct work_struct *work) 2811 { 2812 struct ib_mad_agent_private *mad_agent_priv; 2813 struct ib_mad_send_wr_private *mad_send_wr; 2814 struct ib_mad_send_wc mad_send_wc; 2815 unsigned long flags, delay; 2816 2817 mad_agent_priv = container_of(work, struct ib_mad_agent_private, 2818 timed_work.work); 2819 mad_send_wc.vendor_err = 0; 2820 2821 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2822 while (!list_empty(&mad_agent_priv->wait_list)) { 2823 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2824 struct ib_mad_send_wr_private, 2825 agent_list); 2826 2827 if (time_after(mad_send_wr->timeout, jiffies)) { 2828 delay = mad_send_wr->timeout - jiffies; 2829 if ((long)delay <= 0) 2830 delay = 1; 2831 queue_delayed_work(mad_agent_priv->qp_info-> 2832 port_priv->wq, 2833 &mad_agent_priv->timed_work, delay); 2834 break; 2835 } 2836 2837 list_del(&mad_send_wr->agent_list); 2838 if (mad_send_wr->status == IB_WC_SUCCESS && 2839 !retry_send(mad_send_wr)) 2840 continue; 2841 2842 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2843 2844 if (mad_send_wr->status == IB_WC_SUCCESS) 2845 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; 2846 else 2847 mad_send_wc.status = mad_send_wr->status; 2848 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2849 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2850 &mad_send_wc); 2851 2852 atomic_dec(&mad_agent_priv->refcount); 2853 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2854 } 2855 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2856 } 2857 2858 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg) 2859 { 2860 struct ib_mad_port_private *port_priv = cq->cq_context; 2861 unsigned long flags; 2862 2863 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 2864 if (!list_empty(&port_priv->port_list)) 2865 queue_work(port_priv->wq, &port_priv->work); 2866 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 2867 } 2868 2869 /* 2870 * Allocate receive MADs and post receive WRs for them 2871 */ 2872 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 2873 struct ib_mad_private *mad) 2874 { 2875 unsigned long flags; 2876 int post, ret; 2877 struct ib_mad_private *mad_priv; 2878 struct ib_sge sg_list; 2879 struct ib_recv_wr recv_wr, *bad_recv_wr; 2880 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; 2881 2882 /* Initialize common scatter list fields */ 2883 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; 2884 2885 /* Initialize common receive WR fields */ 2886 recv_wr.next = NULL; 2887 recv_wr.sg_list = &sg_list; 2888 recv_wr.num_sge = 1; 2889 2890 do { 2891 /* Allocate and map receive buffer */ 2892 if (mad) { 2893 mad_priv = mad; 2894 mad = NULL; 2895 } else { 2896 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), 2897 GFP_ATOMIC); 2898 if (!mad_priv) { 2899 dev_err(&qp_info->port_priv->device->dev, 2900 "No memory for receive buffer\n"); 2901 ret = -ENOMEM; 2902 break; 2903 } 2904 } 2905 sg_list.length = mad_priv_dma_size(mad_priv); 2906 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, 2907 &mad_priv->grh, 2908 mad_priv_dma_size(mad_priv), 2909 DMA_FROM_DEVICE); 2910 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, 2911 sg_list.addr))) { 2912 ret = -ENOMEM; 2913 break; 2914 } 2915 mad_priv->header.mapping = sg_list.addr; 2916 recv_wr.wr_id = (unsigned 
long)&mad_priv->header.mad_list; 2917 mad_priv->header.mad_list.mad_queue = recv_queue; 2918 2919 /* Post receive WR */ 2920 spin_lock_irqsave(&recv_queue->lock, flags); 2921 post = (++recv_queue->count < recv_queue->max_active); 2922 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); 2923 spin_unlock_irqrestore(&recv_queue->lock, flags); 2924 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); 2925 if (ret) { 2926 spin_lock_irqsave(&recv_queue->lock, flags); 2927 list_del(&mad_priv->header.mad_list.list); 2928 recv_queue->count--; 2929 spin_unlock_irqrestore(&recv_queue->lock, flags); 2930 ib_dma_unmap_single(qp_info->port_priv->device, 2931 mad_priv->header.mapping, 2932 mad_priv_dma_size(mad_priv), 2933 DMA_FROM_DEVICE); 2934 kfree(mad_priv); 2935 dev_err(&qp_info->port_priv->device->dev, 2936 "ib_post_recv failed: %d\n", ret); 2937 break; 2938 } 2939 } while (post); 2940 2941 return ret; 2942 } 2943 2944 /* 2945 * Return all the posted receive MADs 2946 */ 2947 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) 2948 { 2949 struct ib_mad_private_header *mad_priv_hdr; 2950 struct ib_mad_private *recv; 2951 struct ib_mad_list_head *mad_list; 2952 2953 if (!qp_info->qp) 2954 return; 2955 2956 while (!list_empty(&qp_info->recv_queue.list)) { 2957 2958 mad_list = list_entry(qp_info->recv_queue.list.next, 2959 struct ib_mad_list_head, list); 2960 mad_priv_hdr = container_of(mad_list, 2961 struct ib_mad_private_header, 2962 mad_list); 2963 recv = container_of(mad_priv_hdr, struct ib_mad_private, 2964 header); 2965 2966 /* Remove from posted receive MAD list */ 2967 list_del(&mad_list->list); 2968 2969 ib_dma_unmap_single(qp_info->port_priv->device, 2970 recv->header.mapping, 2971 mad_priv_dma_size(recv), 2972 DMA_FROM_DEVICE); 2973 kfree(recv); 2974 } 2975 2976 qp_info->recv_queue.count = 0; 2977 } 2978 2979 /* 2980 * Start the port 2981 */ 2982 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) 2983 { 2984 int ret, i; 2985 struct ib_qp_attr *attr; 2986 struct ib_qp *qp; 2987 u16 pkey_index; 2988 2989 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2990 if (!attr) { 2991 dev_err(&port_priv->device->dev, 2992 "Couldn't kmalloc ib_qp_attr\n"); 2993 return -ENOMEM; 2994 } 2995 2996 ret = ib_find_pkey(port_priv->device, port_priv->port_num, 2997 IB_DEFAULT_PKEY_FULL, &pkey_index); 2998 if (ret) 2999 pkey_index = 0; 3000 3001 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 3002 qp = port_priv->qp_info[i].qp; 3003 if (!qp) 3004 continue; 3005 3006 /* 3007 * PKey index for QP1 is irrelevant but 3008 * one is needed for the Reset to Init transition 3009 */ 3010 attr->qp_state = IB_QPS_INIT; 3011 attr->pkey_index = pkey_index; 3012 attr->qkey = (qp->qp_num == 0) ? 
0 : IB_QP1_QKEY; 3013 ret = ib_modify_qp(qp, attr, IB_QP_STATE | 3014 IB_QP_PKEY_INDEX | IB_QP_QKEY); 3015 if (ret) { 3016 dev_err(&port_priv->device->dev, 3017 "Couldn't change QP%d state to INIT: %d\n", 3018 i, ret); 3019 goto out; 3020 } 3021 3022 attr->qp_state = IB_QPS_RTR; 3023 ret = ib_modify_qp(qp, attr, IB_QP_STATE); 3024 if (ret) { 3025 dev_err(&port_priv->device->dev, 3026 "Couldn't change QP%d state to RTR: %d\n", 3027 i, ret); 3028 goto out; 3029 } 3030 3031 attr->qp_state = IB_QPS_RTS; 3032 attr->sq_psn = IB_MAD_SEND_Q_PSN; 3033 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); 3034 if (ret) { 3035 dev_err(&port_priv->device->dev, 3036 "Couldn't change QP%d state to RTS: %d\n", 3037 i, ret); 3038 goto out; 3039 } 3040 } 3041 3042 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 3043 if (ret) { 3044 dev_err(&port_priv->device->dev, 3045 "Failed to request completion notification: %d\n", 3046 ret); 3047 goto out; 3048 } 3049 3050 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 3051 if (!port_priv->qp_info[i].qp) 3052 continue; 3053 3054 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); 3055 if (ret) { 3056 dev_err(&port_priv->device->dev, 3057 "Couldn't post receive WRs\n"); 3058 goto out; 3059 } 3060 } 3061 out: 3062 kfree(attr); 3063 return ret; 3064 } 3065 3066 static void qp_event_handler(struct ib_event *event, void *qp_context) 3067 { 3068 struct ib_mad_qp_info *qp_info = qp_context; 3069 3070 /* It's worse than that! He's dead, Jim! */ 3071 dev_err(&qp_info->port_priv->device->dev, 3072 "Fatal error (%d) on MAD QP (%d)\n", 3073 event->event, qp_info->qp->qp_num); 3074 } 3075 3076 static void init_mad_queue(struct ib_mad_qp_info *qp_info, 3077 struct ib_mad_queue *mad_queue) 3078 { 3079 mad_queue->qp_info = qp_info; 3080 mad_queue->count = 0; 3081 spin_lock_init(&mad_queue->lock); 3082 INIT_LIST_HEAD(&mad_queue->list); 3083 } 3084 3085 static void init_mad_qp(struct ib_mad_port_private *port_priv, 3086 struct ib_mad_qp_info *qp_info) 3087 { 3088 qp_info->port_priv = port_priv; 3089 init_mad_queue(qp_info, &qp_info->send_queue); 3090 init_mad_queue(qp_info, &qp_info->recv_queue); 3091 INIT_LIST_HEAD(&qp_info->overflow_list); 3092 spin_lock_init(&qp_info->snoop_lock); 3093 qp_info->snoop_table = NULL; 3094 qp_info->snoop_table_size = 0; 3095 atomic_set(&qp_info->snoop_count, 0); 3096 } 3097 3098 static int create_mad_qp(struct ib_mad_qp_info *qp_info, 3099 enum ib_qp_type qp_type) 3100 { 3101 struct ib_qp_init_attr qp_init_attr; 3102 int ret; 3103 3104 memset(&qp_init_attr, 0, sizeof qp_init_attr); 3105 qp_init_attr.send_cq = qp_info->port_priv->cq; 3106 qp_init_attr.recv_cq = qp_info->port_priv->cq; 3107 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 3108 qp_init_attr.cap.max_send_wr = mad_sendq_size; 3109 qp_init_attr.cap.max_recv_wr = mad_recvq_size; 3110 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; 3111 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; 3112 qp_init_attr.qp_type = qp_type; 3113 qp_init_attr.port_num = qp_info->port_priv->port_num; 3114 qp_init_attr.qp_context = qp_info; 3115 qp_init_attr.event_handler = qp_event_handler; 3116 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); 3117 if (IS_ERR(qp_info->qp)) { 3118 dev_err(&qp_info->port_priv->device->dev, 3119 "Couldn't create ib_mad QP%d\n", 3120 get_spl_qp_index(qp_type)); 3121 ret = PTR_ERR(qp_info->qp); 3122 goto error; 3123 } 3124 /* Use minimum queue sizes unless the CQ is resized */ 3125 qp_info->send_queue.max_active = mad_sendq_size; 3126 
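	/*
	 * The receive side keeps at most max_active WRs posted;
	 * ib_mad_post_receive_mads() stops refilling the queue once its
	 * count reaches this cap.
	 */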
qp_info->recv_queue.max_active = mad_recvq_size; 3127 return 0; 3128 3129 error: 3130 return ret; 3131 } 3132 3133 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) 3134 { 3135 if (!qp_info->qp) 3136 return; 3137 3138 ib_destroy_qp(qp_info->qp); 3139 kfree(qp_info->snoop_table); 3140 } 3141 3142 /* 3143 * Open the port 3144 * Create the QP, PD, MR, and CQ if needed 3145 */ 3146 static int ib_mad_port_open(struct ib_device *device, 3147 int port_num) 3148 { 3149 int ret, cq_size; 3150 struct ib_mad_port_private *port_priv; 3151 unsigned long flags; 3152 char name[sizeof "ib_mad123"]; 3153 int has_smi; 3154 struct ib_cq_init_attr cq_attr = {}; 3155 3156 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) 3157 return -EFAULT; 3158 3159 if (WARN_ON(rdma_cap_opa_mad(device, port_num) && 3160 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) 3161 return -EFAULT; 3162 3163 /* Create new device info */ 3164 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); 3165 if (!port_priv) { 3166 dev_err(&device->dev, "No memory for ib_mad_port_private\n"); 3167 return -ENOMEM; 3168 } 3169 3170 port_priv->device = device; 3171 port_priv->port_num = port_num; 3172 spin_lock_init(&port_priv->reg_lock); 3173 INIT_LIST_HEAD(&port_priv->agent_list); 3174 init_mad_qp(port_priv, &port_priv->qp_info[0]); 3175 init_mad_qp(port_priv, &port_priv->qp_info[1]); 3176 3177 cq_size = mad_sendq_size + mad_recvq_size; 3178 has_smi = rdma_cap_ib_smi(device, port_num); 3179 if (has_smi) 3180 cq_size *= 2; 3181 3182 cq_attr.cqe = cq_size; 3183 port_priv->cq = ib_create_cq(port_priv->device, 3184 ib_mad_thread_completion_handler, 3185 NULL, port_priv, &cq_attr); 3186 if (IS_ERR(port_priv->cq)) { 3187 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); 3188 ret = PTR_ERR(port_priv->cq); 3189 goto error3; 3190 } 3191 3192 port_priv->pd = ib_alloc_pd(device); 3193 if (IS_ERR(port_priv->pd)) { 3194 dev_err(&device->dev, "Couldn't create ib_mad PD\n"); 3195 ret = PTR_ERR(port_priv->pd); 3196 goto error4; 3197 } 3198 3199 if (has_smi) { 3200 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); 3201 if (ret) 3202 goto error6; 3203 } 3204 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); 3205 if (ret) 3206 goto error7; 3207 3208 snprintf(name, sizeof name, "ib_mad%d", port_num); 3209 port_priv->wq = create_singlethread_workqueue(name); 3210 if (!port_priv->wq) { 3211 ret = -ENOMEM; 3212 goto error8; 3213 } 3214 INIT_WORK(&port_priv->work, ib_mad_completion_handler); 3215 3216 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3217 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 3218 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3219 3220 ret = ib_mad_port_start(port_priv); 3221 if (ret) { 3222 dev_err(&device->dev, "Couldn't start port\n"); 3223 goto error9; 3224 } 3225 3226 return 0; 3227 3228 error9: 3229 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3230 list_del_init(&port_priv->port_list); 3231 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3232 3233 destroy_workqueue(port_priv->wq); 3234 error8: 3235 destroy_mad_qp(&port_priv->qp_info[1]); 3236 error7: 3237 destroy_mad_qp(&port_priv->qp_info[0]); 3238 error6: 3239 ib_dealloc_pd(port_priv->pd); 3240 error4: 3241 ib_destroy_cq(port_priv->cq); 3242 cleanup_recv_queue(&port_priv->qp_info[1]); 3243 cleanup_recv_queue(&port_priv->qp_info[0]); 3244 error3: 3245 kfree(port_priv); 3246 3247 return ret; 3248 } 3249 3250 /* 3251 * Close the port 3252 * If there are no classes using the port, free the port 3253 * 
resources (CQ, MR, PD, QP) and remove the port's info structure 3254 */ 3255 static int ib_mad_port_close(struct ib_device *device, int port_num) 3256 { 3257 struct ib_mad_port_private *port_priv; 3258 unsigned long flags; 3259 3260 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3261 port_priv = __ib_get_mad_port(device, port_num); 3262 if (port_priv == NULL) { 3263 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3264 dev_err(&device->dev, "Port %d not found\n", port_num); 3265 return -ENODEV; 3266 } 3267 list_del_init(&port_priv->port_list); 3268 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3269 3270 destroy_workqueue(port_priv->wq); 3271 destroy_mad_qp(&port_priv->qp_info[1]); 3272 destroy_mad_qp(&port_priv->qp_info[0]); 3273 ib_dealloc_pd(port_priv->pd); 3274 ib_destroy_cq(port_priv->cq); 3275 cleanup_recv_queue(&port_priv->qp_info[1]); 3276 cleanup_recv_queue(&port_priv->qp_info[0]); 3277 /* XXX: Handle deallocation of MAD registration tables */ 3278 3279 kfree(port_priv); 3280 3281 return 0; 3282 } 3283 3284 static void ib_mad_init_device(struct ib_device *device) 3285 { 3286 int start, i; 3287 3288 start = rdma_start_port(device); 3289 3290 for (i = start; i <= rdma_end_port(device); i++) { 3291 if (!rdma_cap_ib_mad(device, i)) 3292 continue; 3293 3294 if (ib_mad_port_open(device, i)) { 3295 dev_err(&device->dev, "Couldn't open port %d\n", i); 3296 goto error; 3297 } 3298 if (ib_agent_port_open(device, i)) { 3299 dev_err(&device->dev, 3300 "Couldn't open port %d for agents\n", i); 3301 goto error_agent; 3302 } 3303 } 3304 return; 3305 3306 error_agent: 3307 if (ib_mad_port_close(device, i)) 3308 dev_err(&device->dev, "Couldn't close port %d\n", i); 3309 3310 error: 3311 while (--i >= start) { 3312 if (!rdma_cap_ib_mad(device, i)) 3313 continue; 3314 3315 if (ib_agent_port_close(device, i)) 3316 dev_err(&device->dev, 3317 "Couldn't close port %d for agents\n", i); 3318 if (ib_mad_port_close(device, i)) 3319 dev_err(&device->dev, "Couldn't close port %d\n", i); 3320 } 3321 } 3322 3323 static void ib_mad_remove_device(struct ib_device *device, void *client_data) 3324 { 3325 int i; 3326 3327 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { 3328 if (!rdma_cap_ib_mad(device, i)) 3329 continue; 3330 3331 if (ib_agent_port_close(device, i)) 3332 dev_err(&device->dev, 3333 "Couldn't close port %d for agents\n", i); 3334 if (ib_mad_port_close(device, i)) 3335 dev_err(&device->dev, "Couldn't close port %d\n", i); 3336 } 3337 } 3338 3339 static struct ib_client mad_client = { 3340 .name = "mad", 3341 .add = ib_mad_init_device, 3342 .remove = ib_mad_remove_device 3343 }; 3344 3345 static int __init ib_mad_init_module(void) 3346 { 3347 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); 3348 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); 3349 3350 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); 3351 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); 3352 3353 INIT_LIST_HEAD(&ib_mad_port_list); 3354 3355 if (ib_register_client(&mad_client)) { 3356 pr_err("Couldn't register ib_mad client\n"); 3357 return -EINVAL; 3358 } 3359 3360 return 0; 3361 } 3362 3363 static void __exit ib_mad_cleanup_module(void) 3364 { 3365 ib_unregister_client(&mad_client); 3366 } 3367 3368 module_init(ib_mad_init_module); 3369 module_exit(ib_mad_cleanup_module); 3370
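
/*
 * Usage sketch for the send-timeout helpers exported above.  "agent"
 * and "send_buf" are placeholders for a client's own ib_mad_agent and
 * ib_mad_send_buf for an outstanding request:
 *
 *	// allow roughly 500 more milliseconds for the response
 *	if (ib_modify_mad(agent, send_buf, 500))
 *		pr_warn("MAD already completed or not found\n");
 *
 *	// or abort it; the agent's send_handler is then invoked with
 *	// status IB_WC_WR_FLUSH_ERR for this send_buf
 *	ib_cancel_mad(agent, send_buf);
 */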