/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		  IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

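/*
 * Add a snoop agent to this QP's snoop table, growing the table by one
 * slot if no free entry exists.  Returns the table index used on success
 * or -ENOMEM if the table could not be grown.
 */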
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

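/*
 * Tear down a send/receive agent: cancel outstanding sends, remove its
 * class registrations, flush the port workqueue and RMPP state, then
 * wait for the last reference to drop before freeing.
 */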
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

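/*
 * Deliver a received MAD to every snoop agent whose flags match,
 * dropping the snoop lock around each callback (mirrors snoop_send()
 * above).
 */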
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
		u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		    opa_smi_handle_dr_smp_send(opa_smp,
					       rdma_cap_ib_switch(device),
					       port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
			       opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

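/*
 * Return the number of pad bytes needed so that data_len fills a whole
 * number of segments of seg_size (mad_size - hdr_len) bytes.
 */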
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

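/*
 * Allocate the list of data segments for an RMPP send and initialize the
 * RMPP header (version, DATA type, ACTIVE flag).  Trailing pad bytes in
 * the last segment are zeroed.
 */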
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

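/*
 * Return the payload to DMA map for a send: the current RMPP segment for
 * segmented sends, otherwise the data area following the MAD header.
 */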
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

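/*
 * Return -EINVAL if any method requested in mad_reg_req is already
 * registered to an agent in this method table, 0 otherwise.
 */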
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		pr_err("No memory for ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

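/*
 * Undo this agent's class registration: clear its entries from the
 * relevant method table and free any method, class, or vendor tables
 * that become empty as a result.
 */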
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

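/*
 * Find the agent that should receive an incoming MAD: responses are
 * routed by the high 32 bits of the TID, other MADs by class version,
 * management class, and method (plus OUI for vendor range 2 classes).
 * Takes a reference on the returned agent.
 */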
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    IB_MGMT_MAX_METHODS)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %d\n",
				   &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

*/ 1882 if (!(attr.ah_flags & IB_AH_GRH)) { 1883 if (ib_get_cached_lmc(device, port_num, &lmc)) 1884 return 0; 1885 return (!lmc || !((attr.src_path_bits ^ 1886 rwc->wc->dlid_path_bits) & 1887 ((1 << lmc) - 1))); 1888 } else { 1889 if (ib_get_cached_gid(device, port_num, 1890 attr.grh.sgid_index, &sgid, NULL)) 1891 return 0; 1892 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, 1893 16); 1894 } 1895 } 1896 1897 if (!(attr.ah_flags & IB_AH_GRH)) 1898 return attr.dlid == rwc->wc->slid; 1899 else 1900 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw, 1901 16); 1902 } 1903 1904 static inline int is_direct(u8 class) 1905 { 1906 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); 1907 } 1908 1909 struct ib_mad_send_wr_private* 1910 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, 1911 const struct ib_mad_recv_wc *wc) 1912 { 1913 struct ib_mad_send_wr_private *wr; 1914 const struct ib_mad_hdr *mad_hdr; 1915 1916 mad_hdr = &wc->recv_buf.mad->mad_hdr; 1917 1918 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { 1919 if ((wr->tid == mad_hdr->tid) && 1920 rcv_has_same_class(wr, wc) && 1921 /* 1922 * Don't check GID for direct routed MADs. 1923 * These might have permissive LIDs. 1924 */ 1925 (is_direct(mad_hdr->mgmt_class) || 1926 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1927 return (wr->status == IB_WC_SUCCESS) ? wr : NULL; 1928 } 1929 1930 /* 1931 * It's possible to receive the response before we've 1932 * been notified that the send has completed 1933 */ 1934 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { 1935 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && 1936 wr->tid == mad_hdr->tid && 1937 wr->timeout && 1938 rcv_has_same_class(wr, wc) && 1939 /* 1940 * Don't check GID for direct routed MADs. 1941 * These might have permissive LIDs. 1942 */ 1943 (is_direct(mad_hdr->mgmt_class) || 1944 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1945 /* Verify request has not been canceled */ 1946 return (wr->status == IB_WC_SUCCESS) ? 
wr : NULL; 1947 } 1948 return NULL; 1949 } 1950 1951 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) 1952 { 1953 mad_send_wr->timeout = 0; 1954 if (mad_send_wr->refcount == 1) 1955 list_move_tail(&mad_send_wr->agent_list, 1956 &mad_send_wr->mad_agent_priv->done_list); 1957 } 1958 1959 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, 1960 struct ib_mad_recv_wc *mad_recv_wc) 1961 { 1962 struct ib_mad_send_wr_private *mad_send_wr; 1963 struct ib_mad_send_wc mad_send_wc; 1964 unsigned long flags; 1965 1966 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 1967 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); 1968 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 1969 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, 1970 mad_recv_wc); 1971 if (!mad_recv_wc) { 1972 deref_mad_agent(mad_agent_priv); 1973 return; 1974 } 1975 } 1976 1977 /* Complete corresponding request */ 1978 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { 1979 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1980 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); 1981 if (!mad_send_wr) { 1982 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1983 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) 1984 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) 1985 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) 1986 & IB_MGMT_RMPP_FLAG_ACTIVE)) { 1987 /* user rmpp is in effect 1988 * and this is an active RMPP MAD 1989 */ 1990 mad_agent_priv->agent.recv_handler( 1991 &mad_agent_priv->agent, NULL, 1992 mad_recv_wc); 1993 atomic_dec(&mad_agent_priv->refcount); 1994 } else { 1995 /* not user rmpp, revert to normal behavior and 1996 * drop the mad */ 1997 ib_free_recv_mad(mad_recv_wc); 1998 deref_mad_agent(mad_agent_priv); 1999 return; 2000 } 2001 } else { 2002 ib_mark_mad_done(mad_send_wr); 2003 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2004 2005 /* Defined behavior is to complete response before request */ 2006 mad_agent_priv->agent.recv_handler( 2007 &mad_agent_priv->agent, 2008 &mad_send_wr->send_buf, 2009 mad_recv_wc); 2010 atomic_dec(&mad_agent_priv->refcount); 2011 2012 mad_send_wc.status = IB_WC_SUCCESS; 2013 mad_send_wc.vendor_err = 0; 2014 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2015 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2016 } 2017 } else { 2018 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL, 2019 mad_recv_wc); 2020 deref_mad_agent(mad_agent_priv); 2021 } 2022 } 2023 2024 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, 2025 const struct ib_mad_qp_info *qp_info, 2026 const struct ib_wc *wc, 2027 int port_num, 2028 struct ib_mad_private *recv, 2029 struct ib_mad_private *response) 2030 { 2031 enum smi_forward_action retsmi; 2032 struct ib_smp *smp = (struct ib_smp *)recv->mad; 2033 2034 if (smi_handle_dr_smp_recv(smp, 2035 rdma_cap_ib_switch(port_priv->device), 2036 port_num, 2037 port_priv->device->phys_port_cnt) == 2038 IB_SMI_DISCARD) 2039 return IB_SMI_DISCARD; 2040 2041 retsmi = smi_check_forward_dr_smp(smp); 2042 if (retsmi == IB_SMI_LOCAL) 2043 return IB_SMI_HANDLE; 2044 2045 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2046 if (smi_handle_dr_smp_send(smp, 2047 rdma_cap_ib_switch(port_priv->device), 2048 port_num) == IB_SMI_DISCARD) 2049 return IB_SMI_DISCARD; 2050 2051 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) 2052 return IB_SMI_DISCARD; 2053 } else if 
(rdma_cap_ib_switch(port_priv->device)) { 2054 /* forward case for switches */ 2055 memcpy(response, recv, mad_priv_size(response)); 2056 response->header.recv_wc.wc = &response->header.wc; 2057 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2058 response->header.recv_wc.recv_buf.grh = &response->grh; 2059 2060 agent_send_response((const struct ib_mad_hdr *)response->mad, 2061 &response->grh, wc, 2062 port_priv->device, 2063 smi_get_fwd_port(smp), 2064 qp_info->qp->qp_num, 2065 response->mad_size, 2066 false); 2067 2068 return IB_SMI_DISCARD; 2069 } 2070 return IB_SMI_HANDLE; 2071 } 2072 2073 static bool generate_unmatched_resp(const struct ib_mad_private *recv, 2074 struct ib_mad_private *response, 2075 size_t *resp_len, bool opa) 2076 { 2077 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; 2078 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; 2079 2080 if (recv_hdr->method == IB_MGMT_METHOD_GET || 2081 recv_hdr->method == IB_MGMT_METHOD_SET) { 2082 memcpy(response, recv, mad_priv_size(response)); 2083 response->header.recv_wc.wc = &response->header.wc; 2084 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2085 response->header.recv_wc.recv_buf.grh = &response->grh; 2086 resp_hdr->method = IB_MGMT_METHOD_GET_RESP; 2087 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); 2088 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2089 resp_hdr->status |= IB_SMP_DIRECTION; 2090 2091 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { 2092 if (recv_hdr->mgmt_class == 2093 IB_MGMT_CLASS_SUBN_LID_ROUTED || 2094 recv_hdr->mgmt_class == 2095 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2096 *resp_len = opa_get_smp_header_size( 2097 (struct opa_smp *)recv->mad); 2098 else 2099 *resp_len = sizeof(struct ib_mad_hdr); 2100 } 2101 2102 return true; 2103 } else { 2104 return false; 2105 } 2106 } 2107 2108 static enum smi_action 2109 handle_opa_smi(struct ib_mad_port_private *port_priv, 2110 struct ib_mad_qp_info *qp_info, 2111 struct ib_wc *wc, 2112 int port_num, 2113 struct ib_mad_private *recv, 2114 struct ib_mad_private *response) 2115 { 2116 enum smi_forward_action retsmi; 2117 struct opa_smp *smp = (struct opa_smp *)recv->mad; 2118 2119 if (opa_smi_handle_dr_smp_recv(smp, 2120 rdma_cap_ib_switch(port_priv->device), 2121 port_num, 2122 port_priv->device->phys_port_cnt) == 2123 IB_SMI_DISCARD) 2124 return IB_SMI_DISCARD; 2125 2126 retsmi = opa_smi_check_forward_dr_smp(smp); 2127 if (retsmi == IB_SMI_LOCAL) 2128 return IB_SMI_HANDLE; 2129 2130 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2131 if (opa_smi_handle_dr_smp_send(smp, 2132 rdma_cap_ib_switch(port_priv->device), 2133 port_num) == IB_SMI_DISCARD) 2134 return IB_SMI_DISCARD; 2135 2136 if (opa_smi_check_local_smp(smp, port_priv->device) == 2137 IB_SMI_DISCARD) 2138 return IB_SMI_DISCARD; 2139 2140 } else if (rdma_cap_ib_switch(port_priv->device)) { 2141 /* forward case for switches */ 2142 memcpy(response, recv, mad_priv_size(response)); 2143 response->header.recv_wc.wc = &response->header.wc; 2144 response->header.recv_wc.recv_buf.opa_mad = 2145 (struct opa_mad *)response->mad; 2146 response->header.recv_wc.recv_buf.grh = &response->grh; 2147 2148 agent_send_response((const struct ib_mad_hdr *)response->mad, 2149 &response->grh, wc, 2150 port_priv->device, 2151 opa_smi_get_fwd_port(smp), 2152 qp_info->qp->qp_num, 2153 recv->header.wc.byte_len, 2154 true); 2155 2156 return IB_SMI_DISCARD; 2157 } 2158 2159 return 
IB_SMI_HANDLE; 2160 } 2161 2162 static enum smi_action 2163 handle_smi(struct ib_mad_port_private *port_priv, 2164 struct ib_mad_qp_info *qp_info, 2165 struct ib_wc *wc, 2166 int port_num, 2167 struct ib_mad_private *recv, 2168 struct ib_mad_private *response, 2169 bool opa) 2170 { 2171 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; 2172 2173 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && 2174 mad_hdr->class_version == OPA_SMI_CLASS_VERSION) 2175 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, 2176 response); 2177 2178 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); 2179 } 2180 2181 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) 2182 { 2183 struct ib_mad_port_private *port_priv = cq->cq_context; 2184 struct ib_mad_list_head *mad_list = 2185 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2186 struct ib_mad_qp_info *qp_info; 2187 struct ib_mad_private_header *mad_priv_hdr; 2188 struct ib_mad_private *recv, *response = NULL; 2189 struct ib_mad_agent_private *mad_agent; 2190 int port_num; 2191 int ret = IB_MAD_RESULT_SUCCESS; 2192 size_t mad_size; 2193 u16 resp_mad_pkey_index = 0; 2194 bool opa; 2195 2196 if (list_empty_careful(&port_priv->port_list)) 2197 return; 2198 2199 if (wc->status != IB_WC_SUCCESS) { 2200 /* 2201 * Receive errors indicate that the QP has entered the error 2202 * state - error handling/shutdown code will cleanup 2203 */ 2204 return; 2205 } 2206 2207 qp_info = mad_list->mad_queue->qp_info; 2208 dequeue_mad(mad_list); 2209 2210 opa = rdma_cap_opa_mad(qp_info->port_priv->device, 2211 qp_info->port_priv->port_num); 2212 2213 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, 2214 mad_list); 2215 recv = container_of(mad_priv_hdr, struct ib_mad_private, header); 2216 ib_dma_unmap_single(port_priv->device, 2217 recv->header.mapping, 2218 mad_priv_dma_size(recv), 2219 DMA_FROM_DEVICE); 2220 2221 /* Setup MAD receive work completion from "normal" work completion */ 2222 recv->header.wc = *wc; 2223 recv->header.recv_wc.wc = &recv->header.wc; 2224 2225 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { 2226 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); 2227 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2228 } else { 2229 recv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2230 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2231 } 2232 2233 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; 2234 recv->header.recv_wc.recv_buf.grh = &recv->grh; 2235 2236 if (atomic_read(&qp_info->snoop_count)) 2237 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); 2238 2239 /* Validate MAD */ 2240 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) 2241 goto out; 2242 2243 mad_size = recv->mad_size; 2244 response = alloc_mad_private(mad_size, GFP_KERNEL); 2245 if (!response) { 2246 dev_err(&port_priv->device->dev, 2247 "%s: no memory for response buffer\n", __func__); 2248 goto out; 2249 } 2250 2251 if (rdma_cap_ib_switch(port_priv->device)) 2252 port_num = wc->port_num; 2253 else 2254 port_num = port_priv->port_num; 2255 2256 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == 2257 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 2258 if (handle_smi(port_priv, qp_info, wc, port_num, recv, 2259 response, opa) 2260 == IB_SMI_DISCARD) 2261 goto out; 2262 } 2263 2264 /* Give driver "right of first refusal" on incoming MAD */ 2265 if (port_priv->device->process_mad) { 2266 ret 
= port_priv->device->process_mad(port_priv->device, 0, 2267 port_priv->port_num, 2268 wc, &recv->grh, 2269 (const struct ib_mad_hdr *)recv->mad, 2270 recv->mad_size, 2271 (struct ib_mad_hdr *)response->mad, 2272 &mad_size, &resp_mad_pkey_index); 2273 2274 if (opa) 2275 wc->pkey_index = resp_mad_pkey_index; 2276 2277 if (ret & IB_MAD_RESULT_SUCCESS) { 2278 if (ret & IB_MAD_RESULT_CONSUMED) 2279 goto out; 2280 if (ret & IB_MAD_RESULT_REPLY) { 2281 agent_send_response((const struct ib_mad_hdr *)response->mad, 2282 &recv->grh, wc, 2283 port_priv->device, 2284 port_num, 2285 qp_info->qp->qp_num, 2286 mad_size, opa); 2287 goto out; 2288 } 2289 } 2290 } 2291 2292 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); 2293 if (mad_agent) { 2294 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); 2295 /* 2296 * recv is freed up in error cases in ib_mad_complete_recv 2297 * or via recv_handler in ib_mad_complete_recv() 2298 */ 2299 recv = NULL; 2300 } else if ((ret & IB_MAD_RESULT_SUCCESS) && 2301 generate_unmatched_resp(recv, response, &mad_size, opa)) { 2302 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, 2303 port_priv->device, port_num, 2304 qp_info->qp->qp_num, mad_size, opa); 2305 } 2306 2307 out: 2308 /* Post another receive request for this QP */ 2309 if (response) { 2310 ib_mad_post_receive_mads(qp_info, response); 2311 kfree(recv); 2312 } else 2313 ib_mad_post_receive_mads(qp_info, recv); 2314 } 2315 2316 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) 2317 { 2318 struct ib_mad_send_wr_private *mad_send_wr; 2319 unsigned long delay; 2320 2321 if (list_empty(&mad_agent_priv->wait_list)) { 2322 cancel_delayed_work(&mad_agent_priv->timed_work); 2323 } else { 2324 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2325 struct ib_mad_send_wr_private, 2326 agent_list); 2327 2328 if (time_after(mad_agent_priv->timeout, 2329 mad_send_wr->timeout)) { 2330 mad_agent_priv->timeout = mad_send_wr->timeout; 2331 delay = mad_send_wr->timeout - jiffies; 2332 if ((long)delay <= 0) 2333 delay = 1; 2334 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2335 &mad_agent_priv->timed_work, delay); 2336 } 2337 } 2338 } 2339 2340 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) 2341 { 2342 struct ib_mad_agent_private *mad_agent_priv; 2343 struct ib_mad_send_wr_private *temp_mad_send_wr; 2344 struct list_head *list_item; 2345 unsigned long delay; 2346 2347 mad_agent_priv = mad_send_wr->mad_agent_priv; 2348 list_del(&mad_send_wr->agent_list); 2349 2350 delay = mad_send_wr->timeout; 2351 mad_send_wr->timeout += jiffies; 2352 2353 if (delay) { 2354 list_for_each_prev(list_item, &mad_agent_priv->wait_list) { 2355 temp_mad_send_wr = list_entry(list_item, 2356 struct ib_mad_send_wr_private, 2357 agent_list); 2358 if (time_after(mad_send_wr->timeout, 2359 temp_mad_send_wr->timeout)) 2360 break; 2361 } 2362 } 2363 else 2364 list_item = &mad_agent_priv->wait_list; 2365 list_add(&mad_send_wr->agent_list, list_item); 2366 2367 /* Reschedule a work item if we have a shorter timeout */ 2368 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) 2369 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2370 &mad_agent_priv->timed_work, delay); 2371 } 2372 2373 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, 2374 int timeout_ms) 2375 { 2376 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2377 wait_for_response(mad_send_wr); 2378 } 2379 2380 /* 2381 * Process a send work 
completion 2382 */ 2383 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 2384 struct ib_mad_send_wc *mad_send_wc) 2385 { 2386 struct ib_mad_agent_private *mad_agent_priv; 2387 unsigned long flags; 2388 int ret; 2389 2390 mad_agent_priv = mad_send_wr->mad_agent_priv; 2391 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2392 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 2393 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); 2394 if (ret == IB_RMPP_RESULT_CONSUMED) 2395 goto done; 2396 } else 2397 ret = IB_RMPP_RESULT_UNHANDLED; 2398 2399 if (mad_send_wc->status != IB_WC_SUCCESS && 2400 mad_send_wr->status == IB_WC_SUCCESS) { 2401 mad_send_wr->status = mad_send_wc->status; 2402 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2403 } 2404 2405 if (--mad_send_wr->refcount > 0) { 2406 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && 2407 mad_send_wr->status == IB_WC_SUCCESS) { 2408 wait_for_response(mad_send_wr); 2409 } 2410 goto done; 2411 } 2412 2413 /* Remove send from MAD agent and notify client of completion */ 2414 list_del(&mad_send_wr->agent_list); 2415 adjust_timeout(mad_agent_priv); 2416 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2417 2418 if (mad_send_wr->status != IB_WC_SUCCESS ) 2419 mad_send_wc->status = mad_send_wr->status; 2420 if (ret == IB_RMPP_RESULT_INTERNAL) 2421 ib_rmpp_send_handler(mad_send_wc); 2422 else 2423 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2424 mad_send_wc); 2425 2426 /* Release reference on agent taken when sending */ 2427 deref_mad_agent(mad_agent_priv); 2428 return; 2429 done: 2430 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2431 } 2432 2433 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) 2434 { 2435 struct ib_mad_port_private *port_priv = cq->cq_context; 2436 struct ib_mad_list_head *mad_list = 2437 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2438 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; 2439 struct ib_mad_qp_info *qp_info; 2440 struct ib_mad_queue *send_queue; 2441 struct ib_send_wr *bad_send_wr; 2442 struct ib_mad_send_wc mad_send_wc; 2443 unsigned long flags; 2444 int ret; 2445 2446 if (list_empty_careful(&port_priv->port_list)) 2447 return; 2448 2449 if (wc->status != IB_WC_SUCCESS) { 2450 if (!ib_mad_send_error(port_priv, wc)) 2451 return; 2452 } 2453 2454 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2455 mad_list); 2456 send_queue = mad_list->mad_queue; 2457 qp_info = send_queue->qp_info; 2458 2459 retry: 2460 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2461 mad_send_wr->header_mapping, 2462 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2463 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2464 mad_send_wr->payload_mapping, 2465 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2466 queued_send_wr = NULL; 2467 spin_lock_irqsave(&send_queue->lock, flags); 2468 list_del(&mad_list->list); 2469 2470 /* Move queued send to the send queue */ 2471 if (send_queue->count-- > send_queue->max_active) { 2472 mad_list = container_of(qp_info->overflow_list.next, 2473 struct ib_mad_list_head, list); 2474 queued_send_wr = container_of(mad_list, 2475 struct ib_mad_send_wr_private, 2476 mad_list); 2477 list_move_tail(&mad_list->list, &send_queue->list); 2478 } 2479 spin_unlock_irqrestore(&send_queue->lock, flags); 2480 2481 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2482 mad_send_wc.status = wc->status; 2483 mad_send_wc.vendor_err = wc->vendor_err; 2484 if 
(atomic_read(&qp_info->snoop_count)) 2485 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, 2486 IB_MAD_SNOOP_SEND_COMPLETIONS); 2487 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2488 2489 if (queued_send_wr) { 2490 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, 2491 &bad_send_wr); 2492 if (ret) { 2493 dev_err(&port_priv->device->dev, 2494 "ib_post_send failed: %d\n", ret); 2495 mad_send_wr = queued_send_wr; 2496 wc->status = IB_WC_LOC_QP_OP_ERR; 2497 goto retry; 2498 } 2499 } 2500 } 2501 2502 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) 2503 { 2504 struct ib_mad_send_wr_private *mad_send_wr; 2505 struct ib_mad_list_head *mad_list; 2506 unsigned long flags; 2507 2508 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 2509 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { 2510 mad_send_wr = container_of(mad_list, 2511 struct ib_mad_send_wr_private, 2512 mad_list); 2513 mad_send_wr->retry = 1; 2514 } 2515 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 2516 } 2517 2518 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, 2519 struct ib_wc *wc) 2520 { 2521 struct ib_mad_list_head *mad_list = 2522 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2523 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; 2524 struct ib_mad_send_wr_private *mad_send_wr; 2525 int ret; 2526 2527 /* 2528 * Send errors will transition the QP to SQE - move 2529 * QP to RTS and repost flushed work requests 2530 */ 2531 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2532 mad_list); 2533 if (wc->status == IB_WC_WR_FLUSH_ERR) { 2534 if (mad_send_wr->retry) { 2535 /* Repost send */ 2536 struct ib_send_wr *bad_send_wr; 2537 2538 mad_send_wr->retry = 0; 2539 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, 2540 &bad_send_wr); 2541 if (!ret) 2542 return false; 2543 } 2544 } else { 2545 struct ib_qp_attr *attr; 2546 2547 /* Transition QP to RTS and fail offending send */ 2548 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2549 if (attr) { 2550 attr->qp_state = IB_QPS_RTS; 2551 attr->cur_qp_state = IB_QPS_SQE; 2552 ret = ib_modify_qp(qp_info->qp, attr, 2553 IB_QP_STATE | IB_QP_CUR_STATE); 2554 kfree(attr); 2555 if (ret) 2556 dev_err(&port_priv->device->dev, 2557 "%s - ib_modify_qp to RTS: %d\n", 2558 __func__, ret); 2559 else 2560 mark_sends_for_retry(qp_info); 2561 } 2562 } 2563 2564 return true; 2565 } 2566 2567 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) 2568 { 2569 unsigned long flags; 2570 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; 2571 struct ib_mad_send_wc mad_send_wc; 2572 struct list_head cancel_list; 2573 2574 INIT_LIST_HEAD(&cancel_list); 2575 2576 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2577 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2578 &mad_agent_priv->send_list, agent_list) { 2579 if (mad_send_wr->status == IB_WC_SUCCESS) { 2580 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2581 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2582 } 2583 } 2584 2585 /* Empty wait list to prevent receives from finding a request */ 2586 list_splice_init(&mad_agent_priv->wait_list, &cancel_list); 2587 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2588 2589 /* Report all cancelled requests */ 2590 mad_send_wc.status = IB_WC_WR_FLUSH_ERR; 2591 mad_send_wc.vendor_err = 0; 2592 2593 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2594 &cancel_list, agent_list) { 2595 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2596 
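/*
 * Each canceled send is unlinked from the local cancel list, reported to
 * the client with the flush status set above, and the agent reference
 * taken when the send was queued is dropped.
 */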
list_del(&mad_send_wr->agent_list); 2597 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2598 &mad_send_wc); 2599 atomic_dec(&mad_agent_priv->refcount); 2600 } 2601 } 2602 2603 static struct ib_mad_send_wr_private* 2604 find_send_wr(struct ib_mad_agent_private *mad_agent_priv, 2605 struct ib_mad_send_buf *send_buf) 2606 { 2607 struct ib_mad_send_wr_private *mad_send_wr; 2608 2609 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 2610 agent_list) { 2611 if (&mad_send_wr->send_buf == send_buf) 2612 return mad_send_wr; 2613 } 2614 2615 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 2616 agent_list) { 2617 if (is_rmpp_data_mad(mad_agent_priv, 2618 mad_send_wr->send_buf.mad) && 2619 &mad_send_wr->send_buf == send_buf) 2620 return mad_send_wr; 2621 } 2622 return NULL; 2623 } 2624 2625 int ib_modify_mad(struct ib_mad_agent *mad_agent, 2626 struct ib_mad_send_buf *send_buf, u32 timeout_ms) 2627 { 2628 struct ib_mad_agent_private *mad_agent_priv; 2629 struct ib_mad_send_wr_private *mad_send_wr; 2630 unsigned long flags; 2631 int active; 2632 2633 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 2634 agent); 2635 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2636 mad_send_wr = find_send_wr(mad_agent_priv, send_buf); 2637 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { 2638 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2639 return -EINVAL; 2640 } 2641 2642 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); 2643 if (!timeout_ms) { 2644 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2645 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2646 } 2647 2648 mad_send_wr->send_buf.timeout_ms = timeout_ms; 2649 if (active) 2650 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2651 else 2652 ib_reset_mad_timeout(mad_send_wr, timeout_ms); 2653 2654 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2655 return 0; 2656 } 2657 EXPORT_SYMBOL(ib_modify_mad); 2658 2659 void ib_cancel_mad(struct ib_mad_agent *mad_agent, 2660 struct ib_mad_send_buf *send_buf) 2661 { 2662 ib_modify_mad(mad_agent, send_buf, 0); 2663 } 2664 EXPORT_SYMBOL(ib_cancel_mad); 2665 2666 static void local_completions(struct work_struct *work) 2667 { 2668 struct ib_mad_agent_private *mad_agent_priv; 2669 struct ib_mad_local_private *local; 2670 struct ib_mad_agent_private *recv_mad_agent; 2671 unsigned long flags; 2672 int free_mad; 2673 struct ib_wc wc; 2674 struct ib_mad_send_wc mad_send_wc; 2675 bool opa; 2676 2677 mad_agent_priv = 2678 container_of(work, struct ib_mad_agent_private, local_work); 2679 2680 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 2681 mad_agent_priv->qp_info->port_priv->port_num); 2682 2683 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2684 while (!list_empty(&mad_agent_priv->local_list)) { 2685 local = list_entry(mad_agent_priv->local_list.next, 2686 struct ib_mad_local_private, 2687 completion_list); 2688 list_del(&local->completion_list); 2689 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2690 free_mad = 0; 2691 if (local->mad_priv) { 2692 u8 base_version; 2693 recv_mad_agent = local->recv_mad_agent; 2694 if (!recv_mad_agent) { 2695 dev_err(&mad_agent_priv->agent.device->dev, 2696 "No receive MAD agent for local completion\n"); 2697 free_mad = 1; 2698 goto local_send_completion; 2699 } 2700 2701 /* 2702 * Defined behavior is to complete response 2703 * before request 2704 */ 2705 build_smp_wc(recv_mad_agent->agent.qp, 2706 local->mad_send_wr->send_wr.wr.wr_cqe, 2707 
be16_to_cpu(IB_LID_PERMISSIVE), 2708 local->mad_send_wr->send_wr.pkey_index, 2709 recv_mad_agent->agent.port_num, &wc); 2710 2711 local->mad_priv->header.recv_wc.wc = &wc; 2712 2713 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; 2714 if (opa && base_version == OPA_MGMT_BASE_VERSION) { 2715 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; 2716 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2717 } else { 2718 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2719 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2720 } 2721 2722 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); 2723 list_add(&local->mad_priv->header.recv_wc.recv_buf.list, 2724 &local->mad_priv->header.recv_wc.rmpp_list); 2725 local->mad_priv->header.recv_wc.recv_buf.grh = NULL; 2726 local->mad_priv->header.recv_wc.recv_buf.mad = 2727 (struct ib_mad *)local->mad_priv->mad; 2728 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) 2729 snoop_recv(recv_mad_agent->qp_info, 2730 &local->mad_priv->header.recv_wc, 2731 IB_MAD_SNOOP_RECVS); 2732 recv_mad_agent->agent.recv_handler( 2733 &recv_mad_agent->agent, 2734 &local->mad_send_wr->send_buf, 2735 &local->mad_priv->header.recv_wc); 2736 spin_lock_irqsave(&recv_mad_agent->lock, flags); 2737 atomic_dec(&recv_mad_agent->refcount); 2738 spin_unlock_irqrestore(&recv_mad_agent->lock, flags); 2739 } 2740 2741 local_send_completion: 2742 /* Complete send */ 2743 mad_send_wc.status = IB_WC_SUCCESS; 2744 mad_send_wc.vendor_err = 0; 2745 mad_send_wc.send_buf = &local->mad_send_wr->send_buf; 2746 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) 2747 snoop_send(mad_agent_priv->qp_info, 2748 &local->mad_send_wr->send_buf, 2749 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); 2750 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2751 &mad_send_wc); 2752 2753 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2754 atomic_dec(&mad_agent_priv->refcount); 2755 if (free_mad) 2756 kfree(local->mad_priv); 2757 kfree(local); 2758 } 2759 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2760 } 2761 2762 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) 2763 { 2764 int ret; 2765 2766 if (!mad_send_wr->retries_left) 2767 return -ETIMEDOUT; 2768 2769 mad_send_wr->retries_left--; 2770 mad_send_wr->send_buf.retries++; 2771 2772 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); 2773 2774 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { 2775 ret = ib_retry_rmpp(mad_send_wr); 2776 switch (ret) { 2777 case IB_RMPP_RESULT_UNHANDLED: 2778 ret = ib_send_mad(mad_send_wr); 2779 break; 2780 case IB_RMPP_RESULT_CONSUMED: 2781 ret = 0; 2782 break; 2783 default: 2784 ret = -ECOMM; 2785 break; 2786 } 2787 } else 2788 ret = ib_send_mad(mad_send_wr); 2789 2790 if (!ret) { 2791 mad_send_wr->refcount++; 2792 list_add_tail(&mad_send_wr->agent_list, 2793 &mad_send_wr->mad_agent_priv->send_list); 2794 } 2795 return ret; 2796 } 2797 2798 static void timeout_sends(struct work_struct *work) 2799 { 2800 struct ib_mad_agent_private *mad_agent_priv; 2801 struct ib_mad_send_wr_private *mad_send_wr; 2802 struct ib_mad_send_wc mad_send_wc; 2803 unsigned long flags, delay; 2804 2805 mad_agent_priv = container_of(work, struct ib_mad_agent_private, 2806 timed_work.work); 2807 mad_send_wc.vendor_err = 0; 2808 2809 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2810 while (!list_empty(&mad_agent_priv->wait_list)) { 2811 mad_send_wr = 
list_entry(mad_agent_priv->wait_list.next, 2812 struct ib_mad_send_wr_private, 2813 agent_list); 2814 2815 if (time_after(mad_send_wr->timeout, jiffies)) { 2816 delay = mad_send_wr->timeout - jiffies; 2817 if ((long)delay <= 0) 2818 delay = 1; 2819 queue_delayed_work(mad_agent_priv->qp_info-> 2820 port_priv->wq, 2821 &mad_agent_priv->timed_work, delay); 2822 break; 2823 } 2824 2825 list_del(&mad_send_wr->agent_list); 2826 if (mad_send_wr->status == IB_WC_SUCCESS && 2827 !retry_send(mad_send_wr)) 2828 continue; 2829 2830 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2831 2832 if (mad_send_wr->status == IB_WC_SUCCESS) 2833 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; 2834 else 2835 mad_send_wc.status = mad_send_wr->status; 2836 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2837 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2838 &mad_send_wc); 2839 2840 atomic_dec(&mad_agent_priv->refcount); 2841 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2842 } 2843 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2844 } 2845 2846 /* 2847 * Allocate receive MADs and post receive WRs for them 2848 */ 2849 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 2850 struct ib_mad_private *mad) 2851 { 2852 unsigned long flags; 2853 int post, ret; 2854 struct ib_mad_private *mad_priv; 2855 struct ib_sge sg_list; 2856 struct ib_recv_wr recv_wr, *bad_recv_wr; 2857 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; 2858 2859 /* Initialize common scatter list fields */ 2860 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; 2861 2862 /* Initialize common receive WR fields */ 2863 recv_wr.next = NULL; 2864 recv_wr.sg_list = &sg_list; 2865 recv_wr.num_sge = 1; 2866 2867 do { 2868 /* Allocate and map receive buffer */ 2869 if (mad) { 2870 mad_priv = mad; 2871 mad = NULL; 2872 } else { 2873 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), 2874 GFP_ATOMIC); 2875 if (!mad_priv) { 2876 dev_err(&qp_info->port_priv->device->dev, 2877 "No memory for receive buffer\n"); 2878 ret = -ENOMEM; 2879 break; 2880 } 2881 } 2882 sg_list.length = mad_priv_dma_size(mad_priv); 2883 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, 2884 &mad_priv->grh, 2885 mad_priv_dma_size(mad_priv), 2886 DMA_FROM_DEVICE); 2887 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, 2888 sg_list.addr))) { 2889 ret = -ENOMEM; 2890 break; 2891 } 2892 mad_priv->header.mapping = sg_list.addr; 2893 mad_priv->header.mad_list.mad_queue = recv_queue; 2894 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; 2895 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; 2896 2897 /* Post receive WR */ 2898 spin_lock_irqsave(&recv_queue->lock, flags); 2899 post = (++recv_queue->count < recv_queue->max_active); 2900 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); 2901 spin_unlock_irqrestore(&recv_queue->lock, flags); 2902 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); 2903 if (ret) { 2904 spin_lock_irqsave(&recv_queue->lock, flags); 2905 list_del(&mad_priv->header.mad_list.list); 2906 recv_queue->count--; 2907 spin_unlock_irqrestore(&recv_queue->lock, flags); 2908 ib_dma_unmap_single(qp_info->port_priv->device, 2909 mad_priv->header.mapping, 2910 mad_priv_dma_size(mad_priv), 2911 DMA_FROM_DEVICE); 2912 kfree(mad_priv); 2913 dev_err(&qp_info->port_priv->device->dev, 2914 "ib_post_recv failed: %d\n", ret); 2915 break; 2916 } 2917 } while (post); 2918 2919 return ret; 2920 } 2921 2922 /* 2923 * Return all the posted receive MADs 2924 */ 2925 static void 
cleanup_recv_queue(struct ib_mad_qp_info *qp_info) 2926 { 2927 struct ib_mad_private_header *mad_priv_hdr; 2928 struct ib_mad_private *recv; 2929 struct ib_mad_list_head *mad_list; 2930 2931 if (!qp_info->qp) 2932 return; 2933 2934 while (!list_empty(&qp_info->recv_queue.list)) { 2935 2936 mad_list = list_entry(qp_info->recv_queue.list.next, 2937 struct ib_mad_list_head, list); 2938 mad_priv_hdr = container_of(mad_list, 2939 struct ib_mad_private_header, 2940 mad_list); 2941 recv = container_of(mad_priv_hdr, struct ib_mad_private, 2942 header); 2943 2944 /* Remove from posted receive MAD list */ 2945 list_del(&mad_list->list); 2946 2947 ib_dma_unmap_single(qp_info->port_priv->device, 2948 recv->header.mapping, 2949 mad_priv_dma_size(recv), 2950 DMA_FROM_DEVICE); 2951 kfree(recv); 2952 } 2953 2954 qp_info->recv_queue.count = 0; 2955 } 2956 2957 /* 2958 * Start the port 2959 */ 2960 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) 2961 { 2962 int ret, i; 2963 struct ib_qp_attr *attr; 2964 struct ib_qp *qp; 2965 u16 pkey_index; 2966 2967 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2968 if (!attr) { 2969 dev_err(&port_priv->device->dev, 2970 "Couldn't kmalloc ib_qp_attr\n"); 2971 return -ENOMEM; 2972 } 2973 2974 ret = ib_find_pkey(port_priv->device, port_priv->port_num, 2975 IB_DEFAULT_PKEY_FULL, &pkey_index); 2976 if (ret) 2977 pkey_index = 0; 2978 2979 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 2980 qp = port_priv->qp_info[i].qp; 2981 if (!qp) 2982 continue; 2983 2984 /* 2985 * PKey index for QP1 is irrelevant but 2986 * one is needed for the Reset to Init transition 2987 */ 2988 attr->qp_state = IB_QPS_INIT; 2989 attr->pkey_index = pkey_index; 2990 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; 2991 ret = ib_modify_qp(qp, attr, IB_QP_STATE | 2992 IB_QP_PKEY_INDEX | IB_QP_QKEY); 2993 if (ret) { 2994 dev_err(&port_priv->device->dev, 2995 "Couldn't change QP%d state to INIT: %d\n", 2996 i, ret); 2997 goto out; 2998 } 2999 3000 attr->qp_state = IB_QPS_RTR; 3001 ret = ib_modify_qp(qp, attr, IB_QP_STATE); 3002 if (ret) { 3003 dev_err(&port_priv->device->dev, 3004 "Couldn't change QP%d state to RTR: %d\n", 3005 i, ret); 3006 goto out; 3007 } 3008 3009 attr->qp_state = IB_QPS_RTS; 3010 attr->sq_psn = IB_MAD_SEND_Q_PSN; 3011 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); 3012 if (ret) { 3013 dev_err(&port_priv->device->dev, 3014 "Couldn't change QP%d state to RTS: %d\n", 3015 i, ret); 3016 goto out; 3017 } 3018 } 3019 3020 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 3021 if (ret) { 3022 dev_err(&port_priv->device->dev, 3023 "Failed to request completion notification: %d\n", 3024 ret); 3025 goto out; 3026 } 3027 3028 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 3029 if (!port_priv->qp_info[i].qp) 3030 continue; 3031 3032 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); 3033 if (ret) { 3034 dev_err(&port_priv->device->dev, 3035 "Couldn't post receive WRs\n"); 3036 goto out; 3037 } 3038 } 3039 out: 3040 kfree(attr); 3041 return ret; 3042 } 3043 3044 static void qp_event_handler(struct ib_event *event, void *qp_context) 3045 { 3046 struct ib_mad_qp_info *qp_info = qp_context; 3047 3048 /* It's worse than that! He's dead, Jim! 
*/ 3049 dev_err(&qp_info->port_priv->device->dev, 3050 "Fatal error (%d) on MAD QP (%d)\n", 3051 event->event, qp_info->qp->qp_num); 3052 } 3053 3054 static void init_mad_queue(struct ib_mad_qp_info *qp_info, 3055 struct ib_mad_queue *mad_queue) 3056 { 3057 mad_queue->qp_info = qp_info; 3058 mad_queue->count = 0; 3059 spin_lock_init(&mad_queue->lock); 3060 INIT_LIST_HEAD(&mad_queue->list); 3061 } 3062 3063 static void init_mad_qp(struct ib_mad_port_private *port_priv, 3064 struct ib_mad_qp_info *qp_info) 3065 { 3066 qp_info->port_priv = port_priv; 3067 init_mad_queue(qp_info, &qp_info->send_queue); 3068 init_mad_queue(qp_info, &qp_info->recv_queue); 3069 INIT_LIST_HEAD(&qp_info->overflow_list); 3070 spin_lock_init(&qp_info->snoop_lock); 3071 qp_info->snoop_table = NULL; 3072 qp_info->snoop_table_size = 0; 3073 atomic_set(&qp_info->snoop_count, 0); 3074 } 3075 3076 static int create_mad_qp(struct ib_mad_qp_info *qp_info, 3077 enum ib_qp_type qp_type) 3078 { 3079 struct ib_qp_init_attr qp_init_attr; 3080 int ret; 3081 3082 memset(&qp_init_attr, 0, sizeof qp_init_attr); 3083 qp_init_attr.send_cq = qp_info->port_priv->cq; 3084 qp_init_attr.recv_cq = qp_info->port_priv->cq; 3085 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 3086 qp_init_attr.cap.max_send_wr = mad_sendq_size; 3087 qp_init_attr.cap.max_recv_wr = mad_recvq_size; 3088 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; 3089 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; 3090 qp_init_attr.qp_type = qp_type; 3091 qp_init_attr.port_num = qp_info->port_priv->port_num; 3092 qp_init_attr.qp_context = qp_info; 3093 qp_init_attr.event_handler = qp_event_handler; 3094 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); 3095 if (IS_ERR(qp_info->qp)) { 3096 dev_err(&qp_info->port_priv->device->dev, 3097 "Couldn't create ib_mad QP%d\n", 3098 get_spl_qp_index(qp_type)); 3099 ret = PTR_ERR(qp_info->qp); 3100 goto error; 3101 } 3102 /* Use minimum queue sizes unless the CQ is resized */ 3103 qp_info->send_queue.max_active = mad_sendq_size; 3104 qp_info->recv_queue.max_active = mad_recvq_size; 3105 return 0; 3106 3107 error: 3108 return ret; 3109 } 3110 3111 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) 3112 { 3113 if (!qp_info->qp) 3114 return; 3115 3116 ib_destroy_qp(qp_info->qp); 3117 kfree(qp_info->snoop_table); 3118 } 3119 3120 /* 3121 * Open the port 3122 * Create the QP, PD, MR, and CQ if needed 3123 */ 3124 static int ib_mad_port_open(struct ib_device *device, 3125 int port_num) 3126 { 3127 int ret, cq_size; 3128 struct ib_mad_port_private *port_priv; 3129 unsigned long flags; 3130 char name[sizeof "ib_mad123"]; 3131 int has_smi; 3132 3133 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) 3134 return -EFAULT; 3135 3136 if (WARN_ON(rdma_cap_opa_mad(device, port_num) && 3137 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) 3138 return -EFAULT; 3139 3140 /* Create new device info */ 3141 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); 3142 if (!port_priv) { 3143 dev_err(&device->dev, "No memory for ib_mad_port_private\n"); 3144 return -ENOMEM; 3145 } 3146 3147 port_priv->device = device; 3148 port_priv->port_num = port_num; 3149 spin_lock_init(&port_priv->reg_lock); 3150 INIT_LIST_HEAD(&port_priv->agent_list); 3151 init_mad_qp(port_priv, &port_priv->qp_info[0]); 3152 init_mad_qp(port_priv, &port_priv->qp_info[1]); 3153 3154 cq_size = mad_sendq_size + mad_recvq_size; 3155 has_smi = rdma_cap_ib_smi(device, port_num); 3156 if (has_smi) 3157 cq_size *= 2; 3158 3159 
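/*
 * A single CQ services the send and receive queues of both special QPs,
 * so it was sized above for one QP's send and receive queues and doubled
 * when an SMI QP (QP0) will be created alongside the GSI QP (QP1).
 */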
port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, 3160 IB_POLL_WORKQUEUE); 3161 if (IS_ERR(port_priv->cq)) { 3162 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); 3163 ret = PTR_ERR(port_priv->cq); 3164 goto error3; 3165 } 3166 3167 port_priv->pd = ib_alloc_pd(device); 3168 if (IS_ERR(port_priv->pd)) { 3169 dev_err(&device->dev, "Couldn't create ib_mad PD\n"); 3170 ret = PTR_ERR(port_priv->pd); 3171 goto error4; 3172 } 3173 3174 if (has_smi) { 3175 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); 3176 if (ret) 3177 goto error6; 3178 } 3179 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); 3180 if (ret) 3181 goto error7; 3182 3183 snprintf(name, sizeof name, "ib_mad%d", port_num); 3184 port_priv->wq = create_singlethread_workqueue(name); 3185 if (!port_priv->wq) { 3186 ret = -ENOMEM; 3187 goto error8; 3188 } 3189 3190 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3191 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 3192 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3193 3194 ret = ib_mad_port_start(port_priv); 3195 if (ret) { 3196 dev_err(&device->dev, "Couldn't start port\n"); 3197 goto error9; 3198 } 3199 3200 return 0; 3201 3202 error9: 3203 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3204 list_del_init(&port_priv->port_list); 3205 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3206 3207 destroy_workqueue(port_priv->wq); 3208 error8: 3209 destroy_mad_qp(&port_priv->qp_info[1]); 3210 error7: 3211 destroy_mad_qp(&port_priv->qp_info[0]); 3212 error6: 3213 ib_dealloc_pd(port_priv->pd); 3214 error4: 3215 ib_free_cq(port_priv->cq); 3216 cleanup_recv_queue(&port_priv->qp_info[1]); 3217 cleanup_recv_queue(&port_priv->qp_info[0]); 3218 error3: 3219 kfree(port_priv); 3220 3221 return ret; 3222 } 3223 3224 /* 3225 * Close the port 3226 * If there are no classes using the port, free the port 3227 * resources (CQ, MR, PD, QP) and remove the port's info structure 3228 */ 3229 static int ib_mad_port_close(struct ib_device *device, int port_num) 3230 { 3231 struct ib_mad_port_private *port_priv; 3232 unsigned long flags; 3233 3234 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3235 port_priv = __ib_get_mad_port(device, port_num); 3236 if (port_priv == NULL) { 3237 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3238 dev_err(&device->dev, "Port %d not found\n", port_num); 3239 return -ENODEV; 3240 } 3241 list_del_init(&port_priv->port_list); 3242 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3243 3244 destroy_workqueue(port_priv->wq); 3245 destroy_mad_qp(&port_priv->qp_info[1]); 3246 destroy_mad_qp(&port_priv->qp_info[0]); 3247 ib_dealloc_pd(port_priv->pd); 3248 ib_free_cq(port_priv->cq); 3249 cleanup_recv_queue(&port_priv->qp_info[1]); 3250 cleanup_recv_queue(&port_priv->qp_info[0]); 3251 /* XXX: Handle deallocation of MAD registration tables */ 3252 3253 kfree(port_priv); 3254 3255 return 0; 3256 } 3257 3258 static void ib_mad_init_device(struct ib_device *device) 3259 { 3260 int start, i; 3261 3262 start = rdma_start_port(device); 3263 3264 for (i = start; i <= rdma_end_port(device); i++) { 3265 if (!rdma_cap_ib_mad(device, i)) 3266 continue; 3267 3268 if (ib_mad_port_open(device, i)) { 3269 dev_err(&device->dev, "Couldn't open port %d\n", i); 3270 goto error; 3271 } 3272 if (ib_agent_port_open(device, i)) { 3273 dev_err(&device->dev, 3274 "Couldn't open port %d for agents\n", i); 3275 goto error_agent; 3276 } 3277 } 3278 return; 3279 3280 error_agent: 3281 if (ib_mad_port_close(device, i)) 3282 
dev_err(&device->dev, "Couldn't close port %d\n", i); 3283 3284 error: 3285 while (--i >= start) { 3286 if (!rdma_cap_ib_mad(device, i)) 3287 continue; 3288 3289 if (ib_agent_port_close(device, i)) 3290 dev_err(&device->dev, 3291 "Couldn't close port %d for agents\n", i); 3292 if (ib_mad_port_close(device, i)) 3293 dev_err(&device->dev, "Couldn't close port %d\n", i); 3294 } 3295 } 3296 3297 static void ib_mad_remove_device(struct ib_device *device, void *client_data) 3298 { 3299 int i; 3300 3301 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { 3302 if (!rdma_cap_ib_mad(device, i)) 3303 continue; 3304 3305 if (ib_agent_port_close(device, i)) 3306 dev_err(&device->dev, 3307 "Couldn't close port %d for agents\n", i); 3308 if (ib_mad_port_close(device, i)) 3309 dev_err(&device->dev, "Couldn't close port %d\n", i); 3310 } 3311 } 3312 3313 static struct ib_client mad_client = { 3314 .name = "mad", 3315 .add = ib_mad_init_device, 3316 .remove = ib_mad_remove_device 3317 }; 3318 3319 static int __init ib_mad_init_module(void) 3320 { 3321 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); 3322 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); 3323 3324 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); 3325 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); 3326 3327 INIT_LIST_HEAD(&ib_mad_port_list); 3328 3329 if (ib_register_client(&mad_client)) { 3330 pr_err("Couldn't register ib_mad client\n"); 3331 return -EINVAL; 3332 } 3333 3334 return 0; 3335 } 3336 3337 static void __exit ib_mad_cleanup_module(void) 3338 { 3339 ib_unregister_client(&mad_client); 3340 } 3341 3342 module_init(ib_mad_init_module); 3343 module_exit(ib_mad_cleanup_module); 3344
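/*
 * Usage sketch (illustrative only, not part of the module logic): a client
 * that holds a MAD agent and an outstanding send buffer - 'agent' and
 * 'send_buf' below are placeholders assumed to come from the client's own
 * registration and send path - can shorten or cancel a request's timeout
 * with the interfaces exported above.  A nonzero timeout in milliseconds
 * reschedules the request; a timeout of 0 (or ib_cancel_mad()) flushes it
 * back to the client's send handler with IB_WC_WR_FLUSH_ERR:
 *
 *	if (ib_modify_mad(agent, send_buf, 500))
 *		;	(* -EINVAL: request already completed or not found *)
 *	...
 *	ib_cancel_mad(agent, send_buf);	(* equivalent to a timeout of 0 *)
 */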