1 /* 2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. 3 * Copyright (c) 2005 Intel Corporation. All rights reserved. 4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. 5 * Copyright (c) 2009 HNR Consulting. All rights reserved. 6 * Copyright (c) 2014 Intel Corporation. All rights reserved. 7 * 8 * This software is available to you under a choice of one of two 9 * licenses. You may choose to be licensed under the terms of the GNU 10 * General Public License (GPL) Version 2, available from the file 11 * COPYING in the main directory of this source tree, or the 12 * OpenIB.org BSD license below: 13 * 14 * Redistribution and use in source and binary forms, with or 15 * without modification, are permitted provided that the following 16 * conditions are met: 17 * 18 * - Redistributions of source code must retain the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer. 21 * 22 * - Redistributions in binary form must reproduce the above 23 * copyright notice, this list of conditions and the following 24 * disclaimer in the documentation and/or other materials 25 * provided with the distribution. 26 * 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34 * SOFTWARE. 35 * 36 */ 37 38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 39 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/module.h> 43 #include <rdma/ib_cache.h> 44 45 #include "mad_priv.h" 46 #include "mad_rmpp.h" 47 #include "smi.h" 48 #include "opa_smi.h" 49 #include "agent.h" 50 #include "core_priv.h" 51 52 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE; 53 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE; 54 55 module_param_named(send_queue_size, mad_sendq_size, int, 0444); 56 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests"); 57 module_param_named(recv_queue_size, mad_recvq_size, int, 0444); 58 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); 59 60 static struct list_head ib_mad_port_list; 61 static u32 ib_mad_client_id = 0; 62 63 /* Port list lock */ 64 static DEFINE_SPINLOCK(ib_mad_port_list_lock); 65 66 /* Forward declarations */ 67 static int method_in_use(struct ib_mad_mgmt_method_table **method, 68 struct ib_mad_reg_req *mad_reg_req); 69 static void remove_mad_reg_req(struct ib_mad_agent_private *priv); 70 static struct ib_mad_agent_private *find_mad_agent( 71 struct ib_mad_port_private *port_priv, 72 const struct ib_mad_hdr *mad); 73 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 74 struct ib_mad_private *mad); 75 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); 76 static void timeout_sends(struct work_struct *work); 77 static void local_completions(struct work_struct *work); 78 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 79 struct ib_mad_agent_private *agent_priv, 80 u8 mgmt_class); 81 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, 82 struct ib_mad_agent_private *agent_priv); 83 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, 84 struct 
ib_wc *wc); 85 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc); 86 87 /* 88 * Returns a ib_mad_port_private structure or NULL for a device/port 89 * Assumes ib_mad_port_list_lock is being held 90 */ 91 static inline struct ib_mad_port_private * 92 __ib_get_mad_port(struct ib_device *device, int port_num) 93 { 94 struct ib_mad_port_private *entry; 95 96 list_for_each_entry(entry, &ib_mad_port_list, port_list) { 97 if (entry->device == device && entry->port_num == port_num) 98 return entry; 99 } 100 return NULL; 101 } 102 103 /* 104 * Wrapper function to return a ib_mad_port_private structure or NULL 105 * for a device/port 106 */ 107 static inline struct ib_mad_port_private * 108 ib_get_mad_port(struct ib_device *device, int port_num) 109 { 110 struct ib_mad_port_private *entry; 111 unsigned long flags; 112 113 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 114 entry = __ib_get_mad_port(device, port_num); 115 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 116 117 return entry; 118 } 119 120 static inline u8 convert_mgmt_class(u8 mgmt_class) 121 { 122 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */ 123 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ? 124 0 : mgmt_class; 125 } 126 127 static int get_spl_qp_index(enum ib_qp_type qp_type) 128 { 129 switch (qp_type) 130 { 131 case IB_QPT_SMI: 132 return 0; 133 case IB_QPT_GSI: 134 return 1; 135 default: 136 return -1; 137 } 138 } 139 140 static int vendor_class_index(u8 mgmt_class) 141 { 142 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START; 143 } 144 145 static int is_vendor_class(u8 mgmt_class) 146 { 147 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) || 148 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END)) 149 return 0; 150 return 1; 151 } 152 153 static int is_vendor_oui(char *oui) 154 { 155 if (oui[0] || oui[1] || oui[2]) 156 return 1; 157 return 0; 158 } 159 160 static int is_vendor_method_in_use( 161 struct ib_mad_mgmt_vendor_class *vendor_class, 162 struct ib_mad_reg_req *mad_reg_req) 163 { 164 struct ib_mad_mgmt_method_table *method; 165 int i; 166 167 for (i = 0; i < MAX_MGMT_OUI; i++) { 168 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) { 169 method = vendor_class->method_table[i]; 170 if (method) { 171 if (method_in_use(&method, mad_reg_req)) 172 return 1; 173 else 174 break; 175 } 176 } 177 } 178 return 0; 179 } 180 181 int ib_response_mad(const struct ib_mad_hdr *hdr) 182 { 183 return ((hdr->method & IB_MGMT_METHOD_RESP) || 184 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) || 185 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) && 186 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP))); 187 } 188 EXPORT_SYMBOL(ib_response_mad); 189 190 /* 191 * ib_register_mad_agent - Register to send/receive MADs 192 */ 193 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, 194 u8 port_num, 195 enum ib_qp_type qp_type, 196 struct ib_mad_reg_req *mad_reg_req, 197 u8 rmpp_version, 198 ib_mad_send_handler send_handler, 199 ib_mad_recv_handler recv_handler, 200 void *context, 201 u32 registration_flags) 202 { 203 struct ib_mad_port_private *port_priv; 204 struct ib_mad_agent *ret = ERR_PTR(-EINVAL); 205 struct ib_mad_agent_private *mad_agent_priv; 206 struct ib_mad_reg_req *reg_req = NULL; 207 struct ib_mad_mgmt_class_table *class; 208 struct ib_mad_mgmt_vendor_class_table *vendor; 209 struct ib_mad_mgmt_vendor_class *vendor_class; 210 struct ib_mad_mgmt_method_table *method; 211 int ret2, qpn; 212 unsigned long flags; 213 u8 mgmt_class, vclass; 214 215 /* Validate parameters */ 216 qpn = 
get_spl_qp_index(qp_type); 217 if (qpn == -1) { 218 dev_notice(&device->dev, 219 "ib_register_mad_agent: invalid QP Type %d\n", 220 qp_type); 221 goto error1; 222 } 223 224 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) { 225 dev_notice(&device->dev, 226 "ib_register_mad_agent: invalid RMPP Version %u\n", 227 rmpp_version); 228 goto error1; 229 } 230 231 /* Validate MAD registration request if supplied */ 232 if (mad_reg_req) { 233 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) { 234 dev_notice(&device->dev, 235 "ib_register_mad_agent: invalid Class Version %u\n", 236 mad_reg_req->mgmt_class_version); 237 goto error1; 238 } 239 if (!recv_handler) { 240 dev_notice(&device->dev, 241 "ib_register_mad_agent: no recv_handler\n"); 242 goto error1; 243 } 244 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { 245 /* 246 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only 247 * one in this range currently allowed 248 */ 249 if (mad_reg_req->mgmt_class != 250 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 251 dev_notice(&device->dev, 252 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n", 253 mad_reg_req->mgmt_class); 254 goto error1; 255 } 256 } else if (mad_reg_req->mgmt_class == 0) { 257 /* 258 * Class 0 is reserved in IBA and is used for 259 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 260 */ 261 dev_notice(&device->dev, 262 "ib_register_mad_agent: Invalid Mgmt Class 0\n"); 263 goto error1; 264 } else if (is_vendor_class(mad_reg_req->mgmt_class)) { 265 /* 266 * If class is in "new" vendor range, 267 * ensure supplied OUI is not zero 268 */ 269 if (!is_vendor_oui(mad_reg_req->oui)) { 270 dev_notice(&device->dev, 271 "ib_register_mad_agent: No OUI specified for class 0x%x\n", 272 mad_reg_req->mgmt_class); 273 goto error1; 274 } 275 } 276 /* Make sure class supplied is consistent with RMPP */ 277 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { 278 if (rmpp_version) { 279 dev_notice(&device->dev, 280 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n", 281 mad_reg_req->mgmt_class); 282 goto error1; 283 } 284 } 285 286 /* Make sure class supplied is consistent with QP type */ 287 if (qp_type == IB_QPT_SMI) { 288 if ((mad_reg_req->mgmt_class != 289 IB_MGMT_CLASS_SUBN_LID_ROUTED) && 290 (mad_reg_req->mgmt_class != 291 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { 292 dev_notice(&device->dev, 293 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n", 294 mad_reg_req->mgmt_class); 295 goto error1; 296 } 297 } else { 298 if ((mad_reg_req->mgmt_class == 299 IB_MGMT_CLASS_SUBN_LID_ROUTED) || 300 (mad_reg_req->mgmt_class == 301 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { 302 dev_notice(&device->dev, 303 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n", 304 mad_reg_req->mgmt_class); 305 goto error1; 306 } 307 } 308 } else { 309 /* No registration request supplied */ 310 if (!send_handler) 311 goto error1; 312 if (registration_flags & IB_MAD_USER_RMPP) 313 goto error1; 314 } 315 316 /* Validate device and port */ 317 port_priv = ib_get_mad_port(device, port_num); 318 if (!port_priv) { 319 dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n"); 320 ret = ERR_PTR(-ENODEV); 321 goto error1; 322 } 323 324 /* Verify the QP requested is supported. 
For example, Ethernet devices 325 * will not have QP0 */ 326 if (!port_priv->qp_info[qpn].qp) { 327 dev_notice(&device->dev, 328 "ib_register_mad_agent: QP %d not supported\n", qpn); 329 ret = ERR_PTR(-EPROTONOSUPPORT); 330 goto error1; 331 } 332 333 /* Allocate structures */ 334 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); 335 if (!mad_agent_priv) { 336 ret = ERR_PTR(-ENOMEM); 337 goto error1; 338 } 339 340 if (mad_reg_req) { 341 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL); 342 if (!reg_req) { 343 ret = ERR_PTR(-ENOMEM); 344 goto error3; 345 } 346 } 347 348 /* Now, fill in the various structures */ 349 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; 350 mad_agent_priv->reg_req = reg_req; 351 mad_agent_priv->agent.rmpp_version = rmpp_version; 352 mad_agent_priv->agent.device = device; 353 mad_agent_priv->agent.recv_handler = recv_handler; 354 mad_agent_priv->agent.send_handler = send_handler; 355 mad_agent_priv->agent.context = context; 356 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; 357 mad_agent_priv->agent.port_num = port_num; 358 mad_agent_priv->agent.flags = registration_flags; 359 spin_lock_init(&mad_agent_priv->lock); 360 INIT_LIST_HEAD(&mad_agent_priv->send_list); 361 INIT_LIST_HEAD(&mad_agent_priv->wait_list); 362 INIT_LIST_HEAD(&mad_agent_priv->done_list); 363 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); 364 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); 365 INIT_LIST_HEAD(&mad_agent_priv->local_list); 366 INIT_WORK(&mad_agent_priv->local_work, local_completions); 367 atomic_set(&mad_agent_priv->refcount, 1); 368 init_completion(&mad_agent_priv->comp); 369 370 spin_lock_irqsave(&port_priv->reg_lock, flags); 371 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; 372 373 /* 374 * Make sure MAD registration (if supplied) 375 * is non overlapping with any existing ones 376 */ 377 if (mad_reg_req) { 378 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class); 379 if (!is_vendor_class(mgmt_class)) { 380 class = port_priv->version[mad_reg_req-> 381 mgmt_class_version].class; 382 if (class) { 383 method = class->method_table[mgmt_class]; 384 if (method) { 385 if (method_in_use(&method, 386 mad_reg_req)) 387 goto error4; 388 } 389 } 390 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, 391 mgmt_class); 392 } else { 393 /* "New" vendor class range */ 394 vendor = port_priv->version[mad_reg_req-> 395 mgmt_class_version].vendor; 396 if (vendor) { 397 vclass = vendor_class_index(mgmt_class); 398 vendor_class = vendor->vendor_class[vclass]; 399 if (vendor_class) { 400 if (is_vendor_method_in_use( 401 vendor_class, 402 mad_reg_req)) 403 goto error4; 404 } 405 } 406 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); 407 } 408 if (ret2) { 409 ret = ERR_PTR(ret2); 410 goto error4; 411 } 412 } 413 414 /* Add mad agent into port's agent list */ 415 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list); 416 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 417 418 return &mad_agent_priv->agent; 419 420 error4: 421 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 422 kfree(reg_req); 423 error3: 424 kfree(mad_agent_priv); 425 error1: 426 return ret; 427 } 428 EXPORT_SYMBOL(ib_register_mad_agent); 429 430 static inline int is_snooping_sends(int mad_snoop_flags) 431 { 432 return (mad_snoop_flags & 433 (/*IB_MAD_SNOOP_POSTED_SENDS | 434 IB_MAD_SNOOP_RMPP_SENDS |*/ 435 IB_MAD_SNOOP_SEND_COMPLETIONS /*| 436 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/)); 437 } 438 439 static inline int is_snooping_recvs(int mad_snoop_flags) 
440 { 441 return (mad_snoop_flags & 442 (IB_MAD_SNOOP_RECVS /*| 443 IB_MAD_SNOOP_RMPP_RECVS*/)); 444 } 445 446 static int register_snoop_agent(struct ib_mad_qp_info *qp_info, 447 struct ib_mad_snoop_private *mad_snoop_priv) 448 { 449 struct ib_mad_snoop_private **new_snoop_table; 450 unsigned long flags; 451 int i; 452 453 spin_lock_irqsave(&qp_info->snoop_lock, flags); 454 /* Check for empty slot in array. */ 455 for (i = 0; i < qp_info->snoop_table_size; i++) 456 if (!qp_info->snoop_table[i]) 457 break; 458 459 if (i == qp_info->snoop_table_size) { 460 /* Grow table. */ 461 new_snoop_table = krealloc(qp_info->snoop_table, 462 sizeof mad_snoop_priv * 463 (qp_info->snoop_table_size + 1), 464 GFP_ATOMIC); 465 if (!new_snoop_table) { 466 i = -ENOMEM; 467 goto out; 468 } 469 470 qp_info->snoop_table = new_snoop_table; 471 qp_info->snoop_table_size++; 472 } 473 qp_info->snoop_table[i] = mad_snoop_priv; 474 atomic_inc(&qp_info->snoop_count); 475 out: 476 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 477 return i; 478 } 479 480 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, 481 u8 port_num, 482 enum ib_qp_type qp_type, 483 int mad_snoop_flags, 484 ib_mad_snoop_handler snoop_handler, 485 ib_mad_recv_handler recv_handler, 486 void *context) 487 { 488 struct ib_mad_port_private *port_priv; 489 struct ib_mad_agent *ret; 490 struct ib_mad_snoop_private *mad_snoop_priv; 491 int qpn; 492 493 /* Validate parameters */ 494 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) || 495 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) { 496 ret = ERR_PTR(-EINVAL); 497 goto error1; 498 } 499 qpn = get_spl_qp_index(qp_type); 500 if (qpn == -1) { 501 ret = ERR_PTR(-EINVAL); 502 goto error1; 503 } 504 port_priv = ib_get_mad_port(device, port_num); 505 if (!port_priv) { 506 ret = ERR_PTR(-ENODEV); 507 goto error1; 508 } 509 /* Allocate structures */ 510 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL); 511 if (!mad_snoop_priv) { 512 ret = ERR_PTR(-ENOMEM); 513 goto error1; 514 } 515 516 /* Now, fill in the various structures */ 517 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn]; 518 mad_snoop_priv->agent.device = device; 519 mad_snoop_priv->agent.recv_handler = recv_handler; 520 mad_snoop_priv->agent.snoop_handler = snoop_handler; 521 mad_snoop_priv->agent.context = context; 522 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; 523 mad_snoop_priv->agent.port_num = port_num; 524 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; 525 init_completion(&mad_snoop_priv->comp); 526 mad_snoop_priv->snoop_index = register_snoop_agent( 527 &port_priv->qp_info[qpn], 528 mad_snoop_priv); 529 if (mad_snoop_priv->snoop_index < 0) { 530 ret = ERR_PTR(mad_snoop_priv->snoop_index); 531 goto error2; 532 } 533 534 atomic_set(&mad_snoop_priv->refcount, 1); 535 return &mad_snoop_priv->agent; 536 537 error2: 538 kfree(mad_snoop_priv); 539 error1: 540 return ret; 541 } 542 EXPORT_SYMBOL(ib_register_mad_snoop); 543 544 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv) 545 { 546 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 547 complete(&mad_agent_priv->comp); 548 } 549 550 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv) 551 { 552 if (atomic_dec_and_test(&mad_snoop_priv->refcount)) 553 complete(&mad_snoop_priv->comp); 554 } 555 556 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) 557 { 558 struct ib_mad_port_private *port_priv; 559 unsigned long flags; 560 561 /* Note that we 
could still be handling received MADs */ 562 563 /* 564 * Canceling all sends results in dropping received response 565 * MADs, preventing us from queuing additional work 566 */ 567 cancel_mads(mad_agent_priv); 568 port_priv = mad_agent_priv->qp_info->port_priv; 569 cancel_delayed_work(&mad_agent_priv->timed_work); 570 571 spin_lock_irqsave(&port_priv->reg_lock, flags); 572 remove_mad_reg_req(mad_agent_priv); 573 list_del(&mad_agent_priv->agent_list); 574 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 575 576 flush_workqueue(port_priv->wq); 577 ib_cancel_rmpp_recvs(mad_agent_priv); 578 579 deref_mad_agent(mad_agent_priv); 580 wait_for_completion(&mad_agent_priv->comp); 581 582 kfree(mad_agent_priv->reg_req); 583 kfree(mad_agent_priv); 584 } 585 586 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) 587 { 588 struct ib_mad_qp_info *qp_info; 589 unsigned long flags; 590 591 qp_info = mad_snoop_priv->qp_info; 592 spin_lock_irqsave(&qp_info->snoop_lock, flags); 593 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL; 594 atomic_dec(&qp_info->snoop_count); 595 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 596 597 deref_snoop_agent(mad_snoop_priv); 598 wait_for_completion(&mad_snoop_priv->comp); 599 600 kfree(mad_snoop_priv); 601 } 602 603 /* 604 * ib_unregister_mad_agent - Unregisters a client from using MAD services 605 */ 606 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent) 607 { 608 struct ib_mad_agent_private *mad_agent_priv; 609 struct ib_mad_snoop_private *mad_snoop_priv; 610 611 /* If the TID is zero, the agent can only snoop. */ 612 if (mad_agent->hi_tid) { 613 mad_agent_priv = container_of(mad_agent, 614 struct ib_mad_agent_private, 615 agent); 616 unregister_mad_agent(mad_agent_priv); 617 } else { 618 mad_snoop_priv = container_of(mad_agent, 619 struct ib_mad_snoop_private, 620 agent); 621 unregister_mad_snoop(mad_snoop_priv); 622 } 623 return 0; 624 } 625 EXPORT_SYMBOL(ib_unregister_mad_agent); 626 627 static void dequeue_mad(struct ib_mad_list_head *mad_list) 628 { 629 struct ib_mad_queue *mad_queue; 630 unsigned long flags; 631 632 BUG_ON(!mad_list->mad_queue); 633 mad_queue = mad_list->mad_queue; 634 spin_lock_irqsave(&mad_queue->lock, flags); 635 list_del(&mad_list->list); 636 mad_queue->count--; 637 spin_unlock_irqrestore(&mad_queue->lock, flags); 638 } 639 640 static void snoop_send(struct ib_mad_qp_info *qp_info, 641 struct ib_mad_send_buf *send_buf, 642 struct ib_mad_send_wc *mad_send_wc, 643 int mad_snoop_flags) 644 { 645 struct ib_mad_snoop_private *mad_snoop_priv; 646 unsigned long flags; 647 int i; 648 649 spin_lock_irqsave(&qp_info->snoop_lock, flags); 650 for (i = 0; i < qp_info->snoop_table_size; i++) { 651 mad_snoop_priv = qp_info->snoop_table[i]; 652 if (!mad_snoop_priv || 653 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags)) 654 continue; 655 656 atomic_inc(&mad_snoop_priv->refcount); 657 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 658 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, 659 send_buf, mad_send_wc); 660 deref_snoop_agent(mad_snoop_priv); 661 spin_lock_irqsave(&qp_info->snoop_lock, flags); 662 } 663 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 664 } 665 666 static void snoop_recv(struct ib_mad_qp_info *qp_info, 667 struct ib_mad_recv_wc *mad_recv_wc, 668 int mad_snoop_flags) 669 { 670 struct ib_mad_snoop_private *mad_snoop_priv; 671 unsigned long flags; 672 int i; 673 674 spin_lock_irqsave(&qp_info->snoop_lock, flags); 675 for (i = 0; i < qp_info->snoop_table_size; 
i++) { 676 mad_snoop_priv = qp_info->snoop_table[i]; 677 if (!mad_snoop_priv || 678 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags)) 679 continue; 680 681 atomic_inc(&mad_snoop_priv->refcount); 682 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 683 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL, 684 mad_recv_wc); 685 deref_snoop_agent(mad_snoop_priv); 686 spin_lock_irqsave(&qp_info->snoop_lock, flags); 687 } 688 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 689 } 690 691 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid, 692 u16 pkey_index, u8 port_num, struct ib_wc *wc) 693 { 694 memset(wc, 0, sizeof *wc); 695 wc->wr_cqe = cqe; 696 wc->status = IB_WC_SUCCESS; 697 wc->opcode = IB_WC_RECV; 698 wc->pkey_index = pkey_index; 699 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); 700 wc->src_qp = IB_QP0; 701 wc->qp = qp; 702 wc->slid = slid; 703 wc->sl = 0; 704 wc->dlid_path_bits = 0; 705 wc->port_num = port_num; 706 } 707 708 static size_t mad_priv_size(const struct ib_mad_private *mp) 709 { 710 return sizeof(struct ib_mad_private) + mp->mad_size; 711 } 712 713 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags) 714 { 715 size_t size = sizeof(struct ib_mad_private) + mad_size; 716 struct ib_mad_private *ret = kzalloc(size, flags); 717 718 if (ret) 719 ret->mad_size = mad_size; 720 721 return ret; 722 } 723 724 static size_t port_mad_size(const struct ib_mad_port_private *port_priv) 725 { 726 return rdma_max_mad_size(port_priv->device, port_priv->port_num); 727 } 728 729 static size_t mad_priv_dma_size(const struct ib_mad_private *mp) 730 { 731 return sizeof(struct ib_grh) + mp->mad_size; 732 } 733 734 /* 735 * Return 0 if SMP is to be sent 736 * Return 1 if SMP was consumed locally (whether or not solicited) 737 * Return < 0 if error 738 */ 739 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, 740 struct ib_mad_send_wr_private *mad_send_wr) 741 { 742 int ret = 0; 743 struct ib_smp *smp = mad_send_wr->send_buf.mad; 744 struct opa_smp *opa_smp = (struct opa_smp *)smp; 745 unsigned long flags; 746 struct ib_mad_local_private *local; 747 struct ib_mad_private *mad_priv; 748 struct ib_mad_port_private *port_priv; 749 struct ib_mad_agent_private *recv_mad_agent = NULL; 750 struct ib_device *device = mad_agent_priv->agent.device; 751 u8 port_num; 752 struct ib_wc mad_wc; 753 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr; 754 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); 755 u16 out_mad_pkey_index = 0; 756 u16 drslid; 757 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 758 mad_agent_priv->qp_info->port_priv->port_num); 759 760 if (rdma_cap_ib_switch(device) && 761 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 762 port_num = send_wr->port_num; 763 else 764 port_num = mad_agent_priv->agent.port_num; 765 766 /* 767 * Directed route handling starts if the initial LID routed part of 768 * a request or the ending LID routed part of a response is empty. 769 * If we are at the start of the LID routed part, don't update the 770 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. 771 */ 772 if (opa && smp->class_version == OPA_SM_CLASS_VERSION) { 773 u32 opa_drslid; 774 775 if ((opa_get_smp_direction(opa_smp) 776 ? 
opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == 777 OPA_LID_PERMISSIVE && 778 opa_smi_handle_dr_smp_send(opa_smp, 779 rdma_cap_ib_switch(device), 780 port_num) == IB_SMI_DISCARD) { 781 ret = -EINVAL; 782 dev_err(&device->dev, "OPA Invalid directed route\n"); 783 goto out; 784 } 785 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); 786 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) && 787 opa_drslid & 0xffff0000) { 788 ret = -EINVAL; 789 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", 790 opa_drslid); 791 goto out; 792 } 793 drslid = (u16)(opa_drslid & 0x0000ffff); 794 795 /* Check to post send on QP or process locally */ 796 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD && 797 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD) 798 goto out; 799 } else { 800 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == 801 IB_LID_PERMISSIVE && 802 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) == 803 IB_SMI_DISCARD) { 804 ret = -EINVAL; 805 dev_err(&device->dev, "Invalid directed route\n"); 806 goto out; 807 } 808 drslid = be16_to_cpu(smp->dr_slid); 809 810 /* Check to post send on QP or process locally */ 811 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD && 812 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD) 813 goto out; 814 } 815 816 local = kmalloc(sizeof *local, GFP_ATOMIC); 817 if (!local) { 818 ret = -ENOMEM; 819 goto out; 820 } 821 local->mad_priv = NULL; 822 local->recv_mad_agent = NULL; 823 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC); 824 if (!mad_priv) { 825 ret = -ENOMEM; 826 kfree(local); 827 goto out; 828 } 829 830 build_smp_wc(mad_agent_priv->agent.qp, 831 send_wr->wr.wr_cqe, drslid, 832 send_wr->pkey_index, 833 send_wr->port_num, &mad_wc); 834 835 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) { 836 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len 837 + mad_send_wr->send_buf.data_len 838 + sizeof(struct ib_grh); 839 } 840 841 /* No GRH for DR SMP */ 842 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL, 843 (const struct ib_mad_hdr *)smp, mad_size, 844 (struct ib_mad_hdr *)mad_priv->mad, 845 &mad_size, &out_mad_pkey_index); 846 switch (ret) 847 { 848 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: 849 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) && 850 mad_agent_priv->agent.recv_handler) { 851 local->mad_priv = mad_priv; 852 local->recv_mad_agent = mad_agent_priv; 853 /* 854 * Reference MAD agent until receive 855 * side of local completion handled 856 */ 857 atomic_inc(&mad_agent_priv->refcount); 858 } else 859 kfree(mad_priv); 860 break; 861 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: 862 kfree(mad_priv); 863 break; 864 case IB_MAD_RESULT_SUCCESS: 865 /* Treat like an incoming receive MAD */ 866 port_priv = ib_get_mad_port(mad_agent_priv->agent.device, 867 mad_agent_priv->agent.port_num); 868 if (port_priv) { 869 memcpy(mad_priv->mad, smp, mad_priv->mad_size); 870 recv_mad_agent = find_mad_agent(port_priv, 871 (const struct ib_mad_hdr *)mad_priv->mad); 872 } 873 if (!port_priv || !recv_mad_agent) { 874 /* 875 * No receiving agent so drop packet and 876 * generate send completion. 
877 */ 878 kfree(mad_priv); 879 break; 880 } 881 local->mad_priv = mad_priv; 882 local->recv_mad_agent = recv_mad_agent; 883 break; 884 default: 885 kfree(mad_priv); 886 kfree(local); 887 ret = -EINVAL; 888 goto out; 889 } 890 891 local->mad_send_wr = mad_send_wr; 892 if (opa) { 893 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index; 894 local->return_wc_byte_len = mad_size; 895 } 896 /* Reference MAD agent until send side of local completion handled */ 897 atomic_inc(&mad_agent_priv->refcount); 898 /* Queue local completion to local list */ 899 spin_lock_irqsave(&mad_agent_priv->lock, flags); 900 list_add_tail(&local->completion_list, &mad_agent_priv->local_list); 901 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 902 queue_work(mad_agent_priv->qp_info->port_priv->wq, 903 &mad_agent_priv->local_work); 904 ret = 1; 905 out: 906 return ret; 907 } 908 909 static int get_pad_size(int hdr_len, int data_len, size_t mad_size) 910 { 911 int seg_size, pad; 912 913 seg_size = mad_size - hdr_len; 914 if (data_len && seg_size) { 915 pad = seg_size - data_len % seg_size; 916 return pad == seg_size ? 0 : pad; 917 } else 918 return seg_size; 919 } 920 921 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr) 922 { 923 struct ib_rmpp_segment *s, *t; 924 925 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) { 926 list_del(&s->list); 927 kfree(s); 928 } 929 } 930 931 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, 932 size_t mad_size, gfp_t gfp_mask) 933 { 934 struct ib_mad_send_buf *send_buf = &send_wr->send_buf; 935 struct ib_rmpp_mad *rmpp_mad = send_buf->mad; 936 struct ib_rmpp_segment *seg = NULL; 937 int left, seg_size, pad; 938 939 send_buf->seg_size = mad_size - send_buf->hdr_len; 940 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR; 941 seg_size = send_buf->seg_size; 942 pad = send_wr->pad; 943 944 /* Allocate data segments. 
*/ 945 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { 946 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); 947 if (!seg) { 948 free_send_rmpp_list(send_wr); 949 return -ENOMEM; 950 } 951 seg->num = ++send_buf->seg_count; 952 list_add_tail(&seg->list, &send_wr->rmpp_list); 953 } 954 955 /* Zero any padding */ 956 if (pad) 957 memset(seg->data + seg_size - pad, 0, pad); 958 959 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv-> 960 agent.rmpp_version; 961 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; 962 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); 963 964 send_wr->cur_seg = container_of(send_wr->rmpp_list.next, 965 struct ib_rmpp_segment, list); 966 send_wr->last_ack_seg = send_wr->cur_seg; 967 return 0; 968 } 969 970 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent) 971 { 972 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); 973 } 974 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent); 975 976 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, 977 u32 remote_qpn, u16 pkey_index, 978 int rmpp_active, 979 int hdr_len, int data_len, 980 gfp_t gfp_mask, 981 u8 base_version) 982 { 983 struct ib_mad_agent_private *mad_agent_priv; 984 struct ib_mad_send_wr_private *mad_send_wr; 985 int pad, message_size, ret, size; 986 void *buf; 987 size_t mad_size; 988 bool opa; 989 990 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 991 agent); 992 993 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num); 994 995 if (opa && base_version == OPA_MGMT_BASE_VERSION) 996 mad_size = sizeof(struct opa_mad); 997 else 998 mad_size = sizeof(struct ib_mad); 999 1000 pad = get_pad_size(hdr_len, data_len, mad_size); 1001 message_size = hdr_len + data_len + pad; 1002 1003 if (ib_mad_kernel_rmpp_agent(mad_agent)) { 1004 if (!rmpp_active && message_size > mad_size) 1005 return ERR_PTR(-EINVAL); 1006 } else 1007 if (rmpp_active || message_size > mad_size) 1008 return ERR_PTR(-EINVAL); 1009 1010 size = rmpp_active ? 
hdr_len : mad_size; 1011 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask); 1012 if (!buf) 1013 return ERR_PTR(-ENOMEM); 1014 1015 mad_send_wr = buf + size; 1016 INIT_LIST_HEAD(&mad_send_wr->rmpp_list); 1017 mad_send_wr->send_buf.mad = buf; 1018 mad_send_wr->send_buf.hdr_len = hdr_len; 1019 mad_send_wr->send_buf.data_len = data_len; 1020 mad_send_wr->pad = pad; 1021 1022 mad_send_wr->mad_agent_priv = mad_agent_priv; 1023 mad_send_wr->sg_list[0].length = hdr_len; 1024 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey; 1025 1026 /* OPA MADs don't have to be the full 2048 bytes */ 1027 if (opa && base_version == OPA_MGMT_BASE_VERSION && 1028 data_len < mad_size - hdr_len) 1029 mad_send_wr->sg_list[1].length = data_len; 1030 else 1031 mad_send_wr->sg_list[1].length = mad_size - hdr_len; 1032 1033 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey; 1034 1035 mad_send_wr->mad_list.cqe.done = ib_mad_send_done; 1036 1037 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; 1038 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list; 1039 mad_send_wr->send_wr.wr.num_sge = 2; 1040 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND; 1041 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED; 1042 mad_send_wr->send_wr.remote_qpn = remote_qpn; 1043 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY; 1044 mad_send_wr->send_wr.pkey_index = pkey_index; 1045 1046 if (rmpp_active) { 1047 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask); 1048 if (ret) { 1049 kfree(buf); 1050 return ERR_PTR(ret); 1051 } 1052 } 1053 1054 mad_send_wr->send_buf.mad_agent = mad_agent; 1055 atomic_inc(&mad_agent_priv->refcount); 1056 return &mad_send_wr->send_buf; 1057 } 1058 EXPORT_SYMBOL(ib_create_send_mad); 1059 1060 int ib_get_mad_data_offset(u8 mgmt_class) 1061 { 1062 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 1063 return IB_MGMT_SA_HDR; 1064 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || 1065 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || 1066 (mgmt_class == IB_MGMT_CLASS_BIS)) 1067 return IB_MGMT_DEVICE_HDR; 1068 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 1069 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 1070 return IB_MGMT_VENDOR_HDR; 1071 else 1072 return IB_MGMT_MAD_HDR; 1073 } 1074 EXPORT_SYMBOL(ib_get_mad_data_offset); 1075 1076 int ib_is_mad_class_rmpp(u8 mgmt_class) 1077 { 1078 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) || 1079 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || 1080 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || 1081 (mgmt_class == IB_MGMT_CLASS_BIS) || 1082 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 1083 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))) 1084 return 1; 1085 return 0; 1086 } 1087 EXPORT_SYMBOL(ib_is_mad_class_rmpp); 1088 1089 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) 1090 { 1091 struct ib_mad_send_wr_private *mad_send_wr; 1092 struct list_head *list; 1093 1094 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, 1095 send_buf); 1096 list = &mad_send_wr->cur_seg->list; 1097 1098 if (mad_send_wr->cur_seg->num < seg_num) { 1099 list_for_each_entry(mad_send_wr->cur_seg, list, list) 1100 if (mad_send_wr->cur_seg->num == seg_num) 1101 break; 1102 } else if (mad_send_wr->cur_seg->num > seg_num) { 1103 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list) 1104 if (mad_send_wr->cur_seg->num == seg_num) 1105 break; 1106 } 1107 return mad_send_wr->cur_seg->data; 1108 } 1109 EXPORT_SYMBOL(ib_get_rmpp_segment); 1110 1111 static inline void *ib_get_payload(struct 
ib_mad_send_wr_private *mad_send_wr) 1112 { 1113 if (mad_send_wr->send_buf.seg_count) 1114 return ib_get_rmpp_segment(&mad_send_wr->send_buf, 1115 mad_send_wr->seg_num); 1116 else 1117 return mad_send_wr->send_buf.mad + 1118 mad_send_wr->send_buf.hdr_len; 1119 } 1120 1121 void ib_free_send_mad(struct ib_mad_send_buf *send_buf) 1122 { 1123 struct ib_mad_agent_private *mad_agent_priv; 1124 struct ib_mad_send_wr_private *mad_send_wr; 1125 1126 mad_agent_priv = container_of(send_buf->mad_agent, 1127 struct ib_mad_agent_private, agent); 1128 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, 1129 send_buf); 1130 1131 free_send_rmpp_list(mad_send_wr); 1132 kfree(send_buf->mad); 1133 deref_mad_agent(mad_agent_priv); 1134 } 1135 EXPORT_SYMBOL(ib_free_send_mad); 1136 1137 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) 1138 { 1139 struct ib_mad_qp_info *qp_info; 1140 struct list_head *list; 1141 struct ib_send_wr *bad_send_wr; 1142 struct ib_mad_agent *mad_agent; 1143 struct ib_sge *sge; 1144 unsigned long flags; 1145 int ret; 1146 1147 /* Set WR ID to find mad_send_wr upon completion */ 1148 qp_info = mad_send_wr->mad_agent_priv->qp_info; 1149 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; 1150 mad_send_wr->mad_list.cqe.done = ib_mad_send_done; 1151 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; 1152 1153 mad_agent = mad_send_wr->send_buf.mad_agent; 1154 sge = mad_send_wr->sg_list; 1155 sge[0].addr = ib_dma_map_single(mad_agent->device, 1156 mad_send_wr->send_buf.mad, 1157 sge[0].length, 1158 DMA_TO_DEVICE); 1159 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) 1160 return -ENOMEM; 1161 1162 mad_send_wr->header_mapping = sge[0].addr; 1163 1164 sge[1].addr = ib_dma_map_single(mad_agent->device, 1165 ib_get_payload(mad_send_wr), 1166 sge[1].length, 1167 DMA_TO_DEVICE); 1168 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { 1169 ib_dma_unmap_single(mad_agent->device, 1170 mad_send_wr->header_mapping, 1171 sge[0].length, DMA_TO_DEVICE); 1172 return -ENOMEM; 1173 } 1174 mad_send_wr->payload_mapping = sge[1].addr; 1175 1176 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 1177 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { 1178 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, 1179 &bad_send_wr); 1180 list = &qp_info->send_queue.list; 1181 } else { 1182 ret = 0; 1183 list = &qp_info->overflow_list; 1184 } 1185 1186 if (!ret) { 1187 qp_info->send_queue.count++; 1188 list_add_tail(&mad_send_wr->mad_list.list, list); 1189 } 1190 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 1191 if (ret) { 1192 ib_dma_unmap_single(mad_agent->device, 1193 mad_send_wr->header_mapping, 1194 sge[0].length, DMA_TO_DEVICE); 1195 ib_dma_unmap_single(mad_agent->device, 1196 mad_send_wr->payload_mapping, 1197 sge[1].length, DMA_TO_DEVICE); 1198 } 1199 return ret; 1200 } 1201 1202 /* 1203 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated 1204 * with the registered client 1205 */ 1206 int ib_post_send_mad(struct ib_mad_send_buf *send_buf, 1207 struct ib_mad_send_buf **bad_send_buf) 1208 { 1209 struct ib_mad_agent_private *mad_agent_priv; 1210 struct ib_mad_send_buf *next_send_buf; 1211 struct ib_mad_send_wr_private *mad_send_wr; 1212 unsigned long flags; 1213 int ret = -EINVAL; 1214 1215 /* Walk list of send WRs and post each on send list */ 1216 for (; send_buf; send_buf = next_send_buf) { 1217 1218 mad_send_wr = container_of(send_buf, 1219 struct ib_mad_send_wr_private, 1220 
send_buf); 1221 mad_agent_priv = mad_send_wr->mad_agent_priv; 1222 1223 if (!send_buf->mad_agent->send_handler || 1224 (send_buf->timeout_ms && 1225 !send_buf->mad_agent->recv_handler)) { 1226 ret = -EINVAL; 1227 goto error; 1228 } 1229 1230 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { 1231 if (mad_agent_priv->agent.rmpp_version) { 1232 ret = -EINVAL; 1233 goto error; 1234 } 1235 } 1236 1237 /* 1238 * Save pointer to next work request to post in case the 1239 * current one completes, and the user modifies the work 1240 * request associated with the completion 1241 */ 1242 next_send_buf = send_buf->next; 1243 mad_send_wr->send_wr.ah = send_buf->ah; 1244 1245 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == 1246 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 1247 ret = handle_outgoing_dr_smp(mad_agent_priv, 1248 mad_send_wr); 1249 if (ret < 0) /* error */ 1250 goto error; 1251 else if (ret == 1) /* locally consumed */ 1252 continue; 1253 } 1254 1255 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; 1256 /* Timeout will be updated after send completes */ 1257 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); 1258 mad_send_wr->max_retries = send_buf->retries; 1259 mad_send_wr->retries_left = send_buf->retries; 1260 send_buf->retries = 0; 1261 /* Reference for work request to QP + response */ 1262 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); 1263 mad_send_wr->status = IB_WC_SUCCESS; 1264 1265 /* Reference MAD agent until send completes */ 1266 atomic_inc(&mad_agent_priv->refcount); 1267 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1268 list_add_tail(&mad_send_wr->agent_list, 1269 &mad_agent_priv->send_list); 1270 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1271 1272 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 1273 ret = ib_send_rmpp_mad(mad_send_wr); 1274 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) 1275 ret = ib_send_mad(mad_send_wr); 1276 } else 1277 ret = ib_send_mad(mad_send_wr); 1278 if (ret < 0) { 1279 /* Fail send request */ 1280 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1281 list_del(&mad_send_wr->agent_list); 1282 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1283 atomic_dec(&mad_agent_priv->refcount); 1284 goto error; 1285 } 1286 } 1287 return 0; 1288 error: 1289 if (bad_send_buf) 1290 *bad_send_buf = send_buf; 1291 return ret; 1292 } 1293 EXPORT_SYMBOL(ib_post_send_mad); 1294 1295 /* 1296 * ib_free_recv_mad - Returns data buffers used to receive 1297 * a MAD to the access layer 1298 */ 1299 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) 1300 { 1301 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf; 1302 struct ib_mad_private_header *mad_priv_hdr; 1303 struct ib_mad_private *priv; 1304 struct list_head free_list; 1305 1306 INIT_LIST_HEAD(&free_list); 1307 list_splice_init(&mad_recv_wc->rmpp_list, &free_list); 1308 1309 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, 1310 &free_list, list) { 1311 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, 1312 recv_buf); 1313 mad_priv_hdr = container_of(mad_recv_wc, 1314 struct ib_mad_private_header, 1315 recv_wc); 1316 priv = container_of(mad_priv_hdr, struct ib_mad_private, 1317 header); 1318 kfree(priv); 1319 } 1320 } 1321 EXPORT_SYMBOL(ib_free_recv_mad); 1322 1323 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, 1324 u8 rmpp_version, 1325 ib_mad_send_handler send_handler, 1326 ib_mad_recv_handler recv_handler, 1327 void *context) 1328 { 1329 return ERR_PTR(-EINVAL); /* XXX: for now */ 
1330 } 1331 EXPORT_SYMBOL(ib_redirect_mad_qp); 1332 1333 int ib_process_mad_wc(struct ib_mad_agent *mad_agent, 1334 struct ib_wc *wc) 1335 { 1336 dev_err(&mad_agent->device->dev, 1337 "ib_process_mad_wc() not implemented yet\n"); 1338 return 0; 1339 } 1340 EXPORT_SYMBOL(ib_process_mad_wc); 1341 1342 static int method_in_use(struct ib_mad_mgmt_method_table **method, 1343 struct ib_mad_reg_req *mad_reg_req) 1344 { 1345 int i; 1346 1347 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { 1348 if ((*method)->agent[i]) { 1349 pr_err("Method %d already in use\n", i); 1350 return -EINVAL; 1351 } 1352 } 1353 return 0; 1354 } 1355 1356 static int allocate_method_table(struct ib_mad_mgmt_method_table **method) 1357 { 1358 /* Allocate management method table */ 1359 *method = kzalloc(sizeof **method, GFP_ATOMIC); 1360 return (*method) ? 0 : (-ENOMEM); 1361 } 1362 1363 /* 1364 * Check to see if there are any methods still in use 1365 */ 1366 static int check_method_table(struct ib_mad_mgmt_method_table *method) 1367 { 1368 int i; 1369 1370 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) 1371 if (method->agent[i]) 1372 return 1; 1373 return 0; 1374 } 1375 1376 /* 1377 * Check to see if there are any method tables for this class still in use 1378 */ 1379 static int check_class_table(struct ib_mad_mgmt_class_table *class) 1380 { 1381 int i; 1382 1383 for (i = 0; i < MAX_MGMT_CLASS; i++) 1384 if (class->method_table[i]) 1385 return 1; 1386 return 0; 1387 } 1388 1389 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class) 1390 { 1391 int i; 1392 1393 for (i = 0; i < MAX_MGMT_OUI; i++) 1394 if (vendor_class->method_table[i]) 1395 return 1; 1396 return 0; 1397 } 1398 1399 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, 1400 const char *oui) 1401 { 1402 int i; 1403 1404 for (i = 0; i < MAX_MGMT_OUI; i++) 1405 /* Is there matching OUI for this vendor class ? 
*/ 1406 if (!memcmp(vendor_class->oui[i], oui, 3)) 1407 return i; 1408 1409 return -1; 1410 } 1411 1412 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor) 1413 { 1414 int i; 1415 1416 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++) 1417 if (vendor->vendor_class[i]) 1418 return 1; 1419 1420 return 0; 1421 } 1422 1423 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, 1424 struct ib_mad_agent_private *agent) 1425 { 1426 int i; 1427 1428 /* Remove any methods for this mad agent */ 1429 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) { 1430 if (method->agent[i] == agent) { 1431 method->agent[i] = NULL; 1432 } 1433 } 1434 } 1435 1436 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, 1437 struct ib_mad_agent_private *agent_priv, 1438 u8 mgmt_class) 1439 { 1440 struct ib_mad_port_private *port_priv; 1441 struct ib_mad_mgmt_class_table **class; 1442 struct ib_mad_mgmt_method_table **method; 1443 int i, ret; 1444 1445 port_priv = agent_priv->qp_info->port_priv; 1446 class = &port_priv->version[mad_reg_req->mgmt_class_version].class; 1447 if (!*class) { 1448 /* Allocate management class table for "new" class version */ 1449 *class = kzalloc(sizeof **class, GFP_ATOMIC); 1450 if (!*class) { 1451 ret = -ENOMEM; 1452 goto error1; 1453 } 1454 1455 /* Allocate method table for this management class */ 1456 method = &(*class)->method_table[mgmt_class]; 1457 if ((ret = allocate_method_table(method))) 1458 goto error2; 1459 } else { 1460 method = &(*class)->method_table[mgmt_class]; 1461 if (!*method) { 1462 /* Allocate method table for this management class */ 1463 if ((ret = allocate_method_table(method))) 1464 goto error1; 1465 } 1466 } 1467 1468 /* Now, make sure methods are not already in use */ 1469 if (method_in_use(method, mad_reg_req)) 1470 goto error3; 1471 1472 /* Finally, add in methods being registered */ 1473 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) 1474 (*method)->agent[i] = agent_priv; 1475 1476 return 0; 1477 1478 error3: 1479 /* Remove any methods for this mad agent */ 1480 remove_methods_mad_agent(*method, agent_priv); 1481 /* Now, check to see if there are any methods in use */ 1482 if (!check_method_table(*method)) { 1483 /* If not, release management method table */ 1484 kfree(*method); 1485 *method = NULL; 1486 } 1487 ret = -EINVAL; 1488 goto error1; 1489 error2: 1490 kfree(*class); 1491 *class = NULL; 1492 error1: 1493 return ret; 1494 } 1495 1496 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, 1497 struct ib_mad_agent_private *agent_priv) 1498 { 1499 struct ib_mad_port_private *port_priv; 1500 struct ib_mad_mgmt_vendor_class_table **vendor_table; 1501 struct ib_mad_mgmt_vendor_class_table *vendor = NULL; 1502 struct ib_mad_mgmt_vendor_class *vendor_class = NULL; 1503 struct ib_mad_mgmt_method_table **method; 1504 int i, ret = -ENOMEM; 1505 u8 vclass; 1506 1507 /* "New" vendor (with OUI) class */ 1508 vclass = vendor_class_index(mad_reg_req->mgmt_class); 1509 port_priv = agent_priv->qp_info->port_priv; 1510 vendor_table = &port_priv->version[ 1511 mad_reg_req->mgmt_class_version].vendor; 1512 if (!*vendor_table) { 1513 /* Allocate mgmt vendor class table for "new" class version */ 1514 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); 1515 if (!vendor) 1516 goto error1; 1517 1518 *vendor_table = vendor; 1519 } 1520 if (!(*vendor_table)->vendor_class[vclass]) { 1521 /* Allocate table for this management vendor class */ 1522 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); 1523 if 
(!vendor_class) 1524 goto error2; 1525 1526 (*vendor_table)->vendor_class[vclass] = vendor_class; 1527 } 1528 for (i = 0; i < MAX_MGMT_OUI; i++) { 1529 /* Is there matching OUI for this vendor class ? */ 1530 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], 1531 mad_reg_req->oui, 3)) { 1532 method = &(*vendor_table)->vendor_class[ 1533 vclass]->method_table[i]; 1534 BUG_ON(!*method); 1535 goto check_in_use; 1536 } 1537 } 1538 for (i = 0; i < MAX_MGMT_OUI; i++) { 1539 /* OUI slot available ? */ 1540 if (!is_vendor_oui((*vendor_table)->vendor_class[ 1541 vclass]->oui[i])) { 1542 method = &(*vendor_table)->vendor_class[ 1543 vclass]->method_table[i]; 1544 BUG_ON(*method); 1545 /* Allocate method table for this OUI */ 1546 if ((ret = allocate_method_table(method))) 1547 goto error3; 1548 memcpy((*vendor_table)->vendor_class[vclass]->oui[i], 1549 mad_reg_req->oui, 3); 1550 goto check_in_use; 1551 } 1552 } 1553 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); 1554 goto error3; 1555 1556 check_in_use: 1557 /* Now, make sure methods are not already in use */ 1558 if (method_in_use(method, mad_reg_req)) 1559 goto error4; 1560 1561 /* Finally, add in methods being registered */ 1562 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) 1563 (*method)->agent[i] = agent_priv; 1564 1565 return 0; 1566 1567 error4: 1568 /* Remove any methods for this mad agent */ 1569 remove_methods_mad_agent(*method, agent_priv); 1570 /* Now, check to see if there are any methods in use */ 1571 if (!check_method_table(*method)) { 1572 /* If not, release management method table */ 1573 kfree(*method); 1574 *method = NULL; 1575 } 1576 ret = -EINVAL; 1577 error3: 1578 if (vendor_class) { 1579 (*vendor_table)->vendor_class[vclass] = NULL; 1580 kfree(vendor_class); 1581 } 1582 error2: 1583 if (vendor) { 1584 *vendor_table = NULL; 1585 kfree(vendor); 1586 } 1587 error1: 1588 return ret; 1589 } 1590 1591 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) 1592 { 1593 struct ib_mad_port_private *port_priv; 1594 struct ib_mad_mgmt_class_table *class; 1595 struct ib_mad_mgmt_method_table *method; 1596 struct ib_mad_mgmt_vendor_class_table *vendor; 1597 struct ib_mad_mgmt_vendor_class *vendor_class; 1598 int index; 1599 u8 mgmt_class; 1600 1601 /* 1602 * Was MAD registration request supplied 1603 * with original registration ? 1604 */ 1605 if (!agent_priv->reg_req) { 1606 goto out; 1607 } 1608 1609 port_priv = agent_priv->qp_info->port_priv; 1610 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); 1611 class = port_priv->version[ 1612 agent_priv->reg_req->mgmt_class_version].class; 1613 if (!class) 1614 goto vendor_check; 1615 1616 method = class->method_table[mgmt_class]; 1617 if (method) { 1618 /* Remove any methods for this mad agent */ 1619 remove_methods_mad_agent(method, agent_priv); 1620 /* Now, check to see if there are any methods still in use */ 1621 if (!check_method_table(method)) { 1622 /* If not, release management method table */ 1623 kfree(method); 1624 class->method_table[mgmt_class] = NULL; 1625 /* Any management classes left ? 
*/ 1626 if (!check_class_table(class)) { 1627 /* If not, release management class table */ 1628 kfree(class); 1629 port_priv->version[ 1630 agent_priv->reg_req-> 1631 mgmt_class_version].class = NULL; 1632 } 1633 } 1634 } 1635 1636 vendor_check: 1637 if (!is_vendor_class(mgmt_class)) 1638 goto out; 1639 1640 /* normalize mgmt_class to vendor range 2 */ 1641 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); 1642 vendor = port_priv->version[ 1643 agent_priv->reg_req->mgmt_class_version].vendor; 1644 1645 if (!vendor) 1646 goto out; 1647 1648 vendor_class = vendor->vendor_class[mgmt_class]; 1649 if (vendor_class) { 1650 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); 1651 if (index < 0) 1652 goto out; 1653 method = vendor_class->method_table[index]; 1654 if (method) { 1655 /* Remove any methods for this mad agent */ 1656 remove_methods_mad_agent(method, agent_priv); 1657 /* 1658 * Now, check to see if there are 1659 * any methods still in use 1660 */ 1661 if (!check_method_table(method)) { 1662 /* If not, release management method table */ 1663 kfree(method); 1664 vendor_class->method_table[index] = NULL; 1665 memset(vendor_class->oui[index], 0, 3); 1666 /* Any OUIs left ? */ 1667 if (!check_vendor_class(vendor_class)) { 1668 /* If not, release vendor class table */ 1669 kfree(vendor_class); 1670 vendor->vendor_class[mgmt_class] = NULL; 1671 /* Any other vendor classes left ? */ 1672 if (!check_vendor_table(vendor)) { 1673 kfree(vendor); 1674 port_priv->version[ 1675 agent_priv->reg_req-> 1676 mgmt_class_version]. 1677 vendor = NULL; 1678 } 1679 } 1680 } 1681 } 1682 } 1683 1684 out: 1685 return; 1686 } 1687 1688 static struct ib_mad_agent_private * 1689 find_mad_agent(struct ib_mad_port_private *port_priv, 1690 const struct ib_mad_hdr *mad_hdr) 1691 { 1692 struct ib_mad_agent_private *mad_agent = NULL; 1693 unsigned long flags; 1694 1695 spin_lock_irqsave(&port_priv->reg_lock, flags); 1696 if (ib_response_mad(mad_hdr)) { 1697 u32 hi_tid; 1698 struct ib_mad_agent_private *entry; 1699 1700 /* 1701 * Routing is based on high 32 bits of transaction ID 1702 * of MAD. 
1703 */ 1704 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; 1705 list_for_each_entry(entry, &port_priv->agent_list, agent_list) { 1706 if (entry->agent.hi_tid == hi_tid) { 1707 mad_agent = entry; 1708 break; 1709 } 1710 } 1711 } else { 1712 struct ib_mad_mgmt_class_table *class; 1713 struct ib_mad_mgmt_method_table *method; 1714 struct ib_mad_mgmt_vendor_class_table *vendor; 1715 struct ib_mad_mgmt_vendor_class *vendor_class; 1716 const struct ib_vendor_mad *vendor_mad; 1717 int index; 1718 1719 /* 1720 * Routing is based on version, class, and method 1721 * For "newer" vendor MADs, also based on OUI 1722 */ 1723 if (mad_hdr->class_version >= MAX_MGMT_VERSION) 1724 goto out; 1725 if (!is_vendor_class(mad_hdr->mgmt_class)) { 1726 class = port_priv->version[ 1727 mad_hdr->class_version].class; 1728 if (!class) 1729 goto out; 1730 if (convert_mgmt_class(mad_hdr->mgmt_class) >= 1731 ARRAY_SIZE(class->method_table)) 1732 goto out; 1733 method = class->method_table[convert_mgmt_class( 1734 mad_hdr->mgmt_class)]; 1735 if (method) 1736 mad_agent = method->agent[mad_hdr->method & 1737 ~IB_MGMT_METHOD_RESP]; 1738 } else { 1739 vendor = port_priv->version[ 1740 mad_hdr->class_version].vendor; 1741 if (!vendor) 1742 goto out; 1743 vendor_class = vendor->vendor_class[vendor_class_index( 1744 mad_hdr->mgmt_class)]; 1745 if (!vendor_class) 1746 goto out; 1747 /* Find matching OUI */ 1748 vendor_mad = (const struct ib_vendor_mad *)mad_hdr; 1749 index = find_vendor_oui(vendor_class, vendor_mad->oui); 1750 if (index == -1) 1751 goto out; 1752 method = vendor_class->method_table[index]; 1753 if (method) { 1754 mad_agent = method->agent[mad_hdr->method & 1755 ~IB_MGMT_METHOD_RESP]; 1756 } 1757 } 1758 } 1759 1760 if (mad_agent) { 1761 if (mad_agent->agent.recv_handler) 1762 atomic_inc(&mad_agent->refcount); 1763 else { 1764 dev_notice(&port_priv->device->dev, 1765 "No receive handler for client %p on port %d\n", 1766 &mad_agent->agent, port_priv->port_num); 1767 mad_agent = NULL; 1768 } 1769 } 1770 out: 1771 spin_unlock_irqrestore(&port_priv->reg_lock, flags); 1772 1773 return mad_agent; 1774 } 1775 1776 static int validate_mad(const struct ib_mad_hdr *mad_hdr, 1777 const struct ib_mad_qp_info *qp_info, 1778 bool opa) 1779 { 1780 int valid = 0; 1781 u32 qp_num = qp_info->qp->qp_num; 1782 1783 /* Make sure MAD base version is understood */ 1784 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && 1785 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { 1786 pr_err("MAD received with unsupported base version %d %s\n", 1787 mad_hdr->base_version, opa ? 
"(opa)" : ""); 1788 goto out; 1789 } 1790 1791 /* Filter SMI packets sent to other than QP0 */ 1792 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || 1793 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { 1794 if (qp_num == 0) 1795 valid = 1; 1796 } else { 1797 /* CM attributes other than ClassPortInfo only use Send method */ 1798 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && 1799 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && 1800 (mad_hdr->method != IB_MGMT_METHOD_SEND)) 1801 goto out; 1802 /* Filter GSI packets sent to QP0 */ 1803 if (qp_num != 0) 1804 valid = 1; 1805 } 1806 1807 out: 1808 return valid; 1809 } 1810 1811 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, 1812 const struct ib_mad_hdr *mad_hdr) 1813 { 1814 struct ib_rmpp_mad *rmpp_mad; 1815 1816 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; 1817 return !mad_agent_priv->agent.rmpp_version || 1818 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || 1819 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 1820 IB_MGMT_RMPP_FLAG_ACTIVE) || 1821 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); 1822 } 1823 1824 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, 1825 const struct ib_mad_recv_wc *rwc) 1826 { 1827 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == 1828 rwc->recv_buf.mad->mad_hdr.mgmt_class; 1829 } 1830 1831 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, 1832 const struct ib_mad_send_wr_private *wr, 1833 const struct ib_mad_recv_wc *rwc ) 1834 { 1835 struct ib_ah_attr attr; 1836 u8 send_resp, rcv_resp; 1837 union ib_gid sgid; 1838 struct ib_device *device = mad_agent_priv->agent.device; 1839 u8 port_num = mad_agent_priv->agent.port_num; 1840 u8 lmc; 1841 1842 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); 1843 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); 1844 1845 if (send_resp == rcv_resp) 1846 /* both requests, or both responses. GIDs different */ 1847 return 0; 1848 1849 if (ib_query_ah(wr->send_buf.ah, &attr)) 1850 /* Assume not equal, to avoid false positives. */ 1851 return 0; 1852 1853 if (!!(attr.ah_flags & IB_AH_GRH) != 1854 !!(rwc->wc->wc_flags & IB_WC_GRH)) 1855 /* one has GID, other does not. Assume different */ 1856 return 0; 1857 1858 if (!send_resp && rcv_resp) { 1859 /* is request/response. 
*/ 1860 if (!(attr.ah_flags & IB_AH_GRH)) { 1861 if (ib_get_cached_lmc(device, port_num, &lmc)) 1862 return 0; 1863 return (!lmc || !((attr.src_path_bits ^ 1864 rwc->wc->dlid_path_bits) & 1865 ((1 << lmc) - 1))); 1866 } else { 1867 if (ib_get_cached_gid(device, port_num, 1868 attr.grh.sgid_index, &sgid, NULL)) 1869 return 0; 1870 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, 1871 16); 1872 } 1873 } 1874 1875 if (!(attr.ah_flags & IB_AH_GRH)) 1876 return attr.dlid == rwc->wc->slid; 1877 else 1878 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw, 1879 16); 1880 } 1881 1882 static inline int is_direct(u8 class) 1883 { 1884 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE); 1885 } 1886 1887 struct ib_mad_send_wr_private* 1888 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, 1889 const struct ib_mad_recv_wc *wc) 1890 { 1891 struct ib_mad_send_wr_private *wr; 1892 const struct ib_mad_hdr *mad_hdr; 1893 1894 mad_hdr = &wc->recv_buf.mad->mad_hdr; 1895 1896 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { 1897 if ((wr->tid == mad_hdr->tid) && 1898 rcv_has_same_class(wr, wc) && 1899 /* 1900 * Don't check GID for direct routed MADs. 1901 * These might have permissive LIDs. 1902 */ 1903 (is_direct(mad_hdr->mgmt_class) || 1904 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1905 return (wr->status == IB_WC_SUCCESS) ? wr : NULL; 1906 } 1907 1908 /* 1909 * It's possible to receive the response before we've 1910 * been notified that the send has completed 1911 */ 1912 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { 1913 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && 1914 wr->tid == mad_hdr->tid && 1915 wr->timeout && 1916 rcv_has_same_class(wr, wc) && 1917 /* 1918 * Don't check GID for direct routed MADs. 1919 * These might have permissive LIDs. 1920 */ 1921 (is_direct(mad_hdr->mgmt_class) || 1922 rcv_has_same_gid(mad_agent_priv, wr, wc))) 1923 /* Verify request has not been canceled */ 1924 return (wr->status == IB_WC_SUCCESS) ? 
wr : NULL; 1925 } 1926 return NULL; 1927 } 1928 1929 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) 1930 { 1931 mad_send_wr->timeout = 0; 1932 if (mad_send_wr->refcount == 1) 1933 list_move_tail(&mad_send_wr->agent_list, 1934 &mad_send_wr->mad_agent_priv->done_list); 1935 } 1936 1937 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, 1938 struct ib_mad_recv_wc *mad_recv_wc) 1939 { 1940 struct ib_mad_send_wr_private *mad_send_wr; 1941 struct ib_mad_send_wc mad_send_wc; 1942 unsigned long flags; 1943 1944 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 1945 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); 1946 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 1947 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, 1948 mad_recv_wc); 1949 if (!mad_recv_wc) { 1950 deref_mad_agent(mad_agent_priv); 1951 return; 1952 } 1953 } 1954 1955 /* Complete corresponding request */ 1956 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { 1957 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1958 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); 1959 if (!mad_send_wr) { 1960 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1961 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) 1962 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) 1963 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) 1964 & IB_MGMT_RMPP_FLAG_ACTIVE)) { 1965 /* user rmpp is in effect 1966 * and this is an active RMPP MAD 1967 */ 1968 mad_agent_priv->agent.recv_handler( 1969 &mad_agent_priv->agent, NULL, 1970 mad_recv_wc); 1971 atomic_dec(&mad_agent_priv->refcount); 1972 } else { 1973 /* not user rmpp, revert to normal behavior and 1974 * drop the mad */ 1975 ib_free_recv_mad(mad_recv_wc); 1976 deref_mad_agent(mad_agent_priv); 1977 return; 1978 } 1979 } else { 1980 ib_mark_mad_done(mad_send_wr); 1981 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1982 1983 /* Defined behavior is to complete response before request */ 1984 mad_agent_priv->agent.recv_handler( 1985 &mad_agent_priv->agent, 1986 &mad_send_wr->send_buf, 1987 mad_recv_wc); 1988 atomic_dec(&mad_agent_priv->refcount); 1989 1990 mad_send_wc.status = IB_WC_SUCCESS; 1991 mad_send_wc.vendor_err = 0; 1992 mad_send_wc.send_buf = &mad_send_wr->send_buf; 1993 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 1994 } 1995 } else { 1996 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL, 1997 mad_recv_wc); 1998 deref_mad_agent(mad_agent_priv); 1999 } 2000 } 2001 2002 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, 2003 const struct ib_mad_qp_info *qp_info, 2004 const struct ib_wc *wc, 2005 int port_num, 2006 struct ib_mad_private *recv, 2007 struct ib_mad_private *response) 2008 { 2009 enum smi_forward_action retsmi; 2010 struct ib_smp *smp = (struct ib_smp *)recv->mad; 2011 2012 if (smi_handle_dr_smp_recv(smp, 2013 rdma_cap_ib_switch(port_priv->device), 2014 port_num, 2015 port_priv->device->phys_port_cnt) == 2016 IB_SMI_DISCARD) 2017 return IB_SMI_DISCARD; 2018 2019 retsmi = smi_check_forward_dr_smp(smp); 2020 if (retsmi == IB_SMI_LOCAL) 2021 return IB_SMI_HANDLE; 2022 2023 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2024 if (smi_handle_dr_smp_send(smp, 2025 rdma_cap_ib_switch(port_priv->device), 2026 port_num) == IB_SMI_DISCARD) 2027 return IB_SMI_DISCARD; 2028 2029 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) 2030 return IB_SMI_DISCARD; 2031 } else if 
(rdma_cap_ib_switch(port_priv->device)) { 2032 /* forward case for switches */ 2033 memcpy(response, recv, mad_priv_size(response)); 2034 response->header.recv_wc.wc = &response->header.wc; 2035 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2036 response->header.recv_wc.recv_buf.grh = &response->grh; 2037 2038 agent_send_response((const struct ib_mad_hdr *)response->mad, 2039 &response->grh, wc, 2040 port_priv->device, 2041 smi_get_fwd_port(smp), 2042 qp_info->qp->qp_num, 2043 response->mad_size, 2044 false); 2045 2046 return IB_SMI_DISCARD; 2047 } 2048 return IB_SMI_HANDLE; 2049 } 2050 2051 static bool generate_unmatched_resp(const struct ib_mad_private *recv, 2052 struct ib_mad_private *response, 2053 size_t *resp_len, bool opa) 2054 { 2055 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; 2056 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; 2057 2058 if (recv_hdr->method == IB_MGMT_METHOD_GET || 2059 recv_hdr->method == IB_MGMT_METHOD_SET) { 2060 memcpy(response, recv, mad_priv_size(response)); 2061 response->header.recv_wc.wc = &response->header.wc; 2062 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; 2063 response->header.recv_wc.recv_buf.grh = &response->grh; 2064 resp_hdr->method = IB_MGMT_METHOD_GET_RESP; 2065 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); 2066 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2067 resp_hdr->status |= IB_SMP_DIRECTION; 2068 2069 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { 2070 if (recv_hdr->mgmt_class == 2071 IB_MGMT_CLASS_SUBN_LID_ROUTED || 2072 recv_hdr->mgmt_class == 2073 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 2074 *resp_len = opa_get_smp_header_size( 2075 (struct opa_smp *)recv->mad); 2076 else 2077 *resp_len = sizeof(struct ib_mad_hdr); 2078 } 2079 2080 return true; 2081 } else { 2082 return false; 2083 } 2084 } 2085 2086 static enum smi_action 2087 handle_opa_smi(struct ib_mad_port_private *port_priv, 2088 struct ib_mad_qp_info *qp_info, 2089 struct ib_wc *wc, 2090 int port_num, 2091 struct ib_mad_private *recv, 2092 struct ib_mad_private *response) 2093 { 2094 enum smi_forward_action retsmi; 2095 struct opa_smp *smp = (struct opa_smp *)recv->mad; 2096 2097 if (opa_smi_handle_dr_smp_recv(smp, 2098 rdma_cap_ib_switch(port_priv->device), 2099 port_num, 2100 port_priv->device->phys_port_cnt) == 2101 IB_SMI_DISCARD) 2102 return IB_SMI_DISCARD; 2103 2104 retsmi = opa_smi_check_forward_dr_smp(smp); 2105 if (retsmi == IB_SMI_LOCAL) 2106 return IB_SMI_HANDLE; 2107 2108 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2109 if (opa_smi_handle_dr_smp_send(smp, 2110 rdma_cap_ib_switch(port_priv->device), 2111 port_num) == IB_SMI_DISCARD) 2112 return IB_SMI_DISCARD; 2113 2114 if (opa_smi_check_local_smp(smp, port_priv->device) == 2115 IB_SMI_DISCARD) 2116 return IB_SMI_DISCARD; 2117 2118 } else if (rdma_cap_ib_switch(port_priv->device)) { 2119 /* forward case for switches */ 2120 memcpy(response, recv, mad_priv_size(response)); 2121 response->header.recv_wc.wc = &response->header.wc; 2122 response->header.recv_wc.recv_buf.opa_mad = 2123 (struct opa_mad *)response->mad; 2124 response->header.recv_wc.recv_buf.grh = &response->grh; 2125 2126 agent_send_response((const struct ib_mad_hdr *)response->mad, 2127 &response->grh, wc, 2128 port_priv->device, 2129 opa_smi_get_fwd_port(smp), 2130 qp_info->qp->qp_num, 2131 recv->header.wc.byte_len, 2132 true); 2133 2134 return IB_SMI_DISCARD; 2135 } 2136 2137 return 
IB_SMI_HANDLE; 2138 } 2139 2140 static enum smi_action 2141 handle_smi(struct ib_mad_port_private *port_priv, 2142 struct ib_mad_qp_info *qp_info, 2143 struct ib_wc *wc, 2144 int port_num, 2145 struct ib_mad_private *recv, 2146 struct ib_mad_private *response, 2147 bool opa) 2148 { 2149 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; 2150 2151 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && 2152 mad_hdr->class_version == OPA_SM_CLASS_VERSION) 2153 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, 2154 response); 2155 2156 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); 2157 } 2158 2159 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) 2160 { 2161 struct ib_mad_port_private *port_priv = cq->cq_context; 2162 struct ib_mad_list_head *mad_list = 2163 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2164 struct ib_mad_qp_info *qp_info; 2165 struct ib_mad_private_header *mad_priv_hdr; 2166 struct ib_mad_private *recv, *response = NULL; 2167 struct ib_mad_agent_private *mad_agent; 2168 int port_num; 2169 int ret = IB_MAD_RESULT_SUCCESS; 2170 size_t mad_size; 2171 u16 resp_mad_pkey_index = 0; 2172 bool opa; 2173 2174 if (list_empty_careful(&port_priv->port_list)) 2175 return; 2176 2177 if (wc->status != IB_WC_SUCCESS) { 2178 /* 2179 * Receive errors indicate that the QP has entered the error 2180 * state - error handling/shutdown code will cleanup 2181 */ 2182 return; 2183 } 2184 2185 qp_info = mad_list->mad_queue->qp_info; 2186 dequeue_mad(mad_list); 2187 2188 opa = rdma_cap_opa_mad(qp_info->port_priv->device, 2189 qp_info->port_priv->port_num); 2190 2191 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, 2192 mad_list); 2193 recv = container_of(mad_priv_hdr, struct ib_mad_private, header); 2194 ib_dma_unmap_single(port_priv->device, 2195 recv->header.mapping, 2196 mad_priv_dma_size(recv), 2197 DMA_FROM_DEVICE); 2198 2199 /* Setup MAD receive work completion from "normal" work completion */ 2200 recv->header.wc = *wc; 2201 recv->header.recv_wc.wc = &recv->header.wc; 2202 2203 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { 2204 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); 2205 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2206 } else { 2207 recv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2208 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2209 } 2210 2211 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; 2212 recv->header.recv_wc.recv_buf.grh = &recv->grh; 2213 2214 if (atomic_read(&qp_info->snoop_count)) 2215 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); 2216 2217 /* Validate MAD */ 2218 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) 2219 goto out; 2220 2221 mad_size = recv->mad_size; 2222 response = alloc_mad_private(mad_size, GFP_KERNEL); 2223 if (!response) 2224 goto out; 2225 2226 if (rdma_cap_ib_switch(port_priv->device)) 2227 port_num = wc->port_num; 2228 else 2229 port_num = port_priv->port_num; 2230 2231 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == 2232 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 2233 if (handle_smi(port_priv, qp_info, wc, port_num, recv, 2234 response, opa) 2235 == IB_SMI_DISCARD) 2236 goto out; 2237 } 2238 2239 /* Give driver "right of first refusal" on incoming MAD */ 2240 if (port_priv->device->process_mad) { 2241 ret = port_priv->device->process_mad(port_priv->device, 0, 2242 port_priv->port_num, 2243 wc, &recv->grh, 
2244 (const struct ib_mad_hdr *)recv->mad, 2245 recv->mad_size, 2246 (struct ib_mad_hdr *)response->mad, 2247 &mad_size, &resp_mad_pkey_index); 2248 2249 if (opa) 2250 wc->pkey_index = resp_mad_pkey_index; 2251 2252 if (ret & IB_MAD_RESULT_SUCCESS) { 2253 if (ret & IB_MAD_RESULT_CONSUMED) 2254 goto out; 2255 if (ret & IB_MAD_RESULT_REPLY) { 2256 agent_send_response((const struct ib_mad_hdr *)response->mad, 2257 &recv->grh, wc, 2258 port_priv->device, 2259 port_num, 2260 qp_info->qp->qp_num, 2261 mad_size, opa); 2262 goto out; 2263 } 2264 } 2265 } 2266 2267 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); 2268 if (mad_agent) { 2269 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); 2270 /* 2271 * recv is freed up in error cases in ib_mad_complete_recv 2272 * or via recv_handler in ib_mad_complete_recv() 2273 */ 2274 recv = NULL; 2275 } else if ((ret & IB_MAD_RESULT_SUCCESS) && 2276 generate_unmatched_resp(recv, response, &mad_size, opa)) { 2277 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, 2278 port_priv->device, port_num, 2279 qp_info->qp->qp_num, mad_size, opa); 2280 } 2281 2282 out: 2283 /* Post another receive request for this QP */ 2284 if (response) { 2285 ib_mad_post_receive_mads(qp_info, response); 2286 kfree(recv); 2287 } else 2288 ib_mad_post_receive_mads(qp_info, recv); 2289 } 2290 2291 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) 2292 { 2293 struct ib_mad_send_wr_private *mad_send_wr; 2294 unsigned long delay; 2295 2296 if (list_empty(&mad_agent_priv->wait_list)) { 2297 cancel_delayed_work(&mad_agent_priv->timed_work); 2298 } else { 2299 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2300 struct ib_mad_send_wr_private, 2301 agent_list); 2302 2303 if (time_after(mad_agent_priv->timeout, 2304 mad_send_wr->timeout)) { 2305 mad_agent_priv->timeout = mad_send_wr->timeout; 2306 delay = mad_send_wr->timeout - jiffies; 2307 if ((long)delay <= 0) 2308 delay = 1; 2309 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2310 &mad_agent_priv->timed_work, delay); 2311 } 2312 } 2313 } 2314 2315 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) 2316 { 2317 struct ib_mad_agent_private *mad_agent_priv; 2318 struct ib_mad_send_wr_private *temp_mad_send_wr; 2319 struct list_head *list_item; 2320 unsigned long delay; 2321 2322 mad_agent_priv = mad_send_wr->mad_agent_priv; 2323 list_del(&mad_send_wr->agent_list); 2324 2325 delay = mad_send_wr->timeout; 2326 mad_send_wr->timeout += jiffies; 2327 2328 if (delay) { 2329 list_for_each_prev(list_item, &mad_agent_priv->wait_list) { 2330 temp_mad_send_wr = list_entry(list_item, 2331 struct ib_mad_send_wr_private, 2332 agent_list); 2333 if (time_after(mad_send_wr->timeout, 2334 temp_mad_send_wr->timeout)) 2335 break; 2336 } 2337 } 2338 else 2339 list_item = &mad_agent_priv->wait_list; 2340 list_add(&mad_send_wr->agent_list, list_item); 2341 2342 /* Reschedule a work item if we have a shorter timeout */ 2343 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) 2344 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2345 &mad_agent_priv->timed_work, delay); 2346 } 2347 2348 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, 2349 int timeout_ms) 2350 { 2351 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2352 wait_for_response(mad_send_wr); 2353 } 2354 2355 /* 2356 * Process a send work completion 2357 */ 2358 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 2359 
struct ib_mad_send_wc *mad_send_wc) 2360 { 2361 struct ib_mad_agent_private *mad_agent_priv; 2362 unsigned long flags; 2363 int ret; 2364 2365 mad_agent_priv = mad_send_wr->mad_agent_priv; 2366 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2367 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { 2368 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); 2369 if (ret == IB_RMPP_RESULT_CONSUMED) 2370 goto done; 2371 } else 2372 ret = IB_RMPP_RESULT_UNHANDLED; 2373 2374 if (mad_send_wc->status != IB_WC_SUCCESS && 2375 mad_send_wr->status == IB_WC_SUCCESS) { 2376 mad_send_wr->status = mad_send_wc->status; 2377 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2378 } 2379 2380 if (--mad_send_wr->refcount > 0) { 2381 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && 2382 mad_send_wr->status == IB_WC_SUCCESS) { 2383 wait_for_response(mad_send_wr); 2384 } 2385 goto done; 2386 } 2387 2388 /* Remove send from MAD agent and notify client of completion */ 2389 list_del(&mad_send_wr->agent_list); 2390 adjust_timeout(mad_agent_priv); 2391 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2392 2393 if (mad_send_wr->status != IB_WC_SUCCESS ) 2394 mad_send_wc->status = mad_send_wr->status; 2395 if (ret == IB_RMPP_RESULT_INTERNAL) 2396 ib_rmpp_send_handler(mad_send_wc); 2397 else 2398 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2399 mad_send_wc); 2400 2401 /* Release reference on agent taken when sending */ 2402 deref_mad_agent(mad_agent_priv); 2403 return; 2404 done: 2405 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2406 } 2407 2408 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) 2409 { 2410 struct ib_mad_port_private *port_priv = cq->cq_context; 2411 struct ib_mad_list_head *mad_list = 2412 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2413 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; 2414 struct ib_mad_qp_info *qp_info; 2415 struct ib_mad_queue *send_queue; 2416 struct ib_send_wr *bad_send_wr; 2417 struct ib_mad_send_wc mad_send_wc; 2418 unsigned long flags; 2419 int ret; 2420 2421 if (list_empty_careful(&port_priv->port_list)) 2422 return; 2423 2424 if (wc->status != IB_WC_SUCCESS) { 2425 if (!ib_mad_send_error(port_priv, wc)) 2426 return; 2427 } 2428 2429 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2430 mad_list); 2431 send_queue = mad_list->mad_queue; 2432 qp_info = send_queue->qp_info; 2433 2434 retry: 2435 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2436 mad_send_wr->header_mapping, 2437 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2438 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2439 mad_send_wr->payload_mapping, 2440 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2441 queued_send_wr = NULL; 2442 spin_lock_irqsave(&send_queue->lock, flags); 2443 list_del(&mad_list->list); 2444 2445 /* Move queued send to the send queue */ 2446 if (send_queue->count-- > send_queue->max_active) { 2447 mad_list = container_of(qp_info->overflow_list.next, 2448 struct ib_mad_list_head, list); 2449 queued_send_wr = container_of(mad_list, 2450 struct ib_mad_send_wr_private, 2451 mad_list); 2452 list_move_tail(&mad_list->list, &send_queue->list); 2453 } 2454 spin_unlock_irqrestore(&send_queue->lock, flags); 2455 2456 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2457 mad_send_wc.status = wc->status; 2458 mad_send_wc.vendor_err = wc->vendor_err; 2459 if (atomic_read(&qp_info->snoop_count)) 2460 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, 2461 
IB_MAD_SNOOP_SEND_COMPLETIONS); 2462 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2463 2464 if (queued_send_wr) { 2465 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, 2466 &bad_send_wr); 2467 if (ret) { 2468 dev_err(&port_priv->device->dev, 2469 "ib_post_send failed: %d\n", ret); 2470 mad_send_wr = queued_send_wr; 2471 wc->status = IB_WC_LOC_QP_OP_ERR; 2472 goto retry; 2473 } 2474 } 2475 } 2476 2477 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) 2478 { 2479 struct ib_mad_send_wr_private *mad_send_wr; 2480 struct ib_mad_list_head *mad_list; 2481 unsigned long flags; 2482 2483 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 2484 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { 2485 mad_send_wr = container_of(mad_list, 2486 struct ib_mad_send_wr_private, 2487 mad_list); 2488 mad_send_wr->retry = 1; 2489 } 2490 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 2491 } 2492 2493 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, 2494 struct ib_wc *wc) 2495 { 2496 struct ib_mad_list_head *mad_list = 2497 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); 2498 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; 2499 struct ib_mad_send_wr_private *mad_send_wr; 2500 int ret; 2501 2502 /* 2503 * Send errors will transition the QP to SQE - move 2504 * QP to RTS and repost flushed work requests 2505 */ 2506 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2507 mad_list); 2508 if (wc->status == IB_WC_WR_FLUSH_ERR) { 2509 if (mad_send_wr->retry) { 2510 /* Repost send */ 2511 struct ib_send_wr *bad_send_wr; 2512 2513 mad_send_wr->retry = 0; 2514 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, 2515 &bad_send_wr); 2516 if (!ret) 2517 return false; 2518 } 2519 } else { 2520 struct ib_qp_attr *attr; 2521 2522 /* Transition QP to RTS and fail offending send */ 2523 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2524 if (attr) { 2525 attr->qp_state = IB_QPS_RTS; 2526 attr->cur_qp_state = IB_QPS_SQE; 2527 ret = ib_modify_qp(qp_info->qp, attr, 2528 IB_QP_STATE | IB_QP_CUR_STATE); 2529 kfree(attr); 2530 if (ret) 2531 dev_err(&port_priv->device->dev, 2532 "%s - ib_modify_qp to RTS: %d\n", 2533 __func__, ret); 2534 else 2535 mark_sends_for_retry(qp_info); 2536 } 2537 } 2538 2539 return true; 2540 } 2541 2542 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) 2543 { 2544 unsigned long flags; 2545 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; 2546 struct ib_mad_send_wc mad_send_wc; 2547 struct list_head cancel_list; 2548 2549 INIT_LIST_HEAD(&cancel_list); 2550 2551 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2552 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2553 &mad_agent_priv->send_list, agent_list) { 2554 if (mad_send_wr->status == IB_WC_SUCCESS) { 2555 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2556 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2557 } 2558 } 2559 2560 /* Empty wait list to prevent receives from finding a request */ 2561 list_splice_init(&mad_agent_priv->wait_list, &cancel_list); 2562 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2563 2564 /* Report all cancelled requests */ 2565 mad_send_wc.status = IB_WC_WR_FLUSH_ERR; 2566 mad_send_wc.vendor_err = 0; 2567 2568 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2569 &cancel_list, agent_list) { 2570 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2571 list_del(&mad_send_wr->agent_list); 2572 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2573 
&mad_send_wc); 2574 atomic_dec(&mad_agent_priv->refcount); 2575 } 2576 } 2577 2578 static struct ib_mad_send_wr_private* 2579 find_send_wr(struct ib_mad_agent_private *mad_agent_priv, 2580 struct ib_mad_send_buf *send_buf) 2581 { 2582 struct ib_mad_send_wr_private *mad_send_wr; 2583 2584 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 2585 agent_list) { 2586 if (&mad_send_wr->send_buf == send_buf) 2587 return mad_send_wr; 2588 } 2589 2590 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 2591 agent_list) { 2592 if (is_rmpp_data_mad(mad_agent_priv, 2593 mad_send_wr->send_buf.mad) && 2594 &mad_send_wr->send_buf == send_buf) 2595 return mad_send_wr; 2596 } 2597 return NULL; 2598 } 2599 2600 int ib_modify_mad(struct ib_mad_agent *mad_agent, 2601 struct ib_mad_send_buf *send_buf, u32 timeout_ms) 2602 { 2603 struct ib_mad_agent_private *mad_agent_priv; 2604 struct ib_mad_send_wr_private *mad_send_wr; 2605 unsigned long flags; 2606 int active; 2607 2608 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 2609 agent); 2610 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2611 mad_send_wr = find_send_wr(mad_agent_priv, send_buf); 2612 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { 2613 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2614 return -EINVAL; 2615 } 2616 2617 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); 2618 if (!timeout_ms) { 2619 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2620 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2621 } 2622 2623 mad_send_wr->send_buf.timeout_ms = timeout_ms; 2624 if (active) 2625 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2626 else 2627 ib_reset_mad_timeout(mad_send_wr, timeout_ms); 2628 2629 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2630 return 0; 2631 } 2632 EXPORT_SYMBOL(ib_modify_mad); 2633 2634 void ib_cancel_mad(struct ib_mad_agent *mad_agent, 2635 struct ib_mad_send_buf *send_buf) 2636 { 2637 ib_modify_mad(mad_agent, send_buf, 0); 2638 } 2639 EXPORT_SYMBOL(ib_cancel_mad); 2640 2641 static void local_completions(struct work_struct *work) 2642 { 2643 struct ib_mad_agent_private *mad_agent_priv; 2644 struct ib_mad_local_private *local; 2645 struct ib_mad_agent_private *recv_mad_agent; 2646 unsigned long flags; 2647 int free_mad; 2648 struct ib_wc wc; 2649 struct ib_mad_send_wc mad_send_wc; 2650 bool opa; 2651 2652 mad_agent_priv = 2653 container_of(work, struct ib_mad_agent_private, local_work); 2654 2655 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 2656 mad_agent_priv->qp_info->port_priv->port_num); 2657 2658 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2659 while (!list_empty(&mad_agent_priv->local_list)) { 2660 local = list_entry(mad_agent_priv->local_list.next, 2661 struct ib_mad_local_private, 2662 completion_list); 2663 list_del(&local->completion_list); 2664 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2665 free_mad = 0; 2666 if (local->mad_priv) { 2667 u8 base_version; 2668 recv_mad_agent = local->recv_mad_agent; 2669 if (!recv_mad_agent) { 2670 dev_err(&mad_agent_priv->agent.device->dev, 2671 "No receive MAD agent for local completion\n"); 2672 free_mad = 1; 2673 goto local_send_completion; 2674 } 2675 2676 /* 2677 * Defined behavior is to complete response 2678 * before request 2679 */ 2680 build_smp_wc(recv_mad_agent->agent.qp, 2681 local->mad_send_wr->send_wr.wr.wr_cqe, 2682 be16_to_cpu(IB_LID_PERMISSIVE), 2683 local->mad_send_wr->send_wr.pkey_index, 2684 recv_mad_agent->agent.port_num, &wc); 2685 2686 
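/*
 * Fill in the receive work completion for the locally completed MAD so
 * the receiving agent's recv_handler sees it as if the MAD had arrived
 * from the fabric.
 */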
local->mad_priv->header.recv_wc.wc = &wc; 2687 2688 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; 2689 if (opa && base_version == OPA_MGMT_BASE_VERSION) { 2690 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; 2691 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); 2692 } else { 2693 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); 2694 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); 2695 } 2696 2697 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); 2698 list_add(&local->mad_priv->header.recv_wc.recv_buf.list, 2699 &local->mad_priv->header.recv_wc.rmpp_list); 2700 local->mad_priv->header.recv_wc.recv_buf.grh = NULL; 2701 local->mad_priv->header.recv_wc.recv_buf.mad = 2702 (struct ib_mad *)local->mad_priv->mad; 2703 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) 2704 snoop_recv(recv_mad_agent->qp_info, 2705 &local->mad_priv->header.recv_wc, 2706 IB_MAD_SNOOP_RECVS); 2707 recv_mad_agent->agent.recv_handler( 2708 &recv_mad_agent->agent, 2709 &local->mad_send_wr->send_buf, 2710 &local->mad_priv->header.recv_wc); 2711 spin_lock_irqsave(&recv_mad_agent->lock, flags); 2712 atomic_dec(&recv_mad_agent->refcount); 2713 spin_unlock_irqrestore(&recv_mad_agent->lock, flags); 2714 } 2715 2716 local_send_completion: 2717 /* Complete send */ 2718 mad_send_wc.status = IB_WC_SUCCESS; 2719 mad_send_wc.vendor_err = 0; 2720 mad_send_wc.send_buf = &local->mad_send_wr->send_buf; 2721 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) 2722 snoop_send(mad_agent_priv->qp_info, 2723 &local->mad_send_wr->send_buf, 2724 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); 2725 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2726 &mad_send_wc); 2727 2728 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2729 atomic_dec(&mad_agent_priv->refcount); 2730 if (free_mad) 2731 kfree(local->mad_priv); 2732 kfree(local); 2733 } 2734 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2735 } 2736 2737 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) 2738 { 2739 int ret; 2740 2741 if (!mad_send_wr->retries_left) 2742 return -ETIMEDOUT; 2743 2744 mad_send_wr->retries_left--; 2745 mad_send_wr->send_buf.retries++; 2746 2747 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); 2748 2749 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { 2750 ret = ib_retry_rmpp(mad_send_wr); 2751 switch (ret) { 2752 case IB_RMPP_RESULT_UNHANDLED: 2753 ret = ib_send_mad(mad_send_wr); 2754 break; 2755 case IB_RMPP_RESULT_CONSUMED: 2756 ret = 0; 2757 break; 2758 default: 2759 ret = -ECOMM; 2760 break; 2761 } 2762 } else 2763 ret = ib_send_mad(mad_send_wr); 2764 2765 if (!ret) { 2766 mad_send_wr->refcount++; 2767 list_add_tail(&mad_send_wr->agent_list, 2768 &mad_send_wr->mad_agent_priv->send_list); 2769 } 2770 return ret; 2771 } 2772 2773 static void timeout_sends(struct work_struct *work) 2774 { 2775 struct ib_mad_agent_private *mad_agent_priv; 2776 struct ib_mad_send_wr_private *mad_send_wr; 2777 struct ib_mad_send_wc mad_send_wc; 2778 unsigned long flags, delay; 2779 2780 mad_agent_priv = container_of(work, struct ib_mad_agent_private, 2781 timed_work.work); 2782 mad_send_wc.vendor_err = 0; 2783 2784 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2785 while (!list_empty(&mad_agent_priv->wait_list)) { 2786 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2787 struct ib_mad_send_wr_private, 2788 agent_list); 2789 2790 if (time_after(mad_send_wr->timeout, 
jiffies)) { 2791 delay = mad_send_wr->timeout - jiffies; 2792 if ((long)delay <= 0) 2793 delay = 1; 2794 queue_delayed_work(mad_agent_priv->qp_info-> 2795 port_priv->wq, 2796 &mad_agent_priv->timed_work, delay); 2797 break; 2798 } 2799 2800 list_del(&mad_send_wr->agent_list); 2801 if (mad_send_wr->status == IB_WC_SUCCESS && 2802 !retry_send(mad_send_wr)) 2803 continue; 2804 2805 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2806 2807 if (mad_send_wr->status == IB_WC_SUCCESS) 2808 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; 2809 else 2810 mad_send_wc.status = mad_send_wr->status; 2811 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2812 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2813 &mad_send_wc); 2814 2815 atomic_dec(&mad_agent_priv->refcount); 2816 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2817 } 2818 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2819 } 2820 2821 /* 2822 * Allocate receive MADs and post receive WRs for them 2823 */ 2824 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 2825 struct ib_mad_private *mad) 2826 { 2827 unsigned long flags; 2828 int post, ret; 2829 struct ib_mad_private *mad_priv; 2830 struct ib_sge sg_list; 2831 struct ib_recv_wr recv_wr, *bad_recv_wr; 2832 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; 2833 2834 /* Initialize common scatter list fields */ 2835 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; 2836 2837 /* Initialize common receive WR fields */ 2838 recv_wr.next = NULL; 2839 recv_wr.sg_list = &sg_list; 2840 recv_wr.num_sge = 1; 2841 2842 do { 2843 /* Allocate and map receive buffer */ 2844 if (mad) { 2845 mad_priv = mad; 2846 mad = NULL; 2847 } else { 2848 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), 2849 GFP_ATOMIC); 2850 if (!mad_priv) { 2851 ret = -ENOMEM; 2852 break; 2853 } 2854 } 2855 sg_list.length = mad_priv_dma_size(mad_priv); 2856 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, 2857 &mad_priv->grh, 2858 mad_priv_dma_size(mad_priv), 2859 DMA_FROM_DEVICE); 2860 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, 2861 sg_list.addr))) { 2862 ret = -ENOMEM; 2863 break; 2864 } 2865 mad_priv->header.mapping = sg_list.addr; 2866 mad_priv->header.mad_list.mad_queue = recv_queue; 2867 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; 2868 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; 2869 2870 /* Post receive WR */ 2871 spin_lock_irqsave(&recv_queue->lock, flags); 2872 post = (++recv_queue->count < recv_queue->max_active); 2873 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); 2874 spin_unlock_irqrestore(&recv_queue->lock, flags); 2875 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); 2876 if (ret) { 2877 spin_lock_irqsave(&recv_queue->lock, flags); 2878 list_del(&mad_priv->header.mad_list.list); 2879 recv_queue->count--; 2880 spin_unlock_irqrestore(&recv_queue->lock, flags); 2881 ib_dma_unmap_single(qp_info->port_priv->device, 2882 mad_priv->header.mapping, 2883 mad_priv_dma_size(mad_priv), 2884 DMA_FROM_DEVICE); 2885 kfree(mad_priv); 2886 dev_err(&qp_info->port_priv->device->dev, 2887 "ib_post_recv failed: %d\n", ret); 2888 break; 2889 } 2890 } while (post); 2891 2892 return ret; 2893 } 2894 2895 /* 2896 * Return all the posted receive MADs 2897 */ 2898 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) 2899 { 2900 struct ib_mad_private_header *mad_priv_hdr; 2901 struct ib_mad_private *recv; 2902 struct ib_mad_list_head *mad_list; 2903 2904 if (!qp_info->qp) 2905 return; 2906 2907 while 
(!list_empty(&qp_info->recv_queue.list)) { 2908 2909 mad_list = list_entry(qp_info->recv_queue.list.next, 2910 struct ib_mad_list_head, list); 2911 mad_priv_hdr = container_of(mad_list, 2912 struct ib_mad_private_header, 2913 mad_list); 2914 recv = container_of(mad_priv_hdr, struct ib_mad_private, 2915 header); 2916 2917 /* Remove from posted receive MAD list */ 2918 list_del(&mad_list->list); 2919 2920 ib_dma_unmap_single(qp_info->port_priv->device, 2921 recv->header.mapping, 2922 mad_priv_dma_size(recv), 2923 DMA_FROM_DEVICE); 2924 kfree(recv); 2925 } 2926 2927 qp_info->recv_queue.count = 0; 2928 } 2929 2930 /* 2931 * Start the port 2932 */ 2933 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) 2934 { 2935 int ret, i; 2936 struct ib_qp_attr *attr; 2937 struct ib_qp *qp; 2938 u16 pkey_index; 2939 2940 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2941 if (!attr) 2942 return -ENOMEM; 2943 2944 ret = ib_find_pkey(port_priv->device, port_priv->port_num, 2945 IB_DEFAULT_PKEY_FULL, &pkey_index); 2946 if (ret) 2947 pkey_index = 0; 2948 2949 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 2950 qp = port_priv->qp_info[i].qp; 2951 if (!qp) 2952 continue; 2953 2954 /* 2955 * PKey index for QP1 is irrelevant but 2956 * one is needed for the Reset to Init transition 2957 */ 2958 attr->qp_state = IB_QPS_INIT; 2959 attr->pkey_index = pkey_index; 2960 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; 2961 ret = ib_modify_qp(qp, attr, IB_QP_STATE | 2962 IB_QP_PKEY_INDEX | IB_QP_QKEY); 2963 if (ret) { 2964 dev_err(&port_priv->device->dev, 2965 "Couldn't change QP%d state to INIT: %d\n", 2966 i, ret); 2967 goto out; 2968 } 2969 2970 attr->qp_state = IB_QPS_RTR; 2971 ret = ib_modify_qp(qp, attr, IB_QP_STATE); 2972 if (ret) { 2973 dev_err(&port_priv->device->dev, 2974 "Couldn't change QP%d state to RTR: %d\n", 2975 i, ret); 2976 goto out; 2977 } 2978 2979 attr->qp_state = IB_QPS_RTS; 2980 attr->sq_psn = IB_MAD_SEND_Q_PSN; 2981 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); 2982 if (ret) { 2983 dev_err(&port_priv->device->dev, 2984 "Couldn't change QP%d state to RTS: %d\n", 2985 i, ret); 2986 goto out; 2987 } 2988 } 2989 2990 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 2991 if (ret) { 2992 dev_err(&port_priv->device->dev, 2993 "Failed to request completion notification: %d\n", 2994 ret); 2995 goto out; 2996 } 2997 2998 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 2999 if (!port_priv->qp_info[i].qp) 3000 continue; 3001 3002 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); 3003 if (ret) { 3004 dev_err(&port_priv->device->dev, 3005 "Couldn't post receive WRs\n"); 3006 goto out; 3007 } 3008 } 3009 out: 3010 kfree(attr); 3011 return ret; 3012 } 3013 3014 static void qp_event_handler(struct ib_event *event, void *qp_context) 3015 { 3016 struct ib_mad_qp_info *qp_info = qp_context; 3017 3018 /* It's worse than that! He's dead, Jim! 
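A fatal asynchronous event was raised on one of the MAD QPs; nothing is recovered here, the event is only logged.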
*/ 3019 dev_err(&qp_info->port_priv->device->dev, 3020 "Fatal error (%d) on MAD QP (%d)\n", 3021 event->event, qp_info->qp->qp_num); 3022 } 3023 3024 static void init_mad_queue(struct ib_mad_qp_info *qp_info, 3025 struct ib_mad_queue *mad_queue) 3026 { 3027 mad_queue->qp_info = qp_info; 3028 mad_queue->count = 0; 3029 spin_lock_init(&mad_queue->lock); 3030 INIT_LIST_HEAD(&mad_queue->list); 3031 } 3032 3033 static void init_mad_qp(struct ib_mad_port_private *port_priv, 3034 struct ib_mad_qp_info *qp_info) 3035 { 3036 qp_info->port_priv = port_priv; 3037 init_mad_queue(qp_info, &qp_info->send_queue); 3038 init_mad_queue(qp_info, &qp_info->recv_queue); 3039 INIT_LIST_HEAD(&qp_info->overflow_list); 3040 spin_lock_init(&qp_info->snoop_lock); 3041 qp_info->snoop_table = NULL; 3042 qp_info->snoop_table_size = 0; 3043 atomic_set(&qp_info->snoop_count, 0); 3044 } 3045 3046 static int create_mad_qp(struct ib_mad_qp_info *qp_info, 3047 enum ib_qp_type qp_type) 3048 { 3049 struct ib_qp_init_attr qp_init_attr; 3050 int ret; 3051 3052 memset(&qp_init_attr, 0, sizeof qp_init_attr); 3053 qp_init_attr.send_cq = qp_info->port_priv->cq; 3054 qp_init_attr.recv_cq = qp_info->port_priv->cq; 3055 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 3056 qp_init_attr.cap.max_send_wr = mad_sendq_size; 3057 qp_init_attr.cap.max_recv_wr = mad_recvq_size; 3058 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; 3059 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; 3060 qp_init_attr.qp_type = qp_type; 3061 qp_init_attr.port_num = qp_info->port_priv->port_num; 3062 qp_init_attr.qp_context = qp_info; 3063 qp_init_attr.event_handler = qp_event_handler; 3064 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); 3065 if (IS_ERR(qp_info->qp)) { 3066 dev_err(&qp_info->port_priv->device->dev, 3067 "Couldn't create ib_mad QP%d\n", 3068 get_spl_qp_index(qp_type)); 3069 ret = PTR_ERR(qp_info->qp); 3070 goto error; 3071 } 3072 /* Use minimum queue sizes unless the CQ is resized */ 3073 qp_info->send_queue.max_active = mad_sendq_size; 3074 qp_info->recv_queue.max_active = mad_recvq_size; 3075 return 0; 3076 3077 error: 3078 return ret; 3079 } 3080 3081 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) 3082 { 3083 if (!qp_info->qp) 3084 return; 3085 3086 ib_destroy_qp(qp_info->qp); 3087 kfree(qp_info->snoop_table); 3088 } 3089 3090 /* 3091 * Open the port 3092 * Create the QP, PD, MR, and CQ if needed 3093 */ 3094 static int ib_mad_port_open(struct ib_device *device, 3095 int port_num) 3096 { 3097 int ret, cq_size; 3098 struct ib_mad_port_private *port_priv; 3099 unsigned long flags; 3100 char name[sizeof "ib_mad123"]; 3101 int has_smi; 3102 3103 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) 3104 return -EFAULT; 3105 3106 if (WARN_ON(rdma_cap_opa_mad(device, port_num) && 3107 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) 3108 return -EFAULT; 3109 3110 /* Create new device info */ 3111 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); 3112 if (!port_priv) 3113 return -ENOMEM; 3114 3115 port_priv->device = device; 3116 port_priv->port_num = port_num; 3117 spin_lock_init(&port_priv->reg_lock); 3118 INIT_LIST_HEAD(&port_priv->agent_list); 3119 init_mad_qp(port_priv, &port_priv->qp_info[0]); 3120 init_mad_qp(port_priv, &port_priv->qp_info[1]); 3121 3122 cq_size = mad_sendq_size + mad_recvq_size; 3123 has_smi = rdma_cap_ib_smi(device, port_num); 3124 if (has_smi) 3125 cq_size *= 2; 3126 3127 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, 3128 
IB_POLL_WORKQUEUE); 3129 if (IS_ERR(port_priv->cq)) { 3130 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); 3131 ret = PTR_ERR(port_priv->cq); 3132 goto error3; 3133 } 3134 3135 port_priv->pd = ib_alloc_pd(device, 0); 3136 if (IS_ERR(port_priv->pd)) { 3137 dev_err(&device->dev, "Couldn't create ib_mad PD\n"); 3138 ret = PTR_ERR(port_priv->pd); 3139 goto error4; 3140 } 3141 3142 if (has_smi) { 3143 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); 3144 if (ret) 3145 goto error6; 3146 } 3147 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); 3148 if (ret) 3149 goto error7; 3150 3151 snprintf(name, sizeof name, "ib_mad%d", port_num); 3152 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 3153 if (!port_priv->wq) { 3154 ret = -ENOMEM; 3155 goto error8; 3156 } 3157 3158 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3159 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 3160 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3161 3162 ret = ib_mad_port_start(port_priv); 3163 if (ret) { 3164 dev_err(&device->dev, "Couldn't start port\n"); 3165 goto error9; 3166 } 3167 3168 return 0; 3169 3170 error9: 3171 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3172 list_del_init(&port_priv->port_list); 3173 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3174 3175 destroy_workqueue(port_priv->wq); 3176 error8: 3177 destroy_mad_qp(&port_priv->qp_info[1]); 3178 error7: 3179 destroy_mad_qp(&port_priv->qp_info[0]); 3180 error6: 3181 ib_dealloc_pd(port_priv->pd); 3182 error4: 3183 ib_free_cq(port_priv->cq); 3184 cleanup_recv_queue(&port_priv->qp_info[1]); 3185 cleanup_recv_queue(&port_priv->qp_info[0]); 3186 error3: 3187 kfree(port_priv); 3188 3189 return ret; 3190 } 3191 3192 /* 3193 * Close the port 3194 * If there are no classes using the port, free the port 3195 * resources (CQ, MR, PD, QP) and remove the port's info structure 3196 */ 3197 static int ib_mad_port_close(struct ib_device *device, int port_num) 3198 { 3199 struct ib_mad_port_private *port_priv; 3200 unsigned long flags; 3201 3202 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 3203 port_priv = __ib_get_mad_port(device, port_num); 3204 if (port_priv == NULL) { 3205 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3206 dev_err(&device->dev, "Port %d not found\n", port_num); 3207 return -ENODEV; 3208 } 3209 list_del_init(&port_priv->port_list); 3210 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 3211 3212 destroy_workqueue(port_priv->wq); 3213 destroy_mad_qp(&port_priv->qp_info[1]); 3214 destroy_mad_qp(&port_priv->qp_info[0]); 3215 ib_dealloc_pd(port_priv->pd); 3216 ib_free_cq(port_priv->cq); 3217 cleanup_recv_queue(&port_priv->qp_info[1]); 3218 cleanup_recv_queue(&port_priv->qp_info[0]); 3219 /* XXX: Handle deallocation of MAD registration tables */ 3220 3221 kfree(port_priv); 3222 3223 return 0; 3224 } 3225 3226 static void ib_mad_init_device(struct ib_device *device) 3227 { 3228 int start, i; 3229 3230 start = rdma_start_port(device); 3231 3232 for (i = start; i <= rdma_end_port(device); i++) { 3233 if (!rdma_cap_ib_mad(device, i)) 3234 continue; 3235 3236 if (ib_mad_port_open(device, i)) { 3237 dev_err(&device->dev, "Couldn't open port %d\n", i); 3238 goto error; 3239 } 3240 if (ib_agent_port_open(device, i)) { 3241 dev_err(&device->dev, 3242 "Couldn't open port %d for agents\n", i); 3243 goto error_agent; 3244 } 3245 } 3246 return; 3247 3248 error_agent: 3249 if (ib_mad_port_close(device, i)) 3250 dev_err(&device->dev, "Couldn't close port %d\n", i); 3251 3252 error: 
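/* Unwind: close agent and MAD port state for every MAD-capable port opened before the failure */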
3253 while (--i >= start) { 3254 if (!rdma_cap_ib_mad(device, i)) 3255 continue; 3256 3257 if (ib_agent_port_close(device, i)) 3258 dev_err(&device->dev, 3259 "Couldn't close port %d for agents\n", i); 3260 if (ib_mad_port_close(device, i)) 3261 dev_err(&device->dev, "Couldn't close port %d\n", i); 3262 } 3263 } 3264 3265 static void ib_mad_remove_device(struct ib_device *device, void *client_data) 3266 { 3267 int i; 3268 3269 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { 3270 if (!rdma_cap_ib_mad(device, i)) 3271 continue; 3272 3273 if (ib_agent_port_close(device, i)) 3274 dev_err(&device->dev, 3275 "Couldn't close port %d for agents\n", i); 3276 if (ib_mad_port_close(device, i)) 3277 dev_err(&device->dev, "Couldn't close port %d\n", i); 3278 } 3279 } 3280 3281 static struct ib_client mad_client = { 3282 .name = "mad", 3283 .add = ib_mad_init_device, 3284 .remove = ib_mad_remove_device 3285 }; 3286 3287 int ib_mad_init(void) 3288 { 3289 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); 3290 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); 3291 3292 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); 3293 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); 3294 3295 INIT_LIST_HEAD(&ib_mad_port_list); 3296 3297 if (ib_register_client(&mad_client)) { 3298 pr_err("Couldn't register ib_mad client\n"); 3299 return -EINVAL; 3300 } 3301 3302 return 0; 3303 } 3304 3305 void ib_mad_cleanup(void) 3306 { 3307 ib_unregister_client(&mad_client); 3308 } 3309
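/*
 * Illustrative sketch, not part of the original file: how a kernel client
 * that already holds a MAD agent and an outstanding send might use the
 * ib_modify_mad()/ib_cancel_mad() helpers exported above.  The agent and
 * send_buf are assumed to have been set up elsewhere (for example via
 * ib_register_mad_agent() and ib_create_send_mad()); only the two calls
 * below are defined in this file, and the function name is hypothetical.
 *
 *	static void example_adjust_or_cancel(struct ib_mad_agent *agent,
 *					     struct ib_mad_send_buf *send_buf,
 *					     bool give_up)
 *	{
 *		if (give_up) {
 *			// Same as ib_modify_mad(agent, send_buf, 0)
 *			ib_cancel_mad(agent, send_buf);
 *			return;
 *		}
 *
 *		// Extend the response timeout of the outstanding request to
 *		// five seconds; -EINVAL means the send already completed.
 *		if (ib_modify_mad(agent, send_buf, 5000))
 *			pr_debug("MAD send already completed\n");
 *	}
 */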