/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct kmem_cache *ib_mad_cache;

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					struct ib_mad *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(struct ib_mad *mad)
{
	return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
		(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
		 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1)
		goto error1;

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
		goto error1;

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION)
			goto error1;
		if (!recv_handler)
			goto error1;
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				goto error1;
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui))
				goto error1;
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version)
				goto error1;
		}
		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE))
				goto error1;
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported.  For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
						 IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mad_agent_priv->agent.mr)) {
		ret = ERR_PTR(-ENOMEM);
		goto error2;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
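	/*
	 * Each registered agent gets a unique upper-32-bit TID value;
	 * find_mad_agent() uses it to route response MADs back to the
	 * agent that sent the request.
	 */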
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
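	/*
	 * Queued completions have been flushed; drop any RMPP receives
	 * still outstanding for this agent before waiting for the last
	 * reference to go away.
	 */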
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	ib_dereg_mr(mad_agent_priv->agent.mr);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

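/*
 * Build a work completion that mimics what the hardware would have
 * generated for an SMP received on QP0, so a locally processed
 * directed route SMP can reuse the normal receive handling path.
 */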
static void build_smp_wc(struct ib_qp *qp,
			 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
			 struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_id = wr_id;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_send_wr *send_wr = &mad_send_wr->send_wr;

	if (device->node_type == RDMA_NODE_IB_SWITCH &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->wr.ud.port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
	     IB_LID_PERMISSIVE &&
	    smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
	     IB_SMI_DISCARD) {
		ret = -EINVAL;
		printk(KERN_ERR PFX "Invalid directed route\n");
		goto out;
	}

	/* Check to post send on QP or process locally */
	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
		goto out;

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		printk(KERN_ERR PFX "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr_id, be16_to_cpu(smp->dr_slid),
		     send_wr->wr.ud.pkey_index,
		     send_wr->wr.ud.port_num, &mad_wc);

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (struct ib_mad *)smp,
				  (struct ib_mad *)&mad_priv->mad);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad(&mad_priv->mad.mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kmem_cache_free(ib_mad_cache, mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
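		/*
		 * The driver neither consumed nor replied to the SMP, so
		 * hand the original MAD to whichever local agent is
		 * registered for its class, via the local completion path.
		 */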
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
			recv_mad_agent = find_mad_agent(port_priv,
							&mad_priv->mad.mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kmem_cache_free(ib_mad_cache, mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kmem_cache_free(ib_mad_cache, mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len)
{
	int seg_size, pad;

	seg_size = sizeof(struct ib_mad) - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

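	/*
	 * data_len + pad is an exact multiple of seg_size; the unused
	 * tail of the final segment is zeroed after the loop below.
	 */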
	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
			       "alloc failed for len %zd, gfp %#x\n",
			       sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	pad = get_pad_size(hdr_len, data_len);
	message_size = hdr_len + data_len + pad;

	if ((!mad_agent->rmpp_version &&
	     (rmpp_active || message_size > sizeof(struct ib_mad))) ||
	    (!rmpp_active && message_size > sizeof(struct ib_mad)))
		return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
	mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
	mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

	mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.num_sge = 2;
	mad_send_wr->send_wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
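		/* All other classes carry data right after the common MAD header */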
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
				   &bad_send_wr);
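		/* Posted to the QP; track the request on the active send queue */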
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_agent_priv->agent.rmpp_version) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kmem_cache_free(ib_mad_cache, priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			printk(KERN_ERR PFX "Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		printk(KERN_ERR PFX "No memory for "
		       "ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
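	/*
	 * Make sure a vendor class entry exists for this class before
	 * searching for, or claiming, one of its OUI slots.
	 */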
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			printk(KERN_ERR PFX "No memory for "
			       "ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	printk(KERN_ERR PFX "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       struct ib_mad *mad)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
			class = port_priv->version[
					mad->mad_hdr.class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
			    IB_MGMT_MAX_METHODS)
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad->mad_hdr.mgmt_class)];
			if (method)
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad->mad_hdr.class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad->mad_hdr.mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (struct ib_vendor_mad *)mad;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad->mad_hdr.method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			printk(KERN_NOTICE PFX "No receive handler for client "
			       "%p on port %d\n",
			       &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

static int validate_mad(struct ib_mad *mad, u32 qp_num)
{
	int valid = 0;

	/* Make sure MAD base version is understood */
	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
		printk(KERN_ERR PFX "MAD received with unsupported base "
		       "version %d\n", mad->mad_hdr.base_version);
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
		       struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

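/*
 * Compare the addressing (LID/path bits or GID) of a received response
 * with the address the matching request was sent to.  Used together with
 * the TID and class checks in ib_find_send_mad() to match a response to
 * the correct outstanding request.
 */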
static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
				   struct ib_mad_send_wr_private *wr,
				   struct ib_mad_recv_wc *rwc )
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(rwc->recv_buf.mad);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

struct ib_mad_send_wr_private*
ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
		 struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	struct ib_mad *mad;

	mad = (struct ib_mad *)wc->recv_buf.mad;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad->mad_hdr.tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad->mad_hdr.tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (mad_agent_priv->agent.rmpp_version) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			ib_free_recv_mad(mad_recv_wc);
			deref_mad_agent(mad_agent_priv);
			return;
		}
		ib_mark_mad_done(mad_send_wr);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		/* Defined behavior is to complete response before request */
		mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		atomic_dec(&mad_agent_priv->refcount);

		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}

static bool generate_unmatched_resp(struct ib_mad_private *recv,
				    struct ib_mad_private *response)
{
	if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
	    recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, sizeof *response);
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = &response->mad.mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
		response->mad.mad.mad_hdr.status =
			cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;

		return true;
	} else {
		return false;
	}
}

static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
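	/*
	 * The receive buffer was DMA-mapped when it was posted; unmap it
	 * before the CPU looks at the MAD contents.
	 */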
ib_dma_unmap_single(port_priv->device, 1893 recv->header.mapping, 1894 sizeof(struct ib_mad_private) - 1895 sizeof(struct ib_mad_private_header), 1896 DMA_FROM_DEVICE); 1897 1898 /* Setup MAD receive work completion from "normal" work completion */ 1899 recv->header.wc = *wc; 1900 recv->header.recv_wc.wc = &recv->header.wc; 1901 recv->header.recv_wc.mad_len = sizeof(struct ib_mad); 1902 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad; 1903 recv->header.recv_wc.recv_buf.grh = &recv->grh; 1904 1905 if (atomic_read(&qp_info->snoop_count)) 1906 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); 1907 1908 /* Validate MAD */ 1909 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num)) 1910 goto out; 1911 1912 response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); 1913 if (!response) { 1914 printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory " 1915 "for response buffer\n"); 1916 goto out; 1917 } 1918 1919 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) 1920 port_num = wc->port_num; 1921 else 1922 port_num = port_priv->port_num; 1923 1924 if (recv->mad.mad.mad_hdr.mgmt_class == 1925 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 1926 enum smi_forward_action retsmi; 1927 1928 if (smi_handle_dr_smp_recv(&recv->mad.smp, 1929 port_priv->device->node_type, 1930 port_num, 1931 port_priv->device->phys_port_cnt) == 1932 IB_SMI_DISCARD) 1933 goto out; 1934 1935 retsmi = smi_check_forward_dr_smp(&recv->mad.smp); 1936 if (retsmi == IB_SMI_LOCAL) 1937 goto local; 1938 1939 if (retsmi == IB_SMI_SEND) { /* don't forward */ 1940 if (smi_handle_dr_smp_send(&recv->mad.smp, 1941 port_priv->device->node_type, 1942 port_num) == IB_SMI_DISCARD) 1943 goto out; 1944 1945 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD) 1946 goto out; 1947 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { 1948 /* forward case for switches */ 1949 memcpy(response, recv, sizeof(*response)); 1950 response->header.recv_wc.wc = &response->header.wc; 1951 response->header.recv_wc.recv_buf.mad = &response->mad.mad; 1952 response->header.recv_wc.recv_buf.grh = &response->grh; 1953 1954 agent_send_response(&response->mad.mad, 1955 &response->grh, wc, 1956 port_priv->device, 1957 smi_get_fwd_port(&recv->mad.smp), 1958 qp_info->qp->qp_num); 1959 1960 goto out; 1961 } 1962 } 1963 1964 local: 1965 /* Give driver "right of first refusal" on incoming MAD */ 1966 if (port_priv->device->process_mad) { 1967 ret = port_priv->device->process_mad(port_priv->device, 0, 1968 port_priv->port_num, 1969 wc, &recv->grh, 1970 &recv->mad.mad, 1971 &response->mad.mad); 1972 if (ret & IB_MAD_RESULT_SUCCESS) { 1973 if (ret & IB_MAD_RESULT_CONSUMED) 1974 goto out; 1975 if (ret & IB_MAD_RESULT_REPLY) { 1976 agent_send_response(&response->mad.mad, 1977 &recv->grh, wc, 1978 port_priv->device, 1979 port_num, 1980 qp_info->qp->qp_num); 1981 goto out; 1982 } 1983 } 1984 } 1985 1986 mad_agent = find_mad_agent(port_priv, &recv->mad.mad); 1987 if (mad_agent) { 1988 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); 1989 /* 1990 * recv is freed up in error cases in ib_mad_complete_recv 1991 * or via recv_handler in ib_mad_complete_recv() 1992 */ 1993 recv = NULL; 1994 } else if ((ret & IB_MAD_RESULT_SUCCESS) && 1995 generate_unmatched_resp(recv, response)) { 1996 agent_send_response(&response->mad.mad, &recv->grh, wc, 1997 port_priv->device, port_num, qp_info->qp->qp_num); 1998 } 1999 2000 out: 2001 /* Post another receive request for this QP */ 2002 if (response) { 2003 ib_mad_post_receive_mads(qp_info, 
response); 2004 if (recv) 2005 kmem_cache_free(ib_mad_cache, recv); 2006 } else 2007 ib_mad_post_receive_mads(qp_info, recv); 2008 } 2009 2010 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) 2011 { 2012 struct ib_mad_send_wr_private *mad_send_wr; 2013 unsigned long delay; 2014 2015 if (list_empty(&mad_agent_priv->wait_list)) { 2016 cancel_delayed_work(&mad_agent_priv->timed_work); 2017 } else { 2018 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2019 struct ib_mad_send_wr_private, 2020 agent_list); 2021 2022 if (time_after(mad_agent_priv->timeout, 2023 mad_send_wr->timeout)) { 2024 mad_agent_priv->timeout = mad_send_wr->timeout; 2025 delay = mad_send_wr->timeout - jiffies; 2026 if ((long)delay <= 0) 2027 delay = 1; 2028 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2029 &mad_agent_priv->timed_work, delay); 2030 } 2031 } 2032 } 2033 2034 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) 2035 { 2036 struct ib_mad_agent_private *mad_agent_priv; 2037 struct ib_mad_send_wr_private *temp_mad_send_wr; 2038 struct list_head *list_item; 2039 unsigned long delay; 2040 2041 mad_agent_priv = mad_send_wr->mad_agent_priv; 2042 list_del(&mad_send_wr->agent_list); 2043 2044 delay = mad_send_wr->timeout; 2045 mad_send_wr->timeout += jiffies; 2046 2047 if (delay) { 2048 list_for_each_prev(list_item, &mad_agent_priv->wait_list) { 2049 temp_mad_send_wr = list_entry(list_item, 2050 struct ib_mad_send_wr_private, 2051 agent_list); 2052 if (time_after(mad_send_wr->timeout, 2053 temp_mad_send_wr->timeout)) 2054 break; 2055 } 2056 } 2057 else 2058 list_item = &mad_agent_priv->wait_list; 2059 list_add(&mad_send_wr->agent_list, list_item); 2060 2061 /* Reschedule a work item if we have a shorter timeout */ 2062 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) 2063 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2064 &mad_agent_priv->timed_work, delay); 2065 } 2066 2067 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, 2068 int timeout_ms) 2069 { 2070 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2071 wait_for_response(mad_send_wr); 2072 } 2073 2074 /* 2075 * Process a send work completion 2076 */ 2077 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 2078 struct ib_mad_send_wc *mad_send_wc) 2079 { 2080 struct ib_mad_agent_private *mad_agent_priv; 2081 unsigned long flags; 2082 int ret; 2083 2084 mad_agent_priv = mad_send_wr->mad_agent_priv; 2085 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2086 if (mad_agent_priv->agent.rmpp_version) { 2087 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); 2088 if (ret == IB_RMPP_RESULT_CONSUMED) 2089 goto done; 2090 } else 2091 ret = IB_RMPP_RESULT_UNHANDLED; 2092 2093 if (mad_send_wc->status != IB_WC_SUCCESS && 2094 mad_send_wr->status == IB_WC_SUCCESS) { 2095 mad_send_wr->status = mad_send_wc->status; 2096 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2097 } 2098 2099 if (--mad_send_wr->refcount > 0) { 2100 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && 2101 mad_send_wr->status == IB_WC_SUCCESS) { 2102 wait_for_response(mad_send_wr); 2103 } 2104 goto done; 2105 } 2106 2107 /* Remove send from MAD agent and notify client of completion */ 2108 list_del(&mad_send_wr->agent_list); 2109 adjust_timeout(mad_agent_priv); 2110 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2111 2112 if (mad_send_wr->status != IB_WC_SUCCESS ) 2113 mad_send_wc->status = mad_send_wr->status; 2114 if (ret == IB_RMPP_RESULT_INTERNAL) 
2115 ib_rmpp_send_handler(mad_send_wc); 2116 else 2117 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2118 mad_send_wc); 2119 2120 /* Release reference on agent taken when sending */ 2121 deref_mad_agent(mad_agent_priv); 2122 return; 2123 done: 2124 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2125 } 2126 2127 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, 2128 struct ib_wc *wc) 2129 { 2130 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; 2131 struct ib_mad_list_head *mad_list; 2132 struct ib_mad_qp_info *qp_info; 2133 struct ib_mad_queue *send_queue; 2134 struct ib_send_wr *bad_send_wr; 2135 struct ib_mad_send_wc mad_send_wc; 2136 unsigned long flags; 2137 int ret; 2138 2139 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; 2140 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2141 mad_list); 2142 send_queue = mad_list->mad_queue; 2143 qp_info = send_queue->qp_info; 2144 2145 retry: 2146 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2147 mad_send_wr->header_mapping, 2148 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2149 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2150 mad_send_wr->payload_mapping, 2151 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2152 queued_send_wr = NULL; 2153 spin_lock_irqsave(&send_queue->lock, flags); 2154 list_del(&mad_list->list); 2155 2156 /* Move queued send to the send queue */ 2157 if (send_queue->count-- > send_queue->max_active) { 2158 mad_list = container_of(qp_info->overflow_list.next, 2159 struct ib_mad_list_head, list); 2160 queued_send_wr = container_of(mad_list, 2161 struct ib_mad_send_wr_private, 2162 mad_list); 2163 list_move_tail(&mad_list->list, &send_queue->list); 2164 } 2165 spin_unlock_irqrestore(&send_queue->lock, flags); 2166 2167 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2168 mad_send_wc.status = wc->status; 2169 mad_send_wc.vendor_err = wc->vendor_err; 2170 if (atomic_read(&qp_info->snoop_count)) 2171 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, 2172 IB_MAD_SNOOP_SEND_COMPLETIONS); 2173 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 2174 2175 if (queued_send_wr) { 2176 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, 2177 &bad_send_wr); 2178 if (ret) { 2179 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret); 2180 mad_send_wr = queued_send_wr; 2181 wc->status = IB_WC_LOC_QP_OP_ERR; 2182 goto retry; 2183 } 2184 } 2185 } 2186 2187 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) 2188 { 2189 struct ib_mad_send_wr_private *mad_send_wr; 2190 struct ib_mad_list_head *mad_list; 2191 unsigned long flags; 2192 2193 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 2194 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { 2195 mad_send_wr = container_of(mad_list, 2196 struct ib_mad_send_wr_private, 2197 mad_list); 2198 mad_send_wr->retry = 1; 2199 } 2200 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 2201 } 2202 2203 static void mad_error_handler(struct ib_mad_port_private *port_priv, 2204 struct ib_wc *wc) 2205 { 2206 struct ib_mad_list_head *mad_list; 2207 struct ib_mad_qp_info *qp_info; 2208 struct ib_mad_send_wr_private *mad_send_wr; 2209 int ret; 2210 2211 /* Determine if failure was a send or receive */ 2212 mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; 2213 qp_info = mad_list->mad_queue->qp_info; 2214 if (mad_list->mad_queue == &qp_info->recv_queue) 2215 /* 2216 * Receive errors indicate that the QP has entered the error 
2217 * state - error handling/shutdown code will cleanup 2218 */ 2219 return; 2220 2221 /* 2222 * Send errors will transition the QP to SQE - move 2223 * QP to RTS and repost flushed work requests 2224 */ 2225 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, 2226 mad_list); 2227 if (wc->status == IB_WC_WR_FLUSH_ERR) { 2228 if (mad_send_wr->retry) { 2229 /* Repost send */ 2230 struct ib_send_wr *bad_send_wr; 2231 2232 mad_send_wr->retry = 0; 2233 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr, 2234 &bad_send_wr); 2235 if (ret) 2236 ib_mad_send_done_handler(port_priv, wc); 2237 } else 2238 ib_mad_send_done_handler(port_priv, wc); 2239 } else { 2240 struct ib_qp_attr *attr; 2241 2242 /* Transition QP to RTS and fail offending send */ 2243 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2244 if (attr) { 2245 attr->qp_state = IB_QPS_RTS; 2246 attr->cur_qp_state = IB_QPS_SQE; 2247 ret = ib_modify_qp(qp_info->qp, attr, 2248 IB_QP_STATE | IB_QP_CUR_STATE); 2249 kfree(attr); 2250 if (ret) 2251 printk(KERN_ERR PFX "mad_error_handler - " 2252 "ib_modify_qp to RTS : %d\n", ret); 2253 else 2254 mark_sends_for_retry(qp_info); 2255 } 2256 ib_mad_send_done_handler(port_priv, wc); 2257 } 2258 } 2259 2260 /* 2261 * IB MAD completion callback 2262 */ 2263 static void ib_mad_completion_handler(struct work_struct *work) 2264 { 2265 struct ib_mad_port_private *port_priv; 2266 struct ib_wc wc; 2267 2268 port_priv = container_of(work, struct ib_mad_port_private, work); 2269 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 2270 2271 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { 2272 if (wc.status == IB_WC_SUCCESS) { 2273 switch (wc.opcode) { 2274 case IB_WC_SEND: 2275 ib_mad_send_done_handler(port_priv, &wc); 2276 break; 2277 case IB_WC_RECV: 2278 ib_mad_recv_done_handler(port_priv, &wc); 2279 break; 2280 default: 2281 BUG_ON(1); 2282 break; 2283 } 2284 } else 2285 mad_error_handler(port_priv, &wc); 2286 } 2287 } 2288 2289 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) 2290 { 2291 unsigned long flags; 2292 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr; 2293 struct ib_mad_send_wc mad_send_wc; 2294 struct list_head cancel_list; 2295 2296 INIT_LIST_HEAD(&cancel_list); 2297 2298 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2299 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2300 &mad_agent_priv->send_list, agent_list) { 2301 if (mad_send_wr->status == IB_WC_SUCCESS) { 2302 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2303 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2304 } 2305 } 2306 2307 /* Empty wait list to prevent receives from finding a request */ 2308 list_splice_init(&mad_agent_priv->wait_list, &cancel_list); 2309 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2310 2311 /* Report all cancelled requests */ 2312 mad_send_wc.status = IB_WC_WR_FLUSH_ERR; 2313 mad_send_wc.vendor_err = 0; 2314 2315 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2316 &cancel_list, agent_list) { 2317 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2318 list_del(&mad_send_wr->agent_list); 2319 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2320 &mad_send_wc); 2321 atomic_dec(&mad_agent_priv->refcount); 2322 } 2323 } 2324 2325 static struct ib_mad_send_wr_private* 2326 find_send_wr(struct ib_mad_agent_private *mad_agent_priv, 2327 struct ib_mad_send_buf *send_buf) 2328 { 2329 struct ib_mad_send_wr_private *mad_send_wr; 2330 2331 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 2332 agent_list) { 2333 if 
(&mad_send_wr->send_buf == send_buf) 2334 return mad_send_wr; 2335 } 2336 2337 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 2338 agent_list) { 2339 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && 2340 &mad_send_wr->send_buf == send_buf) 2341 return mad_send_wr; 2342 } 2343 return NULL; 2344 } 2345 2346 int ib_modify_mad(struct ib_mad_agent *mad_agent, 2347 struct ib_mad_send_buf *send_buf, u32 timeout_ms) 2348 { 2349 struct ib_mad_agent_private *mad_agent_priv; 2350 struct ib_mad_send_wr_private *mad_send_wr; 2351 unsigned long flags; 2352 int active; 2353 2354 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 2355 agent); 2356 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2357 mad_send_wr = find_send_wr(mad_agent_priv, send_buf); 2358 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { 2359 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2360 return -EINVAL; 2361 } 2362 2363 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); 2364 if (!timeout_ms) { 2365 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2366 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2367 } 2368 2369 mad_send_wr->send_buf.timeout_ms = timeout_ms; 2370 if (active) 2371 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2372 else 2373 ib_reset_mad_timeout(mad_send_wr, timeout_ms); 2374 2375 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2376 return 0; 2377 } 2378 EXPORT_SYMBOL(ib_modify_mad); 2379 2380 void ib_cancel_mad(struct ib_mad_agent *mad_agent, 2381 struct ib_mad_send_buf *send_buf) 2382 { 2383 ib_modify_mad(mad_agent, send_buf, 0); 2384 } 2385 EXPORT_SYMBOL(ib_cancel_mad); 2386 2387 static void local_completions(struct work_struct *work) 2388 { 2389 struct ib_mad_agent_private *mad_agent_priv; 2390 struct ib_mad_local_private *local; 2391 struct ib_mad_agent_private *recv_mad_agent; 2392 unsigned long flags; 2393 int free_mad; 2394 struct ib_wc wc; 2395 struct ib_mad_send_wc mad_send_wc; 2396 2397 mad_agent_priv = 2398 container_of(work, struct ib_mad_agent_private, local_work); 2399 2400 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2401 while (!list_empty(&mad_agent_priv->local_list)) { 2402 local = list_entry(mad_agent_priv->local_list.next, 2403 struct ib_mad_local_private, 2404 completion_list); 2405 list_del(&local->completion_list); 2406 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2407 free_mad = 0; 2408 if (local->mad_priv) { 2409 recv_mad_agent = local->recv_mad_agent; 2410 if (!recv_mad_agent) { 2411 printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); 2412 free_mad = 1; 2413 goto local_send_completion; 2414 } 2415 2416 /* 2417 * Defined behavior is to complete response 2418 * before request 2419 */ 2420 build_smp_wc(recv_mad_agent->agent.qp, 2421 (unsigned long) local->mad_send_wr, 2422 be16_to_cpu(IB_LID_PERMISSIVE), 2423 0, recv_mad_agent->agent.port_num, &wc); 2424 2425 local->mad_priv->header.recv_wc.wc = &wc; 2426 local->mad_priv->header.recv_wc.mad_len = 2427 sizeof(struct ib_mad); 2428 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); 2429 list_add(&local->mad_priv->header.recv_wc.recv_buf.list, 2430 &local->mad_priv->header.recv_wc.rmpp_list); 2431 local->mad_priv->header.recv_wc.recv_buf.grh = NULL; 2432 local->mad_priv->header.recv_wc.recv_buf.mad = 2433 &local->mad_priv->mad.mad; 2434 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) 2435 snoop_recv(recv_mad_agent->qp_info, 2436 &local->mad_priv->header.recv_wc, 2437 IB_MAD_SNOOP_RECVS); 2438 
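			/*
			 * Deliver the locally routed MAD to the receiving
			 * agent just as if it had arrived off the wire, then
			 * drop the reference held on that agent for this
			 * local completion.
			 */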
recv_mad_agent->agent.recv_handler( 2439 &recv_mad_agent->agent, 2440 &local->mad_priv->header.recv_wc); 2441 spin_lock_irqsave(&recv_mad_agent->lock, flags); 2442 atomic_dec(&recv_mad_agent->refcount); 2443 spin_unlock_irqrestore(&recv_mad_agent->lock, flags); 2444 } 2445 2446 local_send_completion: 2447 /* Complete send */ 2448 mad_send_wc.status = IB_WC_SUCCESS; 2449 mad_send_wc.vendor_err = 0; 2450 mad_send_wc.send_buf = &local->mad_send_wr->send_buf; 2451 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) 2452 snoop_send(mad_agent_priv->qp_info, 2453 &local->mad_send_wr->send_buf, 2454 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); 2455 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2456 &mad_send_wc); 2457 2458 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2459 atomic_dec(&mad_agent_priv->refcount); 2460 if (free_mad) 2461 kmem_cache_free(ib_mad_cache, local->mad_priv); 2462 kfree(local); 2463 } 2464 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2465 } 2466 2467 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) 2468 { 2469 int ret; 2470 2471 if (!mad_send_wr->retries_left) 2472 return -ETIMEDOUT; 2473 2474 mad_send_wr->retries_left--; 2475 mad_send_wr->send_buf.retries++; 2476 2477 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); 2478 2479 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { 2480 ret = ib_retry_rmpp(mad_send_wr); 2481 switch (ret) { 2482 case IB_RMPP_RESULT_UNHANDLED: 2483 ret = ib_send_mad(mad_send_wr); 2484 break; 2485 case IB_RMPP_RESULT_CONSUMED: 2486 ret = 0; 2487 break; 2488 default: 2489 ret = -ECOMM; 2490 break; 2491 } 2492 } else 2493 ret = ib_send_mad(mad_send_wr); 2494 2495 if (!ret) { 2496 mad_send_wr->refcount++; 2497 list_add_tail(&mad_send_wr->agent_list, 2498 &mad_send_wr->mad_agent_priv->send_list); 2499 } 2500 return ret; 2501 } 2502 2503 static void timeout_sends(struct work_struct *work) 2504 { 2505 struct ib_mad_agent_private *mad_agent_priv; 2506 struct ib_mad_send_wr_private *mad_send_wr; 2507 struct ib_mad_send_wc mad_send_wc; 2508 unsigned long flags, delay; 2509 2510 mad_agent_priv = container_of(work, struct ib_mad_agent_private, 2511 timed_work.work); 2512 mad_send_wc.vendor_err = 0; 2513 2514 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2515 while (!list_empty(&mad_agent_priv->wait_list)) { 2516 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2517 struct ib_mad_send_wr_private, 2518 agent_list); 2519 2520 if (time_after(mad_send_wr->timeout, jiffies)) { 2521 delay = mad_send_wr->timeout - jiffies; 2522 if ((long)delay <= 0) 2523 delay = 1; 2524 queue_delayed_work(mad_agent_priv->qp_info-> 2525 port_priv->wq, 2526 &mad_agent_priv->timed_work, delay); 2527 break; 2528 } 2529 2530 list_del(&mad_send_wr->agent_list); 2531 if (mad_send_wr->status == IB_WC_SUCCESS && 2532 !retry_send(mad_send_wr)) 2533 continue; 2534 2535 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2536 2537 if (mad_send_wr->status == IB_WC_SUCCESS) 2538 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; 2539 else 2540 mad_send_wc.status = mad_send_wr->status; 2541 mad_send_wc.send_buf = &mad_send_wr->send_buf; 2542 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2543 &mad_send_wc); 2544 2545 atomic_dec(&mad_agent_priv->refcount); 2546 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2547 } 2548 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2549 } 2550 2551 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg) 2552 { 2553 struct ib_mad_port_private 
*port_priv = cq->cq_context; 2554 unsigned long flags; 2555 2556 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 2557 if (!list_empty(&port_priv->port_list)) 2558 queue_work(port_priv->wq, &port_priv->work); 2559 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 2560 } 2561 2562 /* 2563 * Allocate receive MADs and post receive WRs for them 2564 */ 2565 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, 2566 struct ib_mad_private *mad) 2567 { 2568 unsigned long flags; 2569 int post, ret; 2570 struct ib_mad_private *mad_priv; 2571 struct ib_sge sg_list; 2572 struct ib_recv_wr recv_wr, *bad_recv_wr; 2573 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; 2574 2575 /* Initialize common scatter list fields */ 2576 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header; 2577 sg_list.lkey = (*qp_info->port_priv->mr).lkey; 2578 2579 /* Initialize common receive WR fields */ 2580 recv_wr.next = NULL; 2581 recv_wr.sg_list = &sg_list; 2582 recv_wr.num_sge = 1; 2583 2584 do { 2585 /* Allocate and map receive buffer */ 2586 if (mad) { 2587 mad_priv = mad; 2588 mad = NULL; 2589 } else { 2590 mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); 2591 if (!mad_priv) { 2592 printk(KERN_ERR PFX "No memory for receive buffer\n"); 2593 ret = -ENOMEM; 2594 break; 2595 } 2596 } 2597 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, 2598 &mad_priv->grh, 2599 sizeof *mad_priv - 2600 sizeof mad_priv->header, 2601 DMA_FROM_DEVICE); 2602 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, 2603 sg_list.addr))) { 2604 ret = -ENOMEM; 2605 break; 2606 } 2607 mad_priv->header.mapping = sg_list.addr; 2608 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; 2609 mad_priv->header.mad_list.mad_queue = recv_queue; 2610 2611 /* Post receive WR */ 2612 spin_lock_irqsave(&recv_queue->lock, flags); 2613 post = (++recv_queue->count < recv_queue->max_active); 2614 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); 2615 spin_unlock_irqrestore(&recv_queue->lock, flags); 2616 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); 2617 if (ret) { 2618 spin_lock_irqsave(&recv_queue->lock, flags); 2619 list_del(&mad_priv->header.mad_list.list); 2620 recv_queue->count--; 2621 spin_unlock_irqrestore(&recv_queue->lock, flags); 2622 ib_dma_unmap_single(qp_info->port_priv->device, 2623 mad_priv->header.mapping, 2624 sizeof *mad_priv - 2625 sizeof mad_priv->header, 2626 DMA_FROM_DEVICE); 2627 kmem_cache_free(ib_mad_cache, mad_priv); 2628 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); 2629 break; 2630 } 2631 } while (post); 2632 2633 return ret; 2634 } 2635 2636 /* 2637 * Return all the posted receive MADs 2638 */ 2639 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) 2640 { 2641 struct ib_mad_private_header *mad_priv_hdr; 2642 struct ib_mad_private *recv; 2643 struct ib_mad_list_head *mad_list; 2644 2645 if (!qp_info->qp) 2646 return; 2647 2648 while (!list_empty(&qp_info->recv_queue.list)) { 2649 2650 mad_list = list_entry(qp_info->recv_queue.list.next, 2651 struct ib_mad_list_head, list); 2652 mad_priv_hdr = container_of(mad_list, 2653 struct ib_mad_private_header, 2654 mad_list); 2655 recv = container_of(mad_priv_hdr, struct ib_mad_private, 2656 header); 2657 2658 /* Remove from posted receive MAD list */ 2659 list_del(&mad_list->list); 2660 2661 ib_dma_unmap_single(qp_info->port_priv->device, 2662 recv->header.mapping, 2663 sizeof(struct ib_mad_private) - 2664 sizeof(struct ib_mad_private_header), 2665 DMA_FROM_DEVICE); 2666 
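		/* Buffer is now unmapped; return it to the MAD slab cache */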
kmem_cache_free(ib_mad_cache, recv); 2667 } 2668 2669 qp_info->recv_queue.count = 0; 2670 } 2671 2672 /* 2673 * Start the port 2674 */ 2675 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) 2676 { 2677 int ret, i; 2678 struct ib_qp_attr *attr; 2679 struct ib_qp *qp; 2680 u16 pkey_index; 2681 2682 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2683 if (!attr) { 2684 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); 2685 return -ENOMEM; 2686 } 2687 2688 ret = ib_find_pkey(port_priv->device, port_priv->port_num, 2689 IB_DEFAULT_PKEY_FULL, &pkey_index); 2690 if (ret) 2691 pkey_index = 0; 2692 2693 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 2694 qp = port_priv->qp_info[i].qp; 2695 if (!qp) 2696 continue; 2697 2698 /* 2699 * PKey index for QP1 is irrelevant but 2700 * one is needed for the Reset to Init transition 2701 */ 2702 attr->qp_state = IB_QPS_INIT; 2703 attr->pkey_index = pkey_index; 2704 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; 2705 ret = ib_modify_qp(qp, attr, IB_QP_STATE | 2706 IB_QP_PKEY_INDEX | IB_QP_QKEY); 2707 if (ret) { 2708 printk(KERN_ERR PFX "Couldn't change QP%d state to " 2709 "INIT: %d\n", i, ret); 2710 goto out; 2711 } 2712 2713 attr->qp_state = IB_QPS_RTR; 2714 ret = ib_modify_qp(qp, attr, IB_QP_STATE); 2715 if (ret) { 2716 printk(KERN_ERR PFX "Couldn't change QP%d state to " 2717 "RTR: %d\n", i, ret); 2718 goto out; 2719 } 2720 2721 attr->qp_state = IB_QPS_RTS; 2722 attr->sq_psn = IB_MAD_SEND_Q_PSN; 2723 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); 2724 if (ret) { 2725 printk(KERN_ERR PFX "Couldn't change QP%d state to " 2726 "RTS: %d\n", i, ret); 2727 goto out; 2728 } 2729 } 2730 2731 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); 2732 if (ret) { 2733 printk(KERN_ERR PFX "Failed to request completion " 2734 "notification: %d\n", ret); 2735 goto out; 2736 } 2737 2738 for (i = 0; i < IB_MAD_QPS_CORE; i++) { 2739 if (!port_priv->qp_info[i].qp) 2740 continue; 2741 2742 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); 2743 if (ret) { 2744 printk(KERN_ERR PFX "Couldn't post receive WRs\n"); 2745 goto out; 2746 } 2747 } 2748 out: 2749 kfree(attr); 2750 return ret; 2751 } 2752 2753 static void qp_event_handler(struct ib_event *event, void *qp_context) 2754 { 2755 struct ib_mad_qp_info *qp_info = qp_context; 2756 2757 /* It's worse than that! He's dead, Jim! 
*/ 2758 printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n", 2759 event->event, qp_info->qp->qp_num); 2760 } 2761 2762 static void init_mad_queue(struct ib_mad_qp_info *qp_info, 2763 struct ib_mad_queue *mad_queue) 2764 { 2765 mad_queue->qp_info = qp_info; 2766 mad_queue->count = 0; 2767 spin_lock_init(&mad_queue->lock); 2768 INIT_LIST_HEAD(&mad_queue->list); 2769 } 2770 2771 static void init_mad_qp(struct ib_mad_port_private *port_priv, 2772 struct ib_mad_qp_info *qp_info) 2773 { 2774 qp_info->port_priv = port_priv; 2775 init_mad_queue(qp_info, &qp_info->send_queue); 2776 init_mad_queue(qp_info, &qp_info->recv_queue); 2777 INIT_LIST_HEAD(&qp_info->overflow_list); 2778 spin_lock_init(&qp_info->snoop_lock); 2779 qp_info->snoop_table = NULL; 2780 qp_info->snoop_table_size = 0; 2781 atomic_set(&qp_info->snoop_count, 0); 2782 } 2783 2784 static int create_mad_qp(struct ib_mad_qp_info *qp_info, 2785 enum ib_qp_type qp_type) 2786 { 2787 struct ib_qp_init_attr qp_init_attr; 2788 int ret; 2789 2790 memset(&qp_init_attr, 0, sizeof qp_init_attr); 2791 qp_init_attr.send_cq = qp_info->port_priv->cq; 2792 qp_init_attr.recv_cq = qp_info->port_priv->cq; 2793 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 2794 qp_init_attr.cap.max_send_wr = mad_sendq_size; 2795 qp_init_attr.cap.max_recv_wr = mad_recvq_size; 2796 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; 2797 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; 2798 qp_init_attr.qp_type = qp_type; 2799 qp_init_attr.port_num = qp_info->port_priv->port_num; 2800 qp_init_attr.qp_context = qp_info; 2801 qp_init_attr.event_handler = qp_event_handler; 2802 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); 2803 if (IS_ERR(qp_info->qp)) { 2804 printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n", 2805 get_spl_qp_index(qp_type)); 2806 ret = PTR_ERR(qp_info->qp); 2807 goto error; 2808 } 2809 /* Use minimum queue sizes unless the CQ is resized */ 2810 qp_info->send_queue.max_active = mad_sendq_size; 2811 qp_info->recv_queue.max_active = mad_recvq_size; 2812 return 0; 2813 2814 error: 2815 return ret; 2816 } 2817 2818 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) 2819 { 2820 if (!qp_info->qp) 2821 return; 2822 2823 ib_destroy_qp(qp_info->qp); 2824 kfree(qp_info->snoop_table); 2825 } 2826 2827 /* 2828 * Open the port 2829 * Create the QP, PD, MR, and CQ if needed 2830 */ 2831 static int ib_mad_port_open(struct ib_device *device, 2832 int port_num) 2833 { 2834 int ret, cq_size; 2835 struct ib_mad_port_private *port_priv; 2836 unsigned long flags; 2837 char name[sizeof "ib_mad123"]; 2838 int has_smi; 2839 2840 /* Create new device info */ 2841 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); 2842 if (!port_priv) { 2843 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n"); 2844 return -ENOMEM; 2845 } 2846 2847 port_priv->device = device; 2848 port_priv->port_num = port_num; 2849 spin_lock_init(&port_priv->reg_lock); 2850 INIT_LIST_HEAD(&port_priv->agent_list); 2851 init_mad_qp(port_priv, &port_priv->qp_info[0]); 2852 init_mad_qp(port_priv, &port_priv->qp_info[1]); 2853 2854 cq_size = mad_sendq_size + mad_recvq_size; 2855 has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND; 2856 if (has_smi) 2857 cq_size *= 2; 2858 2859 port_priv->cq = ib_create_cq(port_priv->device, 2860 ib_mad_thread_completion_handler, 2861 NULL, port_priv, cq_size, 0); 2862 if (IS_ERR(port_priv->cq)) { 2863 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n"); 2864 ret = PTR_ERR(port_priv->cq); 2865 goto error3; 2866 } 
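	/*
	 * The CQ is in place; next allocate the PD and a local-write DMA MR
	 * for the receive buffers, then create the special QPs (QP0 only
	 * when the port's link layer is InfiniBand).
	 */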
2867 2868 port_priv->pd = ib_alloc_pd(device); 2869 if (IS_ERR(port_priv->pd)) { 2870 printk(KERN_ERR PFX "Couldn't create ib_mad PD\n"); 2871 ret = PTR_ERR(port_priv->pd); 2872 goto error4; 2873 } 2874 2875 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE); 2876 if (IS_ERR(port_priv->mr)) { 2877 printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n"); 2878 ret = PTR_ERR(port_priv->mr); 2879 goto error5; 2880 } 2881 2882 if (has_smi) { 2883 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); 2884 if (ret) 2885 goto error6; 2886 } 2887 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); 2888 if (ret) 2889 goto error7; 2890 2891 snprintf(name, sizeof name, "ib_mad%d", port_num); 2892 port_priv->wq = create_singlethread_workqueue(name); 2893 if (!port_priv->wq) { 2894 ret = -ENOMEM; 2895 goto error8; 2896 } 2897 INIT_WORK(&port_priv->work, ib_mad_completion_handler); 2898 2899 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 2900 list_add_tail(&port_priv->port_list, &ib_mad_port_list); 2901 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 2902 2903 ret = ib_mad_port_start(port_priv); 2904 if (ret) { 2905 printk(KERN_ERR PFX "Couldn't start port\n"); 2906 goto error9; 2907 } 2908 2909 return 0; 2910 2911 error9: 2912 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 2913 list_del_init(&port_priv->port_list); 2914 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 2915 2916 destroy_workqueue(port_priv->wq); 2917 error8: 2918 destroy_mad_qp(&port_priv->qp_info[1]); 2919 error7: 2920 destroy_mad_qp(&port_priv->qp_info[0]); 2921 error6: 2922 ib_dereg_mr(port_priv->mr); 2923 error5: 2924 ib_dealloc_pd(port_priv->pd); 2925 error4: 2926 ib_destroy_cq(port_priv->cq); 2927 cleanup_recv_queue(&port_priv->qp_info[1]); 2928 cleanup_recv_queue(&port_priv->qp_info[0]); 2929 error3: 2930 kfree(port_priv); 2931 2932 return ret; 2933 } 2934 2935 /* 2936 * Close the port 2937 * If there are no classes using the port, free the port 2938 * resources (CQ, MR, PD, QP) and remove the port's info structure 2939 */ 2940 static int ib_mad_port_close(struct ib_device *device, int port_num) 2941 { 2942 struct ib_mad_port_private *port_priv; 2943 unsigned long flags; 2944 2945 spin_lock_irqsave(&ib_mad_port_list_lock, flags); 2946 port_priv = __ib_get_mad_port(device, port_num); 2947 if (port_priv == NULL) { 2948 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 2949 printk(KERN_ERR PFX "Port %d not found\n", port_num); 2950 return -ENODEV; 2951 } 2952 list_del_init(&port_priv->port_list); 2953 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); 2954 2955 destroy_workqueue(port_priv->wq); 2956 destroy_mad_qp(&port_priv->qp_info[1]); 2957 destroy_mad_qp(&port_priv->qp_info[0]); 2958 ib_dereg_mr(port_priv->mr); 2959 ib_dealloc_pd(port_priv->pd); 2960 ib_destroy_cq(port_priv->cq); 2961 cleanup_recv_queue(&port_priv->qp_info[1]); 2962 cleanup_recv_queue(&port_priv->qp_info[0]); 2963 /* XXX: Handle deallocation of MAD registration tables */ 2964 2965 kfree(port_priv); 2966 2967 return 0; 2968 } 2969 2970 static void ib_mad_init_device(struct ib_device *device) 2971 { 2972 int start, end, i; 2973 2974 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) 2975 return; 2976 2977 if (device->node_type == RDMA_NODE_IB_SWITCH) { 2978 start = 0; 2979 end = 0; 2980 } else { 2981 start = 1; 2982 end = device->phys_port_cnt; 2983 } 2984 2985 for (i = start; i <= end; i++) { 2986 if (ib_mad_port_open(device, i)) { 2987 printk(KERN_ERR PFX "Couldn't open %s port %d\n", 2988 
device->name, i); 2989 goto error; 2990 } 2991 if (ib_agent_port_open(device, i)) { 2992 printk(KERN_ERR PFX "Couldn't open %s port %d " 2993 "for agents\n", 2994 device->name, i); 2995 goto error_agent; 2996 } 2997 } 2998 return; 2999 3000 error_agent: 3001 if (ib_mad_port_close(device, i)) 3002 printk(KERN_ERR PFX "Couldn't close %s port %d\n", 3003 device->name, i); 3004 3005 error: 3006 i--; 3007 3008 while (i >= start) { 3009 if (ib_agent_port_close(device, i)) 3010 printk(KERN_ERR PFX "Couldn't close %s port %d " 3011 "for agents\n", 3012 device->name, i); 3013 if (ib_mad_port_close(device, i)) 3014 printk(KERN_ERR PFX "Couldn't close %s port %d\n", 3015 device->name, i); 3016 i--; 3017 } 3018 } 3019 3020 static void ib_mad_remove_device(struct ib_device *device) 3021 { 3022 int i, num_ports, cur_port; 3023 3024 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) 3025 return; 3026 3027 if (device->node_type == RDMA_NODE_IB_SWITCH) { 3028 num_ports = 1; 3029 cur_port = 0; 3030 } else { 3031 num_ports = device->phys_port_cnt; 3032 cur_port = 1; 3033 } 3034 for (i = 0; i < num_ports; i++, cur_port++) { 3035 if (ib_agent_port_close(device, cur_port)) 3036 printk(KERN_ERR PFX "Couldn't close %s port %d " 3037 "for agents\n", 3038 device->name, cur_port); 3039 if (ib_mad_port_close(device, cur_port)) 3040 printk(KERN_ERR PFX "Couldn't close %s port %d\n", 3041 device->name, cur_port); 3042 } 3043 } 3044 3045 static struct ib_client mad_client = { 3046 .name = "mad", 3047 .add = ib_mad_init_device, 3048 .remove = ib_mad_remove_device 3049 }; 3050 3051 static int __init ib_mad_init_module(void) 3052 { 3053 int ret; 3054 3055 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); 3056 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); 3057 3058 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); 3059 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); 3060 3061 ib_mad_cache = kmem_cache_create("ib_mad", 3062 sizeof(struct ib_mad_private), 3063 0, 3064 SLAB_HWCACHE_ALIGN, 3065 NULL); 3066 if (!ib_mad_cache) { 3067 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n"); 3068 ret = -ENOMEM; 3069 goto error1; 3070 } 3071 3072 INIT_LIST_HEAD(&ib_mad_port_list); 3073 3074 if (ib_register_client(&mad_client)) { 3075 printk(KERN_ERR PFX "Couldn't register ib_mad client\n"); 3076 ret = -EINVAL; 3077 goto error2; 3078 } 3079 3080 return 0; 3081 3082 error2: 3083 kmem_cache_destroy(ib_mad_cache); 3084 error1: 3085 return ret; 3086 } 3087 3088 static void __exit ib_mad_cleanup_module(void) 3089 { 3090 ib_unregister_client(&mad_client); 3091 kmem_cache_destroy(ib_mad_cache); 3092 } 3093 3094 module_init(ib_mad_init_module); 3095 module_exit(ib_mad_cleanup_module); 3096
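/*
 * Illustrative sketch (not part of the original file): a minimal example of
 * how a kernel client might drive the interface implemented above.  It
 * registers an agent on QP1 with a NULL ib_mad_reg_req, so the agent can
 * send requests and receive their responses; cancelling a send flows through
 * ib_cancel_mad()/ib_modify_mad() defined earlier in this file.  The
 * example_* names are hypothetical.
 */
#if 0
static void example_send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *mad_send_wc)
{
	/* mad_send_wc->status was filled in by ib_mad_complete_send_wr(),
	 * cancel_mads() or timeout_sends() above */
}

static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	/* the response was matched to its request via ib_find_send_mad() */
	ib_free_recv_mad(mad_recv_wc);
}

static struct ib_mad_agent *example_open_agent(struct ib_device *device,
					       u8 port_num)
{
	return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				     NULL, 0, /* no unsolicited MADs, no RMPP */
				     example_send_handler,
				     example_recv_handler, NULL);
}
#endif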