1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 5 * Copyright (c) 2008 Cisco. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #define pr_fmt(fmt) "user_mad: " fmt 37 38 #include <linux/module.h> 39 #include <linux/init.h> 40 #include <linux/device.h> 41 #include <linux/err.h> 42 #include <linux/fs.h> 43 #include <linux/cdev.h> 44 #include <linux/dma-mapping.h> 45 #include <linux/poll.h> 46 #include <linux/mutex.h> 47 #include <linux/kref.h> 48 #include <linux/compat.h> 49 #include <linux/sched.h> 50 #include <linux/semaphore.h> 51 #include <linux/slab.h> 52 53 #include <linux/uaccess.h> 54 55 #include <rdma/ib_mad.h> 56 #include <rdma/ib_user_mad.h> 57 58 MODULE_AUTHOR("Roland Dreier"); 59 MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); 60 MODULE_LICENSE("Dual BSD/GPL"); 61 62 enum { 63 IB_UMAD_MAX_PORTS = 64, 64 IB_UMAD_MAX_AGENTS = 32, 65 66 IB_UMAD_MAJOR = 231, 67 IB_UMAD_MINOR_BASE = 0 68 }; 69 70 /* 71 * Our lifetime rules for these structs are the following: 72 * device special file is opened, we take a reference on the 73 * ib_umad_port's struct ib_umad_device. We drop these 74 * references in the corresponding close(). 75 * 76 * In addition to references coming from open character devices, there 77 * is one more reference to each ib_umad_device representing the 78 * module's reference taken when allocating the ib_umad_device in 79 * ib_umad_add_one(). 80 * 81 * When destroying an ib_umad_device, we drop the module's reference. 82 */ 83 84 struct ib_umad_port { 85 struct cdev cdev; 86 struct device *dev; 87 88 struct cdev sm_cdev; 89 struct device *sm_dev; 90 struct semaphore sm_sem; 91 92 struct mutex file_mutex; 93 struct list_head file_list; 94 95 struct ib_device *ib_dev; 96 struct ib_umad_device *umad_dev; 97 int dev_num; 98 u8 port_num; 99 }; 100 101 struct ib_umad_device { 102 struct kobject kobj; 103 struct ib_umad_port port[0]; 104 }; 105 106 struct ib_umad_file { 107 struct mutex mutex; 108 struct ib_umad_port *port; 109 struct list_head recv_list; 110 struct list_head send_list; 111 struct list_head port_list; 112 spinlock_t send_lock; 113 wait_queue_head_t recv_wait; 114 struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; 115 int agents_dead; 116 u8 use_pkey_index; 117 u8 already_used; 118 }; 119 120 struct ib_umad_packet { 121 struct ib_mad_send_buf *msg; 122 struct ib_mad_recv_wc *recv_wc; 123 struct list_head list; 124 int length; 125 struct ib_user_mad mad; 126 }; 127 128 static struct class *umad_class; 129 130 static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); 131 132 static DEFINE_SPINLOCK(port_lock); 133 static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS); 134 135 static void ib_umad_add_one(struct ib_device *device); 136 static void ib_umad_remove_one(struct ib_device *device, void *client_data); 137 138 static void ib_umad_release_dev(struct kobject *kobj) 139 { 140 struct ib_umad_device *dev = 141 container_of(kobj, struct ib_umad_device, kobj); 142 143 kfree(dev); 144 } 145 146 static struct kobj_type ib_umad_dev_ktype = { 147 .release = ib_umad_release_dev, 148 }; 149 150 static int hdr_size(struct ib_umad_file *file) 151 { 152 return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) : 153 sizeof (struct ib_user_mad_hdr_old); 154 } 155 156 /* caller must hold file->mutex */ 157 static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id) 158 { 159 return file->agents_dead ? NULL : file->agent[id]; 160 } 161 162 static int queue_packet(struct ib_umad_file *file, 163 struct ib_mad_agent *agent, 164 struct ib_umad_packet *packet) 165 { 166 int ret = 1; 167 168 mutex_lock(&file->mutex); 169 170 for (packet->mad.hdr.id = 0; 171 packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; 172 packet->mad.hdr.id++) 173 if (agent == __get_agent(file, packet->mad.hdr.id)) { 174 list_add_tail(&packet->list, &file->recv_list); 175 wake_up_interruptible(&file->recv_wait); 176 ret = 0; 177 break; 178 } 179 180 mutex_unlock(&file->mutex); 181 182 return ret; 183 } 184 185 static void dequeue_send(struct ib_umad_file *file, 186 struct ib_umad_packet *packet) 187 { 188 spin_lock_irq(&file->send_lock); 189 list_del(&packet->list); 190 spin_unlock_irq(&file->send_lock); 191 } 192 193 static void send_handler(struct ib_mad_agent *agent, 194 struct ib_mad_send_wc *send_wc) 195 { 196 struct ib_umad_file *file = agent->context; 197 struct ib_umad_packet *packet = send_wc->send_buf->context[0]; 198 199 dequeue_send(file, packet); 200 rdma_destroy_ah(packet->msg->ah); 201 ib_free_send_mad(packet->msg); 202 203 if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { 204 packet->length = IB_MGMT_MAD_HDR; 205 packet->mad.hdr.status = ETIMEDOUT; 206 if (!queue_packet(file, agent, packet)) 207 return; 208 } 209 kfree(packet); 210 } 211 212 static void recv_handler(struct ib_mad_agent *agent, 213 struct ib_mad_send_buf *send_buf, 214 struct ib_mad_recv_wc *mad_recv_wc) 215 { 216 struct ib_umad_file *file = agent->context; 217 struct ib_umad_packet *packet; 218 219 if (mad_recv_wc->wc->status != IB_WC_SUCCESS) 220 goto err1; 221 222 packet = kzalloc(sizeof *packet, GFP_KERNEL); 223 if (!packet) 224 goto err1; 225 226 packet->length = mad_recv_wc->mad_len; 227 packet->recv_wc = mad_recv_wc; 228 229 packet->mad.hdr.status = 0; 230 packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len; 231 packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); 232 /* 233 * On OPA devices it is okay to lose the upper 16 bits of LID as this 234 * information is obtained elsewhere. Mask off the upper 16 bits. 235 */ 236 if (agent->device->port_immutable[agent->port_num].core_cap_flags & 237 RDMA_CORE_PORT_INTEL_OPA) 238 packet->mad.hdr.lid = ib_lid_be16(0xFFFF & 239 mad_recv_wc->wc->slid); 240 else 241 packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid); 242 packet->mad.hdr.sl = mad_recv_wc->wc->sl; 243 packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; 244 packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; 245 packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); 246 if (packet->mad.hdr.grh_present) { 247 struct rdma_ah_attr ah_attr; 248 const struct ib_global_route *grh; 249 250 ib_init_ah_from_wc(agent->device, agent->port_num, 251 mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, 252 &ah_attr); 253 254 grh = rdma_ah_read_grh(&ah_attr); 255 packet->mad.hdr.gid_index = grh->sgid_index; 256 packet->mad.hdr.hop_limit = grh->hop_limit; 257 packet->mad.hdr.traffic_class = grh->traffic_class; 258 memcpy(packet->mad.hdr.gid, &grh->dgid, 16); 259 packet->mad.hdr.flow_label = cpu_to_be32(grh->flow_label); 260 } 261 262 if (queue_packet(file, agent, packet)) 263 goto err2; 264 return; 265 266 err2: 267 kfree(packet); 268 err1: 269 ib_free_recv_mad(mad_recv_wc); 270 } 271 272 static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf, 273 struct ib_umad_packet *packet, size_t count) 274 { 275 struct ib_mad_recv_buf *recv_buf; 276 int left, seg_payload, offset, max_seg_payload; 277 size_t seg_size; 278 279 recv_buf = &packet->recv_wc->recv_buf; 280 seg_size = packet->recv_wc->mad_seg_size; 281 282 /* We need enough room to copy the first (or only) MAD segment. */ 283 if ((packet->length <= seg_size && 284 count < hdr_size(file) + packet->length) || 285 (packet->length > seg_size && 286 count < hdr_size(file) + seg_size)) 287 return -EINVAL; 288 289 if (copy_to_user(buf, &packet->mad, hdr_size(file))) 290 return -EFAULT; 291 292 buf += hdr_size(file); 293 seg_payload = min_t(int, packet->length, seg_size); 294 if (copy_to_user(buf, recv_buf->mad, seg_payload)) 295 return -EFAULT; 296 297 if (seg_payload < packet->length) { 298 /* 299 * Multipacket RMPP MAD message. Copy remainder of message. 300 * Note that last segment may have a shorter payload. 301 */ 302 if (count < hdr_size(file) + packet->length) { 303 /* 304 * The buffer is too small, return the first RMPP segment, 305 * which includes the RMPP message length. 306 */ 307 return -ENOSPC; 308 } 309 offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class); 310 max_seg_payload = seg_size - offset; 311 312 for (left = packet->length - seg_payload, buf += seg_payload; 313 left; left -= seg_payload, buf += seg_payload) { 314 recv_buf = container_of(recv_buf->list.next, 315 struct ib_mad_recv_buf, list); 316 seg_payload = min(left, max_seg_payload); 317 if (copy_to_user(buf, ((void *) recv_buf->mad) + offset, 318 seg_payload)) 319 return -EFAULT; 320 } 321 } 322 return hdr_size(file) + packet->length; 323 } 324 325 static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf, 326 struct ib_umad_packet *packet, size_t count) 327 { 328 ssize_t size = hdr_size(file) + packet->length; 329 330 if (count < size) 331 return -EINVAL; 332 333 if (copy_to_user(buf, &packet->mad, hdr_size(file))) 334 return -EFAULT; 335 336 buf += hdr_size(file); 337 338 if (copy_to_user(buf, packet->mad.data, packet->length)) 339 return -EFAULT; 340 341 return size; 342 } 343 344 static ssize_t ib_umad_read(struct file *filp, char __user *buf, 345 size_t count, loff_t *pos) 346 { 347 struct ib_umad_file *file = filp->private_data; 348 struct ib_umad_packet *packet; 349 ssize_t ret; 350 351 if (count < hdr_size(file)) 352 return -EINVAL; 353 354 mutex_lock(&file->mutex); 355 356 while (list_empty(&file->recv_list)) { 357 mutex_unlock(&file->mutex); 358 359 if (filp->f_flags & O_NONBLOCK) 360 return -EAGAIN; 361 362 if (wait_event_interruptible(file->recv_wait, 363 !list_empty(&file->recv_list))) 364 return -ERESTARTSYS; 365 366 mutex_lock(&file->mutex); 367 } 368 369 packet = list_entry(file->recv_list.next, struct ib_umad_packet, list); 370 list_del(&packet->list); 371 372 mutex_unlock(&file->mutex); 373 374 if (packet->recv_wc) 375 ret = copy_recv_mad(file, buf, packet, count); 376 else 377 ret = copy_send_mad(file, buf, packet, count); 378 379 if (ret < 0) { 380 /* Requeue packet */ 381 mutex_lock(&file->mutex); 382 list_add(&packet->list, &file->recv_list); 383 mutex_unlock(&file->mutex); 384 } else { 385 if (packet->recv_wc) 386 ib_free_recv_mad(packet->recv_wc); 387 kfree(packet); 388 } 389 return ret; 390 } 391 392 static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf) 393 { 394 int left, seg; 395 396 /* Copy class specific header */ 397 if ((msg->hdr_len > IB_MGMT_RMPP_HDR) && 398 copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR, 399 msg->hdr_len - IB_MGMT_RMPP_HDR)) 400 return -EFAULT; 401 402 /* All headers are in place. Copy data segments. */ 403 for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0; 404 seg++, left -= msg->seg_size, buf += msg->seg_size) { 405 if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf, 406 min(left, msg->seg_size))) 407 return -EFAULT; 408 } 409 return 0; 410 } 411 412 static int same_destination(struct ib_user_mad_hdr *hdr1, 413 struct ib_user_mad_hdr *hdr2) 414 { 415 if (!hdr1->grh_present && !hdr2->grh_present) 416 return (hdr1->lid == hdr2->lid); 417 418 if (hdr1->grh_present && hdr2->grh_present) 419 return !memcmp(hdr1->gid, hdr2->gid, 16); 420 421 return 0; 422 } 423 424 static int is_duplicate(struct ib_umad_file *file, 425 struct ib_umad_packet *packet) 426 { 427 struct ib_umad_packet *sent_packet; 428 struct ib_mad_hdr *sent_hdr, *hdr; 429 430 hdr = (struct ib_mad_hdr *) packet->mad.data; 431 list_for_each_entry(sent_packet, &file->send_list, list) { 432 sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data; 433 434 if ((hdr->tid != sent_hdr->tid) || 435 (hdr->mgmt_class != sent_hdr->mgmt_class)) 436 continue; 437 438 /* 439 * No need to be overly clever here. If two new operations have 440 * the same TID, reject the second as a duplicate. This is more 441 * restrictive than required by the spec. 442 */ 443 if (!ib_response_mad(hdr)) { 444 if (!ib_response_mad(sent_hdr)) 445 return 1; 446 continue; 447 } else if (!ib_response_mad(sent_hdr)) 448 continue; 449 450 if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr)) 451 return 1; 452 } 453 454 return 0; 455 } 456 457 static ssize_t ib_umad_write(struct file *filp, const char __user *buf, 458 size_t count, loff_t *pos) 459 { 460 struct ib_umad_file *file = filp->private_data; 461 struct ib_umad_packet *packet; 462 struct ib_mad_agent *agent; 463 struct rdma_ah_attr ah_attr; 464 struct ib_ah *ah; 465 struct ib_rmpp_mad *rmpp_mad; 466 __be64 *tid; 467 int ret, data_len, hdr_len, copy_offset, rmpp_active; 468 u8 base_version; 469 470 if (count < hdr_size(file) + IB_MGMT_RMPP_HDR) 471 return -EINVAL; 472 473 packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL); 474 if (!packet) 475 return -ENOMEM; 476 477 if (copy_from_user(&packet->mad, buf, hdr_size(file))) { 478 ret = -EFAULT; 479 goto err; 480 } 481 482 if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) { 483 ret = -EINVAL; 484 goto err; 485 } 486 487 buf += hdr_size(file); 488 489 if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) { 490 ret = -EFAULT; 491 goto err; 492 } 493 494 mutex_lock(&file->mutex); 495 496 agent = __get_agent(file, packet->mad.hdr.id); 497 if (!agent) { 498 ret = -EINVAL; 499 goto err_up; 500 } 501 502 memset(&ah_attr, 0, sizeof ah_attr); 503 ah_attr.type = rdma_ah_find_type(file->port->ib_dev, 504 file->port->port_num); 505 rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid)); 506 rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl); 507 rdma_ah_set_path_bits(&ah_attr, packet->mad.hdr.path_bits); 508 rdma_ah_set_port_num(&ah_attr, file->port->port_num); 509 if (packet->mad.hdr.grh_present) { 510 rdma_ah_set_grh(&ah_attr, NULL, 511 be32_to_cpu(packet->mad.hdr.flow_label), 512 packet->mad.hdr.gid_index, 513 packet->mad.hdr.hop_limit, 514 packet->mad.hdr.traffic_class); 515 rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid); 516 } 517 518 ah = rdma_create_user_ah(agent->qp->pd, &ah_attr, NULL); 519 if (IS_ERR(ah)) { 520 ret = PTR_ERR(ah); 521 goto err_up; 522 } 523 524 rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data; 525 hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); 526 527 if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class) 528 && ib_mad_kernel_rmpp_agent(agent)) { 529 copy_offset = IB_MGMT_RMPP_HDR; 530 rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 531 IB_MGMT_RMPP_FLAG_ACTIVE; 532 } else { 533 copy_offset = IB_MGMT_MAD_HDR; 534 rmpp_active = 0; 535 } 536 537 base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version; 538 data_len = count - hdr_size(file) - hdr_len; 539 packet->msg = ib_create_send_mad(agent, 540 be32_to_cpu(packet->mad.hdr.qpn), 541 packet->mad.hdr.pkey_index, rmpp_active, 542 hdr_len, data_len, GFP_KERNEL, 543 base_version); 544 if (IS_ERR(packet->msg)) { 545 ret = PTR_ERR(packet->msg); 546 goto err_ah; 547 } 548 549 packet->msg->ah = ah; 550 packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; 551 packet->msg->retries = packet->mad.hdr.retries; 552 packet->msg->context[0] = packet; 553 554 /* Copy MAD header. Any RMPP header is already in place. */ 555 memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR); 556 557 if (!rmpp_active) { 558 if (copy_from_user(packet->msg->mad + copy_offset, 559 buf + copy_offset, 560 hdr_len + data_len - copy_offset)) { 561 ret = -EFAULT; 562 goto err_msg; 563 } 564 } else { 565 ret = copy_rmpp_mad(packet->msg, buf); 566 if (ret) 567 goto err_msg; 568 } 569 570 /* 571 * Set the high-order part of the transaction ID to make MADs from 572 * different agents unique, and allow routing responses back to the 573 * original requestor. 574 */ 575 if (!ib_response_mad(packet->msg->mad)) { 576 tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; 577 *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | 578 (be64_to_cpup(tid) & 0xffffffff)); 579 rmpp_mad->mad_hdr.tid = *tid; 580 } 581 582 if (!ib_mad_kernel_rmpp_agent(agent) 583 && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class) 584 && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) { 585 spin_lock_irq(&file->send_lock); 586 list_add_tail(&packet->list, &file->send_list); 587 spin_unlock_irq(&file->send_lock); 588 } else { 589 spin_lock_irq(&file->send_lock); 590 ret = is_duplicate(file, packet); 591 if (!ret) 592 list_add_tail(&packet->list, &file->send_list); 593 spin_unlock_irq(&file->send_lock); 594 if (ret) { 595 ret = -EINVAL; 596 goto err_msg; 597 } 598 } 599 600 ret = ib_post_send_mad(packet->msg, NULL); 601 if (ret) 602 goto err_send; 603 604 mutex_unlock(&file->mutex); 605 return count; 606 607 err_send: 608 dequeue_send(file, packet); 609 err_msg: 610 ib_free_send_mad(packet->msg); 611 err_ah: 612 rdma_destroy_ah(ah); 613 err_up: 614 mutex_unlock(&file->mutex); 615 err: 616 kfree(packet); 617 return ret; 618 } 619 620 static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait) 621 { 622 struct ib_umad_file *file = filp->private_data; 623 624 /* we will always be able to post a MAD send */ 625 unsigned int mask = POLLOUT | POLLWRNORM; 626 627 poll_wait(filp, &file->recv_wait, wait); 628 629 if (!list_empty(&file->recv_list)) 630 mask |= POLLIN | POLLRDNORM; 631 632 return mask; 633 } 634 635 static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, 636 int compat_method_mask) 637 { 638 struct ib_user_mad_reg_req ureq; 639 struct ib_mad_reg_req req; 640 struct ib_mad_agent *agent = NULL; 641 int agent_id; 642 int ret; 643 644 mutex_lock(&file->port->file_mutex); 645 mutex_lock(&file->mutex); 646 647 if (!file->port->ib_dev) { 648 dev_notice(file->port->dev, 649 "ib_umad_reg_agent: invalid device\n"); 650 ret = -EPIPE; 651 goto out; 652 } 653 654 if (copy_from_user(&ureq, arg, sizeof ureq)) { 655 ret = -EFAULT; 656 goto out; 657 } 658 659 if (ureq.qpn != 0 && ureq.qpn != 1) { 660 dev_notice(file->port->dev, 661 "ib_umad_reg_agent: invalid QPN %d specified\n", 662 ureq.qpn); 663 ret = -EINVAL; 664 goto out; 665 } 666 667 for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) 668 if (!__get_agent(file, agent_id)) 669 goto found; 670 671 dev_notice(file->port->dev, 672 "ib_umad_reg_agent: Max Agents (%u) reached\n", 673 IB_UMAD_MAX_AGENTS); 674 ret = -ENOMEM; 675 goto out; 676 677 found: 678 if (ureq.mgmt_class) { 679 memset(&req, 0, sizeof(req)); 680 req.mgmt_class = ureq.mgmt_class; 681 req.mgmt_class_version = ureq.mgmt_class_version; 682 memcpy(req.oui, ureq.oui, sizeof req.oui); 683 684 if (compat_method_mask) { 685 u32 *umm = (u32 *) ureq.method_mask; 686 int i; 687 688 for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i) 689 req.method_mask[i] = 690 umm[i * 2] | ((u64) umm[i * 2 + 1] << 32); 691 } else 692 memcpy(req.method_mask, ureq.method_mask, 693 sizeof req.method_mask); 694 } 695 696 agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num, 697 ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, 698 ureq.mgmt_class ? &req : NULL, 699 ureq.rmpp_version, 700 send_handler, recv_handler, file, 0); 701 if (IS_ERR(agent)) { 702 ret = PTR_ERR(agent); 703 agent = NULL; 704 goto out; 705 } 706 707 if (put_user(agent_id, 708 (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) { 709 ret = -EFAULT; 710 goto out; 711 } 712 713 if (!file->already_used) { 714 file->already_used = 1; 715 if (!file->use_pkey_index) { 716 dev_warn(file->port->dev, 717 "process %s did not enable P_Key index support.\n", 718 current->comm); 719 dev_warn(file->port->dev, 720 " Documentation/infiniband/user_mad.txt has info on the new ABI.\n"); 721 } 722 } 723 724 file->agent[agent_id] = agent; 725 ret = 0; 726 727 out: 728 mutex_unlock(&file->mutex); 729 730 if (ret && agent) 731 ib_unregister_mad_agent(agent); 732 733 mutex_unlock(&file->port->file_mutex); 734 735 return ret; 736 } 737 738 static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) 739 { 740 struct ib_user_mad_reg_req2 ureq; 741 struct ib_mad_reg_req req; 742 struct ib_mad_agent *agent = NULL; 743 int agent_id; 744 int ret; 745 746 mutex_lock(&file->port->file_mutex); 747 mutex_lock(&file->mutex); 748 749 if (!file->port->ib_dev) { 750 dev_notice(file->port->dev, 751 "ib_umad_reg_agent2: invalid device\n"); 752 ret = -EPIPE; 753 goto out; 754 } 755 756 if (copy_from_user(&ureq, arg, sizeof(ureq))) { 757 ret = -EFAULT; 758 goto out; 759 } 760 761 if (ureq.qpn != 0 && ureq.qpn != 1) { 762 dev_notice(file->port->dev, 763 "ib_umad_reg_agent2: invalid QPN %d specified\n", 764 ureq.qpn); 765 ret = -EINVAL; 766 goto out; 767 } 768 769 if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) { 770 dev_notice(file->port->dev, 771 "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n", 772 ureq.flags, IB_USER_MAD_REG_FLAGS_CAP); 773 ret = -EINVAL; 774 775 if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP, 776 (u32 __user *) (arg + offsetof(struct 777 ib_user_mad_reg_req2, flags)))) 778 ret = -EFAULT; 779 780 goto out; 781 } 782 783 for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) 784 if (!__get_agent(file, agent_id)) 785 goto found; 786 787 dev_notice(file->port->dev, 788 "ib_umad_reg_agent2: Max Agents (%u) reached\n", 789 IB_UMAD_MAX_AGENTS); 790 ret = -ENOMEM; 791 goto out; 792 793 found: 794 if (ureq.mgmt_class) { 795 memset(&req, 0, sizeof(req)); 796 req.mgmt_class = ureq.mgmt_class; 797 req.mgmt_class_version = ureq.mgmt_class_version; 798 if (ureq.oui & 0xff000000) { 799 dev_notice(file->port->dev, 800 "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n", 801 ureq.oui); 802 ret = -EINVAL; 803 goto out; 804 } 805 req.oui[2] = ureq.oui & 0x0000ff; 806 req.oui[1] = (ureq.oui & 0x00ff00) >> 8; 807 req.oui[0] = (ureq.oui & 0xff0000) >> 16; 808 memcpy(req.method_mask, ureq.method_mask, 809 sizeof(req.method_mask)); 810 } 811 812 agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num, 813 ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, 814 ureq.mgmt_class ? &req : NULL, 815 ureq.rmpp_version, 816 send_handler, recv_handler, file, 817 ureq.flags); 818 if (IS_ERR(agent)) { 819 ret = PTR_ERR(agent); 820 agent = NULL; 821 goto out; 822 } 823 824 if (put_user(agent_id, 825 (u32 __user *)(arg + 826 offsetof(struct ib_user_mad_reg_req2, id)))) { 827 ret = -EFAULT; 828 goto out; 829 } 830 831 if (!file->already_used) { 832 file->already_used = 1; 833 file->use_pkey_index = 1; 834 } 835 836 file->agent[agent_id] = agent; 837 ret = 0; 838 839 out: 840 mutex_unlock(&file->mutex); 841 842 if (ret && agent) 843 ib_unregister_mad_agent(agent); 844 845 mutex_unlock(&file->port->file_mutex); 846 847 return ret; 848 } 849 850 851 static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) 852 { 853 struct ib_mad_agent *agent = NULL; 854 u32 id; 855 int ret = 0; 856 857 if (get_user(id, arg)) 858 return -EFAULT; 859 860 mutex_lock(&file->port->file_mutex); 861 mutex_lock(&file->mutex); 862 863 if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { 864 ret = -EINVAL; 865 goto out; 866 } 867 868 agent = file->agent[id]; 869 file->agent[id] = NULL; 870 871 out: 872 mutex_unlock(&file->mutex); 873 874 if (agent) 875 ib_unregister_mad_agent(agent); 876 877 mutex_unlock(&file->port->file_mutex); 878 879 return ret; 880 } 881 882 static long ib_umad_enable_pkey(struct ib_umad_file *file) 883 { 884 int ret = 0; 885 886 mutex_lock(&file->mutex); 887 if (file->already_used) 888 ret = -EINVAL; 889 else 890 file->use_pkey_index = 1; 891 mutex_unlock(&file->mutex); 892 893 return ret; 894 } 895 896 static long ib_umad_ioctl(struct file *filp, unsigned int cmd, 897 unsigned long arg) 898 { 899 switch (cmd) { 900 case IB_USER_MAD_REGISTER_AGENT: 901 return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0); 902 case IB_USER_MAD_UNREGISTER_AGENT: 903 return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg); 904 case IB_USER_MAD_ENABLE_PKEY: 905 return ib_umad_enable_pkey(filp->private_data); 906 case IB_USER_MAD_REGISTER_AGENT2: 907 return ib_umad_reg_agent2(filp->private_data, (void __user *) arg); 908 default: 909 return -ENOIOCTLCMD; 910 } 911 } 912 913 #ifdef CONFIG_COMPAT 914 static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd, 915 unsigned long arg) 916 { 917 switch (cmd) { 918 case IB_USER_MAD_REGISTER_AGENT: 919 return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1); 920 case IB_USER_MAD_UNREGISTER_AGENT: 921 return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg)); 922 case IB_USER_MAD_ENABLE_PKEY: 923 return ib_umad_enable_pkey(filp->private_data); 924 case IB_USER_MAD_REGISTER_AGENT2: 925 return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg)); 926 default: 927 return -ENOIOCTLCMD; 928 } 929 } 930 #endif 931 932 /* 933 * ib_umad_open() does not need the BKL: 934 * 935 * - the ib_umad_port structures are properly reference counted, and 936 * everything else is purely local to the file being created, so 937 * races against other open calls are not a problem; 938 * - the ioctl method does not affect any global state outside of the 939 * file structure being operated on; 940 */ 941 static int ib_umad_open(struct inode *inode, struct file *filp) 942 { 943 struct ib_umad_port *port; 944 struct ib_umad_file *file; 945 int ret = -ENXIO; 946 947 port = container_of(inode->i_cdev, struct ib_umad_port, cdev); 948 949 mutex_lock(&port->file_mutex); 950 951 if (!port->ib_dev) 952 goto out; 953 954 ret = -ENOMEM; 955 file = kzalloc(sizeof *file, GFP_KERNEL); 956 if (!file) 957 goto out; 958 959 mutex_init(&file->mutex); 960 spin_lock_init(&file->send_lock); 961 INIT_LIST_HEAD(&file->recv_list); 962 INIT_LIST_HEAD(&file->send_list); 963 init_waitqueue_head(&file->recv_wait); 964 965 file->port = port; 966 filp->private_data = file; 967 968 list_add_tail(&file->port_list, &port->file_list); 969 970 ret = nonseekable_open(inode, filp); 971 if (ret) { 972 list_del(&file->port_list); 973 kfree(file); 974 goto out; 975 } 976 977 kobject_get(&port->umad_dev->kobj); 978 979 out: 980 mutex_unlock(&port->file_mutex); 981 return ret; 982 } 983 984 static int ib_umad_close(struct inode *inode, struct file *filp) 985 { 986 struct ib_umad_file *file = filp->private_data; 987 struct ib_umad_device *dev = file->port->umad_dev; 988 struct ib_umad_packet *packet, *tmp; 989 int already_dead; 990 int i; 991 992 mutex_lock(&file->port->file_mutex); 993 mutex_lock(&file->mutex); 994 995 already_dead = file->agents_dead; 996 file->agents_dead = 1; 997 998 list_for_each_entry_safe(packet, tmp, &file->recv_list, list) { 999 if (packet->recv_wc) 1000 ib_free_recv_mad(packet->recv_wc); 1001 kfree(packet); 1002 } 1003 1004 list_del(&file->port_list); 1005 1006 mutex_unlock(&file->mutex); 1007 1008 if (!already_dead) 1009 for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i) 1010 if (file->agent[i]) 1011 ib_unregister_mad_agent(file->agent[i]); 1012 1013 mutex_unlock(&file->port->file_mutex); 1014 1015 kfree(file); 1016 kobject_put(&dev->kobj); 1017 1018 return 0; 1019 } 1020 1021 static const struct file_operations umad_fops = { 1022 .owner = THIS_MODULE, 1023 .read = ib_umad_read, 1024 .write = ib_umad_write, 1025 .poll = ib_umad_poll, 1026 .unlocked_ioctl = ib_umad_ioctl, 1027 #ifdef CONFIG_COMPAT 1028 .compat_ioctl = ib_umad_compat_ioctl, 1029 #endif 1030 .open = ib_umad_open, 1031 .release = ib_umad_close, 1032 .llseek = no_llseek, 1033 }; 1034 1035 static int ib_umad_sm_open(struct inode *inode, struct file *filp) 1036 { 1037 struct ib_umad_port *port; 1038 struct ib_port_modify props = { 1039 .set_port_cap_mask = IB_PORT_SM 1040 }; 1041 int ret; 1042 1043 port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev); 1044 1045 if (filp->f_flags & O_NONBLOCK) { 1046 if (down_trylock(&port->sm_sem)) { 1047 ret = -EAGAIN; 1048 goto fail; 1049 } 1050 } else { 1051 if (down_interruptible(&port->sm_sem)) { 1052 ret = -ERESTARTSYS; 1053 goto fail; 1054 } 1055 } 1056 1057 ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props); 1058 if (ret) 1059 goto err_up_sem; 1060 1061 filp->private_data = port; 1062 1063 ret = nonseekable_open(inode, filp); 1064 if (ret) 1065 goto err_clr_sm_cap; 1066 1067 kobject_get(&port->umad_dev->kobj); 1068 1069 return 0; 1070 1071 err_clr_sm_cap: 1072 swap(props.set_port_cap_mask, props.clr_port_cap_mask); 1073 ib_modify_port(port->ib_dev, port->port_num, 0, &props); 1074 1075 err_up_sem: 1076 up(&port->sm_sem); 1077 1078 fail: 1079 return ret; 1080 } 1081 1082 static int ib_umad_sm_close(struct inode *inode, struct file *filp) 1083 { 1084 struct ib_umad_port *port = filp->private_data; 1085 struct ib_port_modify props = { 1086 .clr_port_cap_mask = IB_PORT_SM 1087 }; 1088 int ret = 0; 1089 1090 mutex_lock(&port->file_mutex); 1091 if (port->ib_dev) 1092 ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props); 1093 mutex_unlock(&port->file_mutex); 1094 1095 up(&port->sm_sem); 1096 1097 kobject_put(&port->umad_dev->kobj); 1098 1099 return ret; 1100 } 1101 1102 static const struct file_operations umad_sm_fops = { 1103 .owner = THIS_MODULE, 1104 .open = ib_umad_sm_open, 1105 .release = ib_umad_sm_close, 1106 .llseek = no_llseek, 1107 }; 1108 1109 static struct ib_client umad_client = { 1110 .name = "umad", 1111 .add = ib_umad_add_one, 1112 .remove = ib_umad_remove_one 1113 }; 1114 1115 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, 1116 char *buf) 1117 { 1118 struct ib_umad_port *port = dev_get_drvdata(dev); 1119 1120 if (!port) 1121 return -ENODEV; 1122 1123 return sprintf(buf, "%s\n", port->ib_dev->name); 1124 } 1125 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); 1126 1127 static ssize_t show_port(struct device *dev, struct device_attribute *attr, 1128 char *buf) 1129 { 1130 struct ib_umad_port *port = dev_get_drvdata(dev); 1131 1132 if (!port) 1133 return -ENODEV; 1134 1135 return sprintf(buf, "%d\n", port->port_num); 1136 } 1137 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL); 1138 1139 static CLASS_ATTR_STRING(abi_version, S_IRUGO, 1140 __stringify(IB_USER_MAD_ABI_VERSION)); 1141 1142 static dev_t overflow_maj; 1143 static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS); 1144 static int find_overflow_devnum(struct ib_device *device) 1145 { 1146 int ret; 1147 1148 if (!overflow_maj) { 1149 ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2, 1150 "infiniband_mad"); 1151 if (ret) { 1152 dev_err(&device->dev, 1153 "couldn't register dynamic device number\n"); 1154 return ret; 1155 } 1156 } 1157 1158 ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS); 1159 if (ret >= IB_UMAD_MAX_PORTS) 1160 return -1; 1161 1162 return ret; 1163 } 1164 1165 static int ib_umad_init_port(struct ib_device *device, int port_num, 1166 struct ib_umad_device *umad_dev, 1167 struct ib_umad_port *port) 1168 { 1169 int devnum; 1170 dev_t base; 1171 1172 spin_lock(&port_lock); 1173 devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); 1174 if (devnum >= IB_UMAD_MAX_PORTS) { 1175 spin_unlock(&port_lock); 1176 devnum = find_overflow_devnum(device); 1177 if (devnum < 0) 1178 return -1; 1179 1180 spin_lock(&port_lock); 1181 port->dev_num = devnum + IB_UMAD_MAX_PORTS; 1182 base = devnum + overflow_maj; 1183 set_bit(devnum, overflow_map); 1184 } else { 1185 port->dev_num = devnum; 1186 base = devnum + base_dev; 1187 set_bit(devnum, dev_map); 1188 } 1189 spin_unlock(&port_lock); 1190 1191 port->ib_dev = device; 1192 port->port_num = port_num; 1193 sema_init(&port->sm_sem, 1); 1194 mutex_init(&port->file_mutex); 1195 INIT_LIST_HEAD(&port->file_list); 1196 1197 cdev_init(&port->cdev, &umad_fops); 1198 port->cdev.owner = THIS_MODULE; 1199 cdev_set_parent(&port->cdev, &umad_dev->kobj); 1200 kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num); 1201 if (cdev_add(&port->cdev, base, 1)) 1202 goto err_cdev; 1203 1204 port->dev = device_create(umad_class, device->dev.parent, 1205 port->cdev.dev, port, 1206 "umad%d", port->dev_num); 1207 if (IS_ERR(port->dev)) 1208 goto err_cdev; 1209 1210 if (device_create_file(port->dev, &dev_attr_ibdev)) 1211 goto err_dev; 1212 if (device_create_file(port->dev, &dev_attr_port)) 1213 goto err_dev; 1214 1215 base += IB_UMAD_MAX_PORTS; 1216 cdev_init(&port->sm_cdev, &umad_sm_fops); 1217 port->sm_cdev.owner = THIS_MODULE; 1218 cdev_set_parent(&port->sm_cdev, &umad_dev->kobj); 1219 kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num); 1220 if (cdev_add(&port->sm_cdev, base, 1)) 1221 goto err_sm_cdev; 1222 1223 port->sm_dev = device_create(umad_class, device->dev.parent, 1224 port->sm_cdev.dev, port, 1225 "issm%d", port->dev_num); 1226 if (IS_ERR(port->sm_dev)) 1227 goto err_sm_cdev; 1228 1229 if (device_create_file(port->sm_dev, &dev_attr_ibdev)) 1230 goto err_sm_dev; 1231 if (device_create_file(port->sm_dev, &dev_attr_port)) 1232 goto err_sm_dev; 1233 1234 return 0; 1235 1236 err_sm_dev: 1237 device_destroy(umad_class, port->sm_cdev.dev); 1238 1239 err_sm_cdev: 1240 cdev_del(&port->sm_cdev); 1241 1242 err_dev: 1243 device_destroy(umad_class, port->cdev.dev); 1244 1245 err_cdev: 1246 cdev_del(&port->cdev); 1247 if (port->dev_num < IB_UMAD_MAX_PORTS) 1248 clear_bit(devnum, dev_map); 1249 else 1250 clear_bit(devnum, overflow_map); 1251 1252 return -1; 1253 } 1254 1255 static void ib_umad_kill_port(struct ib_umad_port *port) 1256 { 1257 struct ib_umad_file *file; 1258 int id; 1259 1260 dev_set_drvdata(port->dev, NULL); 1261 dev_set_drvdata(port->sm_dev, NULL); 1262 1263 device_destroy(umad_class, port->cdev.dev); 1264 device_destroy(umad_class, port->sm_cdev.dev); 1265 1266 cdev_del(&port->cdev); 1267 cdev_del(&port->sm_cdev); 1268 1269 mutex_lock(&port->file_mutex); 1270 1271 port->ib_dev = NULL; 1272 1273 list_for_each_entry(file, &port->file_list, port_list) { 1274 mutex_lock(&file->mutex); 1275 file->agents_dead = 1; 1276 mutex_unlock(&file->mutex); 1277 1278 for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) 1279 if (file->agent[id]) 1280 ib_unregister_mad_agent(file->agent[id]); 1281 } 1282 1283 mutex_unlock(&port->file_mutex); 1284 1285 if (port->dev_num < IB_UMAD_MAX_PORTS) 1286 clear_bit(port->dev_num, dev_map); 1287 else 1288 clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map); 1289 } 1290 1291 static void ib_umad_add_one(struct ib_device *device) 1292 { 1293 struct ib_umad_device *umad_dev; 1294 int s, e, i; 1295 int count = 0; 1296 1297 s = rdma_start_port(device); 1298 e = rdma_end_port(device); 1299 1300 umad_dev = kzalloc(sizeof *umad_dev + 1301 (e - s + 1) * sizeof (struct ib_umad_port), 1302 GFP_KERNEL); 1303 if (!umad_dev) 1304 return; 1305 1306 kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype); 1307 1308 for (i = s; i <= e; ++i) { 1309 if (!rdma_cap_ib_mad(device, i)) 1310 continue; 1311 1312 umad_dev->port[i - s].umad_dev = umad_dev; 1313 1314 if (ib_umad_init_port(device, i, umad_dev, 1315 &umad_dev->port[i - s])) 1316 goto err; 1317 1318 count++; 1319 } 1320 1321 if (!count) 1322 goto free; 1323 1324 ib_set_client_data(device, &umad_client, umad_dev); 1325 1326 return; 1327 1328 err: 1329 while (--i >= s) { 1330 if (!rdma_cap_ib_mad(device, i)) 1331 continue; 1332 1333 ib_umad_kill_port(&umad_dev->port[i - s]); 1334 } 1335 free: 1336 kobject_put(&umad_dev->kobj); 1337 } 1338 1339 static void ib_umad_remove_one(struct ib_device *device, void *client_data) 1340 { 1341 struct ib_umad_device *umad_dev = client_data; 1342 int i; 1343 1344 if (!umad_dev) 1345 return; 1346 1347 for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) { 1348 if (rdma_cap_ib_mad(device, i + rdma_start_port(device))) 1349 ib_umad_kill_port(&umad_dev->port[i]); 1350 } 1351 1352 kobject_put(&umad_dev->kobj); 1353 } 1354 1355 static char *umad_devnode(struct device *dev, umode_t *mode) 1356 { 1357 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); 1358 } 1359 1360 static int __init ib_umad_init(void) 1361 { 1362 int ret; 1363 1364 ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2, 1365 "infiniband_mad"); 1366 if (ret) { 1367 pr_err("couldn't register device number\n"); 1368 goto out; 1369 } 1370 1371 umad_class = class_create(THIS_MODULE, "infiniband_mad"); 1372 if (IS_ERR(umad_class)) { 1373 ret = PTR_ERR(umad_class); 1374 pr_err("couldn't create class infiniband_mad\n"); 1375 goto out_chrdev; 1376 } 1377 1378 umad_class->devnode = umad_devnode; 1379 1380 ret = class_create_file(umad_class, &class_attr_abi_version.attr); 1381 if (ret) { 1382 pr_err("couldn't create abi_version attribute\n"); 1383 goto out_class; 1384 } 1385 1386 ret = ib_register_client(&umad_client); 1387 if (ret) { 1388 pr_err("couldn't register ib_umad client\n"); 1389 goto out_class; 1390 } 1391 1392 return 0; 1393 1394 out_class: 1395 class_destroy(umad_class); 1396 1397 out_chrdev: 1398 unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2); 1399 1400 out: 1401 return ret; 1402 } 1403 1404 static void __exit ib_umad_cleanup(void) 1405 { 1406 ib_unregister_client(&umad_client); 1407 class_destroy(umad_class); 1408 unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2); 1409 if (overflow_maj) 1410 unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2); 1411 } 1412 1413 module_init(ib_umad_init); 1414 module_exit(ib_umad_cleanup); 1415