/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"

static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING,
				       .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING,
					 .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
						     .len = 16 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
					    .len = TASK_COMM_LEN },
	[RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING,
					    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 },
};

static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
				      enum rdma_nldev_print_type print_type)
{
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
		return -EMSGSIZE;
	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u32 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u64 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);

static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
			   dev_name(&device->dev)))
		return -EMSGSIZE;

	return 0;
}

static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	return 0;
}

static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;
	u64 cap_flags = 0;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
			      sizeof(attr.port_cap_flags2)) > sizeof(u64));
		cap_flags = attr.port_cap_flags |
			    ((u64)attr.port_cap_flags2 << 32);
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      cap_flags, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	if (device->ops.get_netdev)
		netdev = device->ops.get_netdev(device, port);

	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}

static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}

static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
		[RDMA_RESTRACK_CTX] = "ctx",
	};

	struct rdma_restrack_root *res = &device->res;
	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}

static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	/*
	 * For user resources, the user should read /proc/PID/comm to get the
	 * name of the owning task.
	 */
	if (rdma_is_kernel_res(res)) {
		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				   res->kern_name))
			return -EMSGSIZE;
	} else {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
				task_pid_vnr(res->task)))
			return -EMSGSIZE;
	}
	return 0;
}

static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct rdma_restrack_root *resroot = &qp->device->res;
	struct ib_qp_init_attr qp_init_attr;
	struct nlattr *entry_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
	if (!entry_attr)
		goto out;

	/* In create_qp() the port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_cm_id_entry(struct sk_buff *msg,
				struct netlink_callback *cb,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct rdma_restrack_root *resroot = &id_priv->id.device->res;
	struct rdma_cm_id *cm_id = &id_priv->id;
	struct nlattr *entry_attr;

	if (port && port != cm_id->port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY);
	if (!entry_attr)
		goto out;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct rdma_restrack_root *resroot = &cq->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
	if (!entry_attr)
		goto out;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct rdma_restrack_root *resroot = &mr->pd->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
	if (!entry_attr)
		goto out;

	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);
	struct rdma_restrack_root *resroot = &pd->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
	if (!entry_attr)
		goto out;

	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;
	if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
			pd->unsafe_global_rkey))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);

	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}

static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		err = ib_device_rename(device, name);
	}

	ib_device_put(device);
	return err;
}

static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take a lock here, because
	 * we are relying on ib_core's lists_rwsem.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_port_info(msg, device, port, sock_net(skb->sk));
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);

	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}

static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	u32 p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(ifindex);
	if (!device)
		return -EINVAL;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		/*
		 * The dumpit function returns all information from a specific
		 * index onward. That index is taken from the netlink request
		 * sent by the user and is available in cb->args[0].
		 *
		 * Usually the user does not fill this field, which causes
		 * everything to be returned.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	ib_device_put(device);
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}

static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}

struct nldev_fill_res_entry {
	int (*fill_res_func)(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;
	enum rdma_nldev_command nldev_cmd;
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
	},
};

static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	/*
	 * Right now we require the device index to look up resource
	 * information, but this code could be extended to return all devices
	 * in one shot by checking for the presence of
	 * RDMA_NLDEV_ATTR_DEV_INDEX and iterating over all devices when it
	 * is absent. That is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, return all QPs from that device.
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	down_read(&device->res.rwsem);
	hash_for_each_possible(device->res.hash, res, node, res_type) {
		if (idx < start)
			goto next;

		if ((rdma_is_kernel_res(res) &&
		     task_active_pid_ns(current) != &init_pid_ns) ||
		    (!rdma_is_kernel_res(res) && task_active_pid_ns(current) !=
		     task_active_pid_ns(res->task)))
			/*
			 * 1. Kernel resources should be visible in the init
			 *    namespace only.
			 * 2. Present only resources visible in the current
			 *    namespace.
			 */
			goto next;

		if (!rdma_restrack_get(res))
			/*
			 * Resource is under release now, but we are not
			 * releasing the lock, so it will be picked up in
			 * our next pass, once we get the ->next pointer.
			 */
			goto next;

		filled = true;

		up_read(&device->res.rwsem);
		ret = fe->fill_res_func(skb, cb, res, port);
		down_read(&device->res.rwsem);
		/*
		 * Return the resource; it will not actually be released
		 * until &device->res.rwsem is released for write.
		 */
		rdma_restrack_put(res);

		if (ret == -EMSGSIZE)
			/*
			 * There is a chance to optimize here.
			 * It can be done by using list_prepare_entry
			 * and list_for_each_entry_continue afterwards.
			 */
			break;
		if (ret)
			goto res_err;
next:
		idx++;
	}
	up_read(&device->res.rwsem);

	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill: cancel the message and
	 * return 0 to mark the end of the dumpit.
	 */
	if (!filled)
		goto err;

	ib_device_put(device);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);
	up_read(&device->res.rwsem);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	ib_device_put(device);
	return ret;
}

static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
}

static int nldev_res_get_cm_id_dumpit(struct sk_buff *skb,
				      struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CM_ID);
}

static int nldev_res_get_cq_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CQ);
}

static int nldev_res_get_mr_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR);
}

static int nldev_res_get_pd_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_PD);
}

static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.dump = nldev_res_get_qp_dumpit,
		/*
		 * .doit is not implemented yet for two reasons:
		 * 1. It is not needed yet.
		 * 2. There is a need to provide an identifier; while that is
		 *    easy for QPs (device index + port index + LQPN), it is
		 *    not the case for the rest of the resources (PD and CQ).
		 *    Because it is better to provide a similar interface for
		 *    all resources, wait until the other resources are
		 *    implemented too.
		 */
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.dump = nldev_res_get_pd_dumpit,
	},
};

void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
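
/*
 * Usage sketch (illustrative only, not part of this file): a provider driver
 * that supplies the restrack fill_res_entry callback (invoked above via
 * resroot->fill_res_entry) can use the exported rdma_nl_put_driver_*()
 * helpers to append driver-specific attributes to a resource dump. The
 * callback name, attribute names and values below are hypothetical:
 *
 *	static int mydrv_fill_res_entry(struct sk_buff *msg,
 *					struct rdma_restrack_entry *res)
 *	{
 *		if (rdma_nl_put_driver_u32(msg, "sq_wqe_cnt", 256))
 *			return -EMSGSIZE;
 *		if (rdma_nl_put_driver_u64_hex(msg, "db_addr", 0x1000))
 *			return -EMSGSIZE;
 *		return 0;
 *	}
 *
 * Each helper emits a DRIVER_STRING name attribute, an optional print-type
 * hint (hex vs. the default), and the value itself, matching the policy
 * entries declared at the top of this file.
 */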