Lines matching refs: msg — drivers/infiniband/core/nldev.c (each hit shows the source line number, the matching code, and the enclosing function; "argument"/"local" mark whether msg is a parameter or a local variable there)

161 static int put_driver_name_print_type(struct sk_buff *msg, const char *name,  in put_driver_name_print_type()  argument
164 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) in put_driver_name_print_type()
167 nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) in put_driver_name_print_type()
173 static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, in _rdma_nl_put_driver_u32() argument
177 if (put_driver_name_print_type(msg, name, print_type)) in _rdma_nl_put_driver_u32()
179 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) in _rdma_nl_put_driver_u32()
185 static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, in _rdma_nl_put_driver_u64() argument
189 if (put_driver_name_print_type(msg, name, print_type)) in _rdma_nl_put_driver_u64()
191 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, in _rdma_nl_put_driver_u64()
198 int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name, in rdma_nl_put_driver_string() argument
201 if (put_driver_name_print_type(msg, name, in rdma_nl_put_driver_string()
204 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str)) in rdma_nl_put_driver_string()
211 int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) in rdma_nl_put_driver_u32() argument
213 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, in rdma_nl_put_driver_u32()
218 int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, in rdma_nl_put_driver_u32_hex() argument
221 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, in rdma_nl_put_driver_u32_hex()
226 int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) in rdma_nl_put_driver_u64() argument
228 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, in rdma_nl_put_driver_u64()
233 int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) in rdma_nl_put_driver_u64_hex() argument
235 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, in rdma_nl_put_driver_u64_hex()
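
The exported rdma_nl_put_driver_* helpers above are what providers call from their restrack fill hooks to attach named driver attributes. A minimal sketch of such a hook, assuming a hypothetical foo driver (foo_qp, to_foo_qp, sq_depth and flags are illustrative, not from this file; the helpers are declared in include/rdma/restrack.h):

        static int foo_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
        {
                struct foo_qp *qp = to_foo_qp(ibqp); /* hypothetical container_of wrapper */

                /* rendered in decimal (RDMA_NLDEV_PRINT_TYPE_UNSPEC) */
                if (rdma_nl_put_driver_u32(msg, "sq_depth", qp->sq_depth))
                        return -EMSGSIZE;
                /* rendered in hex (RDMA_NLDEV_PRINT_TYPE_HEX) */
                if (rdma_nl_put_driver_u32_hex(msg, "flags", qp->flags))
                        return -EMSGSIZE;
                return 0;
        }
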
240 static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) in fill_nldev_handle() argument
242 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) in fill_nldev_handle()
244 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, in fill_nldev_handle()
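
From these two fragments the complete handle helper is short; a plausible reconstruction (the -EMSGSIZE returns follow the usual nla_put contract, and dev_name(&device->dev) as the string source is assumed, since line 244 is truncated):

        static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
        {
                /* every nldev reply starts with the device index and name */
                if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
                        return -EMSGSIZE;
                if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
                                   dev_name(&device->dev)))
                        return -EMSGSIZE;
                return 0;
        }
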
251 static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) in fill_dev_info() argument
257 if (fill_nldev_handle(msg, device)) in fill_dev_info()
260 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device))) in fill_dev_info()
264 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, in fill_dev_info()
271 if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw)) in fill_dev_info()
274 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, in fill_dev_info()
278 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, in fill_dev_info()
282 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) in fill_dev_info()
284 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim)) in fill_dev_info()
294 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); in fill_dev_info()
296 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); in fill_dev_info()
298 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); in fill_dev_info()
300 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); in fill_dev_info()
302 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, in fill_dev_info()
307 static int fill_port_info(struct sk_buff *msg, in fill_port_info() argument
316 if (fill_nldev_handle(msg, device)) in fill_port_info()
319 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) in fill_port_info()
331 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, in fill_port_info()
334 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, in fill_port_info()
337 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) in fill_port_info()
339 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid)) in fill_port_info()
341 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)) in fill_port_info()
344 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state)) in fill_port_info()
346 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) in fill_port_info()
351 ret = nla_put_u32(msg, in fill_port_info()
355 ret = nla_put_string(msg, in fill_port_info()
365 static int fill_res_info_entry(struct sk_buff *msg, in fill_res_info_entry() argument
370 entry_attr = nla_nest_start_noflag(msg, in fill_res_info_entry()
375 if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) in fill_res_info_entry()
377 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, in fill_res_info_entry()
381 nla_nest_end(msg, entry_attr); in fill_res_info_entry()
385 nla_nest_cancel(msg, entry_attr); in fill_res_info_entry()
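
Lines 370-385 show the standard nest-or-cancel shape used for one summary row: open a nest, put the attributes, close the nest on success, cancel it on failure so a partial row never reaches userspace. A reconstruction, assuming the nest type RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY and the RDMA_NLDEV_ATTR_PAD padding attribute:

        static int fill_res_info_entry(struct sk_buff *msg,
                                       const char *name, u64 curr)
        {
                struct nlattr *entry_attr;

                entry_attr = nla_nest_start_noflag(msg,
                                        RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
                if (!entry_attr)
                        return -EMSGSIZE;

                if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
                        goto err;
                if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR,
                                      curr, RDMA_NLDEV_ATTR_PAD))
                        goto err;

                nla_nest_end(msg, entry_attr);
                return 0;

        err:
                nla_nest_cancel(msg, entry_attr);
                return -EMSGSIZE;
        }
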
389 static int fill_res_info(struct sk_buff *msg, struct ib_device *device) in fill_res_info() argument
404 if (fill_nldev_handle(msg, device)) in fill_res_info()
407 table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); in fill_res_info()
415 ret = fill_res_info_entry(msg, names[i], curr); in fill_res_info()
420 nla_nest_end(msg, table_attr); in fill_res_info()
424 nla_nest_cancel(msg, table_attr); in fill_res_info()
428 static int fill_res_name_pid(struct sk_buff *msg, in fill_res_name_pid() argument
438 err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, in fill_res_name_pid()
455 err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid); in fill_res_name_pid()
461 static int fill_res_qp_entry_query(struct sk_buff *msg, in fill_res_qp_entry_query() argument
475 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, in fill_res_qp_entry_query()
478 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN, in fill_res_qp_entry_query()
483 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn)) in fill_res_qp_entry_query()
488 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, in fill_res_qp_entry_query()
492 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type)) in fill_res_qp_entry_query()
494 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) in fill_res_qp_entry_query()
498 return dev->ops.fill_res_qp_entry(msg, qp); in fill_res_qp_entry_query()
504 static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_qp_entry() argument
515 if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) in fill_res_qp_entry()
518 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); in fill_res_qp_entry()
523 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) in fill_res_qp_entry()
526 ret = fill_res_name_pid(msg, res); in fill_res_qp_entry()
530 return fill_res_qp_entry_query(msg, res, dev, qp); in fill_res_qp_entry()
533 static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_qp_raw_entry() argument
543 return dev->ops.fill_res_qp_entry_raw(msg, qp); in fill_res_qp_raw_entry()
546 static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cm_id_entry() argument
558 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) in fill_res_cm_id_entry()
562 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num)) in fill_res_cm_id_entry()
564 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type)) in fill_res_cm_id_entry()
568 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps)) in fill_res_cm_id_entry()
571 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state)) in fill_res_cm_id_entry()
575 nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR, in fill_res_cm_id_entry()
580 nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR, in fill_res_cm_id_entry()
585 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id)) in fill_res_cm_id_entry()
588 if (fill_res_name_pid(msg, res)) in fill_res_cm_id_entry()
592 return dev->ops.fill_res_cm_id_entry(msg, cm_id); in fill_res_cm_id_entry()
598 static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cq_entry() argument
604 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) in fill_res_cq_entry()
606 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, in fill_res_cq_entry()
612 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) in fill_res_cq_entry()
615 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) in fill_res_cq_entry()
618 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) in fill_res_cq_entry()
621 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, in fill_res_cq_entry()
625 if (fill_res_name_pid(msg, res)) in fill_res_cq_entry()
629 dev->ops.fill_res_cq_entry(msg, cq) : 0; in fill_res_cq_entry()
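
The ternary at lines 628-629 is the optional-driver-hook idiom: core attributes are emitted unconditionally, then the provider gets a chance to append its own only if it implements the op. A plausible reading of the function tail (the err label is assumed):

                if (fill_res_name_pid(msg, res))
                        goto err;

                /* let the provider append driver attributes, if any */
                return (dev->ops.fill_res_cq_entry) ?
                        dev->ops.fill_res_cq_entry(msg, cq) : 0;

        err:
                return -EMSGSIZE;
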
632 static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cq_raw_entry() argument
640 return dev->ops.fill_res_cq_entry_raw(msg, cq); in fill_res_cq_raw_entry()
643 static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_mr_entry() argument
650 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) in fill_res_mr_entry()
652 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) in fill_res_mr_entry()
656 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, in fill_res_mr_entry()
660 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) in fill_res_mr_entry()
664 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) in fill_res_mr_entry()
667 if (fill_res_name_pid(msg, res)) in fill_res_mr_entry()
671 dev->ops.fill_res_mr_entry(msg, mr) : in fill_res_mr_entry()
675 static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_mr_raw_entry() argument
683 return dev->ops.fill_res_mr_entry_raw(msg, mr); in fill_res_mr_raw_entry()
686 static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_pd_entry() argument
692 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, in fill_res_pd_entry()
696 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, in fill_res_pd_entry()
700 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, in fill_res_pd_entry()
704 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) in fill_res_pd_entry()
708 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, in fill_res_pd_entry()
712 return fill_res_name_pid(msg, res); in fill_res_pd_entry()
717 static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_ctx_entry() argument
725 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id)) in fill_res_ctx_entry()
728 return fill_res_name_pid(msg, res); in fill_res_ctx_entry()
731 static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range, in fill_res_range_qp_entry() argument
739 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); in fill_res_range_qp_entry()
744 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range)) in fill_res_range_qp_entry()
747 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range)) in fill_res_range_qp_entry()
749 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range)) in fill_res_range_qp_entry()
752 nla_nest_end(msg, entry_attr); in fill_res_range_qp_entry()
756 nla_nest_cancel(msg, entry_attr); in fill_res_range_qp_entry()
760 static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq) in fill_res_srq_qps() argument
769 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); in fill_res_srq_qps()
792 if (fill_res_range_qp_entry(msg, min_range, prev)) in fill_res_srq_qps()
803 if (fill_res_range_qp_entry(msg, min_range, prev)) in fill_res_srq_qps()
806 nla_nest_end(msg, table_attr); in fill_res_srq_qps()
813 nla_nest_cancel(msg, table_attr); in fill_res_srq_qps()
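
fill_res_srq_qps (lines 760-813) walks the SRQ's QPs and collapses contiguous QP numbers into ranges; per lines 744-749, a run of one is emitted as a lone RES_LQPN while a longer run becomes a MIN_RANGE/MAX_RANGE pair. The compression logic, extracted into a standalone userspace sketch where emit_range stands in for fill_res_range_qp_entry:

        #include <stdio.h>
        #include <stdint.h>
        #include <stddef.h>

        /* Mirrors fill_res_range_qp_entry's choice between a single
         * LQPN attribute and a MIN_RANGE/MAX_RANGE pair. */
        static void emit_range(uint32_t min, uint32_t max)
        {
                if (min == max)
                        printf("LQPN %u\n", min);
                else
                        printf("RANGE %u-%u\n", min, max);
        }

        static void compress_qpns(const uint32_t *qpn, size_t n)
        {
                uint32_t min, prev;
                size_t i;

                if (!n)
                        return;
                min = prev = qpn[0];
                for (i = 1; i < n; i++) {
                        if (qpn[i] == prev + 1) {       /* still contiguous */
                                prev = qpn[i];
                                continue;
                        }
                        emit_range(min, prev);          /* gap: flush the run */
                        min = prev = qpn[i];
                }
                emit_range(min, prev);                  /* flush the final run */
        }

        int main(void)
        {
                const uint32_t qpns[] = { 3, 4, 5, 9, 10, 42 };

                /* prints: RANGE 3-5, RANGE 9-10, LQPN 42 */
                compress_qpns(qpns, sizeof(qpns) / sizeof(qpns[0]));
                return 0;
        }
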
817 static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_srq_entry() argument
822 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id)) in fill_res_srq_entry()
825 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type)) in fill_res_srq_entry()
828 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id)) in fill_res_srq_entry()
832 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, in fill_res_srq_entry()
837 if (fill_res_srq_qps(msg, srq)) in fill_res_srq_entry()
840 return fill_res_name_pid(msg, res); in fill_res_srq_entry()
846 static int fill_stat_counter_mode(struct sk_buff *msg, in fill_stat_counter_mode() argument
851 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) in fill_stat_counter_mode()
856 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) in fill_stat_counter_mode()
860 fill_res_name_pid(msg, &counter->res)) in fill_stat_counter_mode()
867 static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn) in fill_stat_counter_qp_entry() argument
871 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); in fill_stat_counter_qp_entry()
875 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) in fill_stat_counter_qp_entry()
878 nla_nest_end(msg, entry_attr); in fill_stat_counter_qp_entry()
882 nla_nest_cancel(msg, entry_attr); in fill_stat_counter_qp_entry()
886 static int fill_stat_counter_qps(struct sk_buff *msg, in fill_stat_counter_qps() argument
896 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); in fill_stat_counter_qps()
907 ret = fill_stat_counter_qp_entry(msg, qp->qp_num); in fill_stat_counter_qps()
913 nla_nest_end(msg, table_attr); in fill_stat_counter_qps()
918 nla_nest_cancel(msg, table_attr); in fill_stat_counter_qps()
922 int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name, in rdma_nl_stat_hwcounter_entry() argument
927 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); in rdma_nl_stat_hwcounter_entry()
931 if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, in rdma_nl_stat_hwcounter_entry()
934 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, in rdma_nl_stat_hwcounter_entry()
938 nla_nest_end(msg, entry_attr); in rdma_nl_stat_hwcounter_entry()
942 nla_nest_cancel(msg, entry_attr); in rdma_nl_stat_hwcounter_entry()
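
rdma_nl_stat_hwcounter_entry() is exported precisely so providers can reuse it from their own stat hooks, such as the fill_stat_mr_entry op dispatched just below. A hedged sketch of such a hook (foo_mr, to_foo_mr and the page_faults counter are illustrative; opening a RDMA_NLDEV_ATTR_STAT_HWCOUNTERS table around the entries is assumed):

        static int foo_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
        {
                struct foo_mr *mr = to_foo_mr(ibmr);
                struct nlattr *table;

                table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
                if (!table)
                        return -EMSGSIZE;

                if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
                                                 mr->page_faults))
                        goto err;

                nla_nest_end(msg, table);
                return 0;

        err:
                nla_nest_cancel(msg, table);
                return -EMSGSIZE;
        }
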
947 static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_stat_mr_entry() argument
953 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) in fill_stat_mr_entry()
957 return dev->ops.fill_stat_mr_entry(msg, mr); in fill_stat_mr_entry()
964 static int fill_stat_counter_hwcounters(struct sk_buff *msg, in fill_stat_counter_hwcounters() argument
971 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in fill_stat_counter_hwcounters()
979 if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name, in fill_stat_counter_hwcounters()
985 nla_nest_end(msg, table_attr); in fill_stat_counter_hwcounters()
990 nla_nest_cancel(msg, table_attr); in fill_stat_counter_hwcounters()
994 static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_counter_entry() argument
1007 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || in fill_res_counter_entry()
1008 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || in fill_res_counter_entry()
1009 fill_stat_counter_mode(msg, counter) || in fill_res_counter_entry()
1010 fill_stat_counter_qps(msg, counter) || in fill_res_counter_entry()
1011 fill_stat_counter_hwcounters(msg, counter)) in fill_res_counter_entry()
1022 struct sk_buff *msg; in nldev_get_doit() local
1037 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_get_doit()
1038 if (!msg) { in nldev_get_doit()
1043 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_get_doit()
1051 err = fill_dev_info(msg, device); in nldev_get_doit()
1055 nlmsg_end(msg, nlh); in nldev_get_doit()
1058 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_get_doit()
1061 nlmsg_free(msg); in nldev_get_doit()
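
Lines 1037-1061 trace the shape shared by every nldev doit handler: allocate a reply skb, stamp the netlink header, fill attributes, finalize, unicast back to the requester, and free the skb on any failure. A condensed reconstruction of that tail of nldev_get_doit (the device lookup/put calls, the nlh check, and the exact error labels are assumed):

                msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
                if (!msg) {
                        err = -ENOMEM;
                        goto err;
                }

                nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
                                RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
                                                 RDMA_NLDEV_CMD_GET),
                                0, 0);
                if (!nlh) {
                        err = -EMSGSIZE;
                        goto err_free;
                }

                err = fill_dev_info(msg, device);
                if (err)
                        goto err_free;

                nlmsg_end(msg, nlh);

                ib_device_put(device);
                return rdma_nl_unicast(sock_net(skb->sk), msg,
                                       NETLINK_CB(skb).portid);

        err_free:
                nlmsg_free(msg);
        err:
                ib_device_put(device);
                return err;
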
1162 struct sk_buff *msg; in nldev_port_get_doit() local
1185 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_port_get_doit()
1186 if (!msg) { in nldev_port_get_doit()
1191 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_port_get_doit()
1199 err = fill_port_info(msg, device, port, sock_net(skb->sk)); in nldev_port_get_doit()
1203 nlmsg_end(msg, nlh); in nldev_port_get_doit()
1206 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_port_get_doit()
1209 nlmsg_free(msg); in nldev_port_get_doit()
1278 struct sk_buff *msg; in nldev_res_get_doit() local
1292 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_res_get_doit()
1293 if (!msg) { in nldev_res_get_doit()
1298 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_res_get_doit()
1306 ret = fill_res_info(msg, device); in nldev_res_get_doit()
1310 nlmsg_end(msg, nlh); in nldev_res_get_doit()
1312 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_res_get_doit()
1315 nlmsg_free(msg); in nldev_res_get_doit()
1426 struct sk_buff *msg; in res_get_common_doit() local
1460 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in res_get_common_doit()
1461 if (!msg) { in res_get_common_doit()
1466 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in res_get_common_doit()
1471 if (!nlh || fill_nldev_handle(msg, device)) { in res_get_common_doit()
1478 ret = fill_func(msg, has_cap_net_admin, res, port); in res_get_common_doit()
1483 nlmsg_end(msg, nlh); in res_get_common_doit()
1485 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in res_get_common_doit()
1488 nlmsg_free(msg); in res_get_common_doit()
1772 struct sk_buff *msg; in nldev_get_chardev() local
1803 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_get_chardev()
1804 if (!msg) { in nldev_get_chardev()
1808 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_get_chardev()
1817 data.nl_msg = msg; in nldev_get_chardev()
1822 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV, in nldev_get_chardev()
1827 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi, in nldev_get_chardev()
1831 if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME, in nldev_get_chardev()
1837 nlmsg_end(msg, nlh); in nldev_get_chardev()
1841 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_get_chardev()
1846 nlmsg_free(msg); in nldev_get_chardev()
1857 struct sk_buff *msg; in nldev_sys_get_doit() local
1865 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_sys_get_doit()
1866 if (!msg) in nldev_sys_get_doit()
1869 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_sys_get_doit()
1874 nlmsg_free(msg); in nldev_sys_get_doit()
1878 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, in nldev_sys_get_doit()
1881 nlmsg_free(msg); in nldev_sys_get_doit()
1895 nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1); in nldev_sys_get_doit()
1897 nlmsg_end(msg, nlh); in nldev_sys_get_doit()
1898 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_sys_get_doit()
1922 static int nldev_stat_set_mode_doit(struct sk_buff *msg, in nldev_stat_set_mode_doit() argument
1958 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || in nldev_stat_set_mode_doit()
1959 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { in nldev_stat_set_mode_doit()
2021 struct sk_buff *msg; in nldev_stat_set_doit() local
2048 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_set_doit()
2049 if (!msg) { in nldev_stat_set_doit()
2053 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_set_doit()
2057 if (!nlh || fill_nldev_handle(msg, device) || in nldev_stat_set_doit()
2058 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { in nldev_stat_set_doit()
2064 ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port); in nldev_stat_set_doit()
2075 nlmsg_end(msg, nlh); in nldev_stat_set_doit()
2077 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_set_doit()
2080 nlmsg_free(msg); in nldev_stat_set_doit()
2091 struct sk_buff *msg; in nldev_stat_del_doit() local
2117 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_del_doit()
2118 if (!msg) { in nldev_stat_del_doit()
2122 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_del_doit()
2133 if (fill_nldev_handle(msg, device) || in nldev_stat_del_doit()
2134 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || in nldev_stat_del_doit()
2135 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || in nldev_stat_del_doit()
2136 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { in nldev_stat_del_doit()
2145 nlmsg_end(msg, nlh); in nldev_stat_del_doit()
2147 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_del_doit()
2150 nlmsg_free(msg); in nldev_stat_del_doit()
2165 struct sk_buff *msg; in stat_get_doit_default_counter() local
2189 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in stat_get_doit_default_counter()
2190 if (!msg) { in stat_get_doit_default_counter()
2195 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in stat_get_doit_default_counter()
2200 if (!nlh || fill_nldev_handle(msg, device) || in stat_get_doit_default_counter()
2201 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { in stat_get_doit_default_counter()
2214 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in stat_get_doit_default_counter()
2225 if (rdma_nl_stat_hwcounter_entry(msg, in stat_get_doit_default_counter()
2231 nla_nest_end(msg, table_attr); in stat_get_doit_default_counter()
2234 nlmsg_end(msg, nlh); in stat_get_doit_default_counter()
2236 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in stat_get_doit_default_counter()
2239 nla_nest_cancel(msg, table_attr); in stat_get_doit_default_counter()
2243 nlmsg_free(msg); in stat_get_doit_default_counter()
2256 struct sk_buff *msg; in stat_get_doit_qp() local
2278 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in stat_get_doit_qp()
2279 if (!msg) { in stat_get_doit_qp()
2284 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in stat_get_doit_qp()
2297 if (fill_nldev_handle(msg, device) || in stat_get_doit_qp()
2298 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || in stat_get_doit_qp()
2299 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { in stat_get_doit_qp()
2305 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { in stat_get_doit_qp()
2310 nlmsg_end(msg, nlh); in stat_get_doit_qp()
2312 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in stat_get_doit_qp()
2315 nlmsg_free(msg); in stat_get_doit_qp()
2385 struct sk_buff *msg; in nldev_stat_get_counter_status_doit() local
2412 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_get_counter_status_doit()
2413 if (!msg) { in nldev_stat_get_counter_status_doit()
2419 msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_get_counter_status_doit()
2424 if (!nlh || fill_nldev_handle(msg, device) || in nldev_stat_get_counter_status_doit()
2425 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) in nldev_stat_get_counter_status_doit()
2428 table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in nldev_stat_get_counter_status_doit()
2434 entry = nla_nest_start(msg, in nldev_stat_get_counter_status_doit()
2439 if (nla_put_string(msg, in nldev_stat_get_counter_status_doit()
2442 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i)) in nldev_stat_get_counter_status_doit()
2446 (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, in nldev_stat_get_counter_status_doit()
2450 nla_nest_end(msg, entry); in nldev_stat_get_counter_status_doit()
2454 nla_nest_end(msg, table); in nldev_stat_get_counter_status_doit()
2455 nlmsg_end(msg, nlh); in nldev_stat_get_counter_status_doit()
2457 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_get_counter_status_doit()
2460 nla_nest_cancel(msg, entry); in nldev_stat_get_counter_status_doit()
2463 nla_nest_cancel(msg, table); in nldev_stat_get_counter_status_doit()
2465 nlmsg_free(msg); in nldev_stat_get_counter_status_doit()
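
Everything put into msg in this file travels over the RDMA_NL_NLDEV netlink family that the iproute2 rdma utility consumes: rdma dev show, rdma res show qp and rdma stat show decode these attributes, and values emitted through the rdma_nl_put_driver_* helpers surface in the tool's detailed output.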