Lines Matching refs:msg

458 static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)  in vdpa_nl_mgmtdev_handle_fill()  argument
461 nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name)) in vdpa_nl_mgmtdev_handle_fill()
463 if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device))) in vdpa_nl_mgmtdev_handle_fill()
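The two nla_put_string() calls above are the whole of the handle-fill helper: a management device is identified by its bus name (when it sits on a bus) plus its device name, and the helper bails out as soon as an attribute no longer fits in the skb. A minimal sketch of that shape, assuming placeholder EXAMPLE_ATTR_* values rather than the real VDPA_ATTR_MGMTDEV_* ones:

/* Sketch only: attribute names are placeholders, not the vdpa UAPI values. */
static int example_handle_fill(struct sk_buff *msg, const struct device *dev)
{
        if (dev->bus &&
            nla_put_string(msg, EXAMPLE_ATTR_BUS_NAME, dev->bus->name))
                return -EMSGSIZE;
        if (nla_put_string(msg, EXAMPLE_ATTR_DEV_NAME, dev_name(dev)))
                return -EMSGSIZE;
        return 0;
}
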
486 static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg, in vdpa_mgmtdev_fill() argument
492 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW); in vdpa_mgmtdev_fill()
495 err = vdpa_nl_mgmtdev_handle_fill(msg, mdev); in vdpa_mgmtdev_fill()
499 if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, in vdpa_mgmtdev_fill()
505 if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, in vdpa_mgmtdev_fill()
510 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES, in vdpa_mgmtdev_fill()
516 genlmsg_end(msg, hdr); in vdpa_mgmtdev_fill()
520 genlmsg_cancel(msg, hdr); in vdpa_mgmtdev_fill()
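vdpa_mgmtdev_fill() follows the standard generic-netlink fill shape: genlmsg_put() opens a header for VDPA_CMD_MGMTDEV_NEW, a run of nla_put_*() calls adds the handle, supported device classes, max VQs and supported features, genlmsg_end() commits the record, and genlmsg_cancel() unwinds it when any put fails. A hedged skeleton of that shape, using the genetlink helpers from <net/genetlink.h> and <net/netlink.h> but illustrative family, command and attribute names rather than the vdpa ones:

static int example_obj_fill(struct sk_buff *msg, u32 portid, u32 seq, int flags)
{
        void *hdr;

        /* Open the genetlink header; NULL means the skb is already full. */
        hdr = genlmsg_put(msg, portid, seq, &example_nl_family, flags,
                          EXAMPLE_CMD_NEW);
        if (!hdr)
                return -EMSGSIZE;

        /* Each put returns nonzero once the skb runs out of tailroom. */
        if (nla_put_u32(msg, EXAMPLE_ATTR_MAX_VQS, 16))
                goto msg_err;
        if (nla_put_u64_64bit(msg, EXAMPLE_ATTR_FEATURES, 0ULL,
                              EXAMPLE_ATTR_PAD))
                goto msg_err;

        genlmsg_end(msg, hdr);          /* commit the record */
        return 0;

msg_err:
        genlmsg_cancel(msg, hdr);       /* trim back to before genlmsg_put() */
        return -EMSGSIZE;
}
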
527 struct sk_buff *msg; in vdpa_nl_cmd_mgmtdev_get_doit() local
530 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in vdpa_nl_cmd_mgmtdev_get_doit()
531 if (!msg) in vdpa_nl_cmd_mgmtdev_get_doit()
543 err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0); in vdpa_nl_cmd_mgmtdev_get_doit()
547 err = genlmsg_reply(msg, info); in vdpa_nl_cmd_mgmtdev_get_doit()
551 nlmsg_free(msg); in vdpa_nl_cmd_mgmtdev_get_doit()
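vdpa_nl_cmd_mgmtdev_get_doit() is the single-object GET path: allocate a reply skb with nlmsg_new(), fill it against the requester's portid and sequence number, then hand it to genlmsg_reply(); the skb only needs an explicit nlmsg_free() when the fill step fails before the hand-off. Roughly, with the device lookup and locking elided:

static int example_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *msg;
        int err;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        err = example_obj_fill(msg, info->snd_portid, info->snd_seq, 0);
        if (err)
                goto free_msg;

        /* genlmsg_reply() consumes the skb whether or not delivery succeeds. */
        return genlmsg_reply(msg, info);

free_msg:
        nlmsg_free(msg);
        return err;
}
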
556 vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) in vdpa_nl_cmd_mgmtdev_get_dumpit() argument
569 err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid, in vdpa_nl_cmd_mgmtdev_get_dumpit()
578 return msg->len; in vdpa_nl_cmd_mgmtdev_get_dumpit()
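The dumpit variant writes one record per management device into the skb the netlink core supplies, tagging each record with the requester's portid, the dump sequence number and NLM_F_MULTI, and returns msg->len; a nonzero length makes the core call back for more, so cb->args[] is used to remember how far the previous pass got. A sketch of that loop over a hypothetical object list:

static int example_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
        struct example_obj *obj;
        int start = cb->args[0];        /* records emitted by earlier passes */
        int idx = 0;

        list_for_each_entry(obj, &example_obj_list, list) {
                if (idx < start) {
                        idx++;
                        continue;
                }
                if (example_obj_fill(msg, NETLINK_CB(cb->skb).portid,
                                     cb->nlh->nlmsg_seq, NLM_F_MULTI))
                        break;          /* skb full: resume from this object */
                idx++;
        }
        cb->args[0] = idx;
        return msg->len;                /* 0 terminates the dump */
}
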
739 vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq, in vdpa_dev_fill() argument
749 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW); in vdpa_dev_fill()
753 err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev); in vdpa_dev_fill()
764 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) in vdpa_dev_fill()
766 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) in vdpa_dev_fill()
768 if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id)) in vdpa_dev_fill()
770 if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs)) in vdpa_dev_fill()
772 if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size)) in vdpa_dev_fill()
774 if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size)) in vdpa_dev_fill()
777 genlmsg_end(msg, hdr); in vdpa_dev_fill()
781 genlmsg_cancel(msg, hdr); in vdpa_dev_fill()
788 struct sk_buff *msg; in vdpa_nl_cmd_dev_get_doit() local
796 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in vdpa_nl_cmd_dev_get_doit()
797 if (!msg) in vdpa_nl_cmd_dev_get_doit()
812 err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack); in vdpa_nl_cmd_dev_get_doit()
816 err = genlmsg_reply(msg, info); in vdpa_nl_cmd_dev_get_doit()
825 nlmsg_free(msg); in vdpa_nl_cmd_dev_get_doit()
830 struct sk_buff *msg; member
848 err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid, in vdpa_dev_dump()
857 static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) in vdpa_nl_cmd_dev_get_dumpit() argument
861 info.msg = msg; in vdpa_nl_cmd_dev_get_dumpit()
870 return msg->len; in vdpa_nl_cmd_dev_get_dumpit()
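For the per-device dump the walk happens via a device-iterator callback that only receives a single void *data pointer, which is why the dump skb and the netlink_callback are first bundled into a small context struct (the info.msg = msg assignment above) and each visited device is filled from inside the callback. A hedged sketch of that bundling; the struct and field names are illustrative, only the msg member and the NETLINK_CB(info->cb->skb).portid usage come from the listing:

struct example_dump_info {
        struct sk_buff *msg;            /* dump skb handed in by the core */
        struct netlink_callback *cb;    /* portid, seq and resume state */
        int start_idx;
        int idx;
};

static int example_dev_dump(struct device *dev, void *data)
{
        struct example_dump_info *info = data;
        int err;

        if (info->idx < info->start_idx) {
                info->idx++;            /* emitted on an earlier dump pass */
                return 0;
        }

        err = example_obj_fill(info->msg, NETLINK_CB(info->cb->skb).portid,
                               info->cb->nlh->nlmsg_seq, NLM_F_MULTI);
        if (err)
                return err;             /* nonzero stops the device walk */

        info->idx++;
        return 0;
}

The dumpit wrapper then seeds the context from cb->args[0], walks the devices with something like bus_for_each_dev(), writes info.idx back into cb->args[0], and returns msg->len, as in the earlier dump sketch.
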
873 static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features, in vdpa_dev_net_mq_config_fill() argument
884 return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16); in vdpa_dev_net_mq_config_fill()
887 static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features, in vdpa_dev_net_mtu_config_fill() argument
897 return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16); in vdpa_dev_net_mtu_config_fill()
900 static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features, in vdpa_dev_net_mac_config_fill() argument
906 return nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, in vdpa_dev_net_mac_config_fill()
910 static int vdpa_dev_net_status_config_fill(struct sk_buff *msg, u64 features, in vdpa_dev_net_status_config_fill() argument
919 return nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16); in vdpa_dev_net_status_config_fill()
922 static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg) in vdpa_dev_net_config_fill() argument
931 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device, in vdpa_dev_net_config_fill()
935 if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config)) in vdpa_dev_net_config_fill()
938 if (vdpa_dev_net_mac_config_fill(msg, features_device, &config)) in vdpa_dev_net_config_fill()
941 if (vdpa_dev_net_status_config_fill(msg, features_device, &config)) in vdpa_dev_net_config_fill()
944 return vdpa_dev_net_mq_config_fill(msg, features_device, &config); in vdpa_dev_net_config_fill()
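The net config helpers above all apply the same guard: a config attribute is emitted only when the matching feature bit is present in the device features (max VQ pairs for MQ, MTU, MAC address, link status), and multi-byte fields are converted out of the virtio little-endian layout before being put. A simplified sketch of that guard, assuming a plain __le16 field and le16_to_cpu() in place of the virtio config accessors, with placeholder feature and attribute names:

struct example_net_config {
        __le16 mtu;                     /* little-endian, as in config space */
};

static int example_mtu_config_fill(struct sk_buff *msg, u64 features,
                                   const struct example_net_config *config)
{
        u16 mtu;

        /* No attribute at all when the feature bit was not offered. */
        if (!(features & BIT_ULL(EXAMPLE_F_MTU)))
                return 0;

        mtu = le16_to_cpu(config->mtu);
        return nla_put_u16(msg, EXAMPLE_ATTR_NET_CFG_MTU, mtu);
}
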
948 vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq, in vdpa_dev_config_fill() argument
958 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, in vdpa_dev_config_fill()
965 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) { in vdpa_dev_config_fill()
971 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) { in vdpa_dev_config_fill()
980 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver, in vdpa_dev_config_fill()
989 err = vdpa_dev_net_config_fill(vdev, msg); in vdpa_dev_config_fill()
999 genlmsg_end(msg, hdr); in vdpa_dev_config_fill()
1003 genlmsg_cancel(msg, hdr); in vdpa_dev_config_fill()
1009 static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg, in vdpa_fill_stats_rec() argument
1025 if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, in vdpa_fill_stats_rec()
1029 err = vdpa_dev_net_mq_config_fill(msg, features, &config); in vdpa_fill_stats_rec()
1033 if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index)) in vdpa_fill_stats_rec()
1036 err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack); in vdpa_fill_stats_rec()
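vdpa_fill_stats_rec() is where vendor statistics get delegated: once the common attributes (negotiated features, MQ config, and the queue index being queried) are in place, the parent driver's get_vendor_vq_stats() config op is called with the same skb so it can append its own counters. A hedged sketch of that hand-off, with the config reads and most error handling trimmed; the call shape follows the listing:

static int example_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
                                  struct genl_info *info, u32 index)
{
        /* Tag the record with the virtqueue it describes. */
        if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
                return -EMSGSIZE;

        /* The parent driver appends its vendor-specific attributes. */
        return vdev->config->get_vendor_vq_stats(vdev, index, msg,
                                                 info->extack);
}
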
1043 static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg, in vendor_stats_fill() argument
1054 err = vdpa_fill_stats_rec(vdev, msg, info, index); in vendor_stats_fill()
1061 struct sk_buff *msg, in vdpa_dev_vendor_stats_fill() argument
1071 hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, in vdpa_dev_vendor_stats_fill()
1076 if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) { in vdpa_dev_vendor_stats_fill()
1082 if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) { in vdpa_dev_vendor_stats_fill()
1095 err = vendor_stats_fill(vdev, msg, info, index); in vdpa_dev_vendor_stats_fill()
1101 genlmsg_end(msg, hdr); in vdpa_dev_vendor_stats_fill()
1106 genlmsg_cancel(msg, hdr); in vdpa_dev_vendor_stats_fill()
1113 struct sk_buff *msg; in vdpa_nl_cmd_dev_config_get_doit() local
1121 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in vdpa_nl_cmd_dev_config_get_doit()
1122 if (!msg) in vdpa_nl_cmd_dev_config_get_doit()
1138 err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq, in vdpa_nl_cmd_dev_config_get_doit()
1141 err = genlmsg_reply(msg, info); in vdpa_nl_cmd_dev_config_get_doit()
1148 nlmsg_free(msg); in vdpa_nl_cmd_dev_config_get_doit()
1164 err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid, in vdpa_dev_config_dump()
1175 vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) in vdpa_nl_cmd_dev_config_get_dumpit() argument
1179 info.msg = msg; in vdpa_nl_cmd_dev_config_get_dumpit()
1188 return msg->len; in vdpa_nl_cmd_dev_config_get_dumpit()
1195 struct sk_buff *msg; in vdpa_nl_cmd_dev_stats_get_doit() local
1208 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in vdpa_nl_cmd_dev_stats_get_doit()
1209 if (!msg) in vdpa_nl_cmd_dev_stats_get_doit()
1226 err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index); in vdpa_nl_cmd_dev_stats_get_doit()
1230 err = genlmsg_reply(msg, info); in vdpa_nl_cmd_dev_stats_get_doit()
1240 nlmsg_free(msg); in vdpa_nl_cmd_dev_stats_get_doit()