// SPDX-License-Identifier: GPL-2.0-only

#include "netlink.h"
#include "common.h"

struct rings_req_info {
	struct ethnl_req_info		base;
};

struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
	struct kernel_ethtool_ringparam	kernel_ringparam;
};

#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)

const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]	=
		NLA_POLICY_NESTED(ethnl_header_policy),
};

static int rings_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      struct genl_info *info)
{
	struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	struct netlink_ext_ack *extack = info ? info->extack : NULL;
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_ringparam)
		return -EOPNOTSUPP;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
					&data->kernel_ringparam, extack);
	ethnl_ops_complete(dev);

	return 0;
}

static int rings_reply_size(const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	return nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
	       nla_total_size(sizeof(u32));	/* _RINGS_CQE_SIZE */
}

static int rings_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
	const struct ethtool_ringparam *ringparam = &data->ringparam;

	WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);

	if ((ringparam->rx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
			  ringparam->rx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
			  ringparam->rx_pending))) ||
	    (ringparam->rx_mini_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
			  ringparam->rx_mini_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
			  ringparam->rx_mini_pending))) ||
	    (ringparam->rx_jumbo_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
			  ringparam->rx_jumbo_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
			  ringparam->rx_jumbo_pending))) ||
	    (ringparam->tx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
			  ringparam->tx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
			  ringparam->tx_pending))) ||
	    (kr->rx_buf_len &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
	    (kr->tcp_data_split &&
	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
			 kr->tcp_data_split))) ||
	    (kr->cqe_size &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))))
		return -EMSGSIZE;

	return 0;
}
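/* Registration glue for the generic ethnl GET machinery: the core allocates
 * reply_data_size bytes for a rings_reply_data, then drives the
 * ->prepare_data / ->reply_size / ->fill_reply sequence above for each
 * RINGS_GET request and for RINGS_NTF notifications.
 */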
const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,
};

/* RINGS_SET */

const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]	=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]	= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]	= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]	= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_CQE_SIZE]	= NLA_POLICY_MIN(NLA_U32, 1),
};
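/* The set path below is a read-modify-write: fetch the current settings
 * under rtnl_lock, fold in only the attributes the user supplied, validate
 * the result against the driver-reported maxima and capability flags, and
 * only then call ->set_ringparam.  From userspace this corresponds to
 * something like "ethtool -G eth0 rx 1024 tx 1024" (with rx-buf-len and
 * cqe-size exposed by sufficiently recent ethtool builds).
 */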
int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
{
	struct kernel_ethtool_ringparam kernel_ringparam = {};
	struct ethtool_ringparam ringparam = {};
	struct ethnl_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	const struct nlattr *err_attr;
	const struct ethtool_ops *ops;
	struct net_device *dev;
	bool mod = false;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_RINGS_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;
	dev = req_info.dev;
	ops = dev->ethtool_ops;
	ret = -EOPNOTSUPP;
	if (!ops->get_ringparam || !ops->set_ringparam)
		goto out_dev;

	rtnl_lock();
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		goto out_rtnl;
	ops->get_ringparam(dev, &ringparam, &kernel_ringparam, info->extack);

	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
	ethnl_update_u32(&kernel_ringparam.cqe_size,
			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
	ret = 0;
	if (!mod)
		goto out_ops;

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else
		err_attr = NULL;
	if (err_attr) {
		ret = -EINVAL;
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		goto out_ops;
	}

	if (kernel_ringparam.rx_buf_len != 0 &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
		ret = -EOPNOTSUPP;
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
				    "setting rx buf len not supported");
		goto out_ops;
	}

	if (kernel_ringparam.cqe_size &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
		ret = -EOPNOTSUPP;
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
				    "setting cqe size not supported");
		goto out_ops;
	}

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
					      &kernel_ringparam, info->extack);
	if (ret < 0)
		goto out_ops;
	ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);

out_ops:
	ethnl_ops_complete(dev);
out_rtnl:
	rtnl_unlock();
out_dev:
	ethnl_parse_header_dev_put(&req_info);
	return ret;
}