// SPDX-License-Identifier: GPL-2.0-only

#include "netlink.h"
#include "common.h"

struct rings_req_info {
	struct ethnl_req_info		base;
};

struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
	struct kernel_ethtool_ringparam	kernel_ringparam;
	u32				supported_ring_params;
};

#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)

const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};

static int rings_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      struct genl_info *info)
{
	struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	struct netlink_ext_ack *extack = info ? info->extack : NULL;
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_ringparam)
		return -EOPNOTSUPP;

	data->supported_ring_params = dev->ethtool_ops->supported_ring_params;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
					&data->kernel_ringparam, extack);
	ethnl_ops_complete(dev);

	return 0;
}

static int rings_reply_size(const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	return nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_CQE_SIZE */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TX_PUSH */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_RX_PUSH */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_PUSH_BUF_LEN */
	       nla_total_size(sizeof(u32));	/* _RINGS_TX_PUSH_BUF_LEN_MAX */
}
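
/* Compose the GET reply.  Ring kinds the device does not implement
 * (max count of zero) and optional driver parameters left at zero are
 * omitted from the message rather than reported as zero; the TX push
 * buffer attributes are emitted only when the driver advertises
 * ETHTOOL_RING_USE_TX_PUSH_BUF_LEN in supported_ring_params.
 */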
static int rings_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
	const struct ethtool_ringparam *ringparam = &data->ringparam;
	u32 supported_ring_params = data->supported_ring_params;

	WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);

	if ((ringparam->rx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
			  ringparam->rx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
			  ringparam->rx_pending))) ||
	    (ringparam->rx_mini_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
			  ringparam->rx_mini_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
			  ringparam->rx_mini_pending))) ||
	    (ringparam->rx_jumbo_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
			  ringparam->rx_jumbo_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
			  ringparam->rx_jumbo_pending))) ||
	    (ringparam->tx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
			  ringparam->tx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
			  ringparam->tx_pending))) ||
	    (kr->rx_buf_len &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
	    (kr->tcp_data_split &&
	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
			 kr->tcp_data_split))) ||
	    (kr->cqe_size &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push) ||
	    ((supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN) &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX,
			  kr->tx_push_buf_max_len) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN,
			  kr->tx_push_buf_len))))
		return -EMSGSIZE;

	return 0;
}

/* RINGS_SET */

const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_RX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN]	= { .type = NLA_U32 },
};
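
/* Reject attributes for parameters the driver has not opted into via
 * supported_ring_params, and require both the get_ringparam and
 * set_ringparam ops.  Following the ethnl ->set_validate() convention,
 * returning 1 lets the core proceed to ->set(); a negative errno
 * aborts the request.
 */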
static int
ethnl_set_rings_validate(struct ethnl_req_info *req_info,
			 struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
	struct nlattr **tb = info->attrs;

	if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
				    "setting rx buf len not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
				    "setting cqe size not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH],
				    "setting tx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_PUSH],
				    "setting rx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
				    "setting tx push buf len is not supported");
		return -EOPNOTSUPP;
	}

	return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}

static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
	struct kernel_ethtool_ringparam kernel_ringparam = {};
	struct ethtool_ringparam ringparam = {};
	struct net_device *dev = req_info->dev;
	struct nlattr **tb = info->attrs;
	const struct nlattr *err_attr;
	bool mod = false;
	int ret;

	dev->ethtool_ops->get_ringparam(dev, &ringparam,
					&kernel_ringparam, info->extack);

	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
	ethnl_update_u32(&kernel_ringparam.cqe_size,
			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
	ethnl_update_u8(&kernel_ringparam.tx_push,
			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
	ethnl_update_u8(&kernel_ringparam.rx_push,
			tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
	ethnl_update_u32(&kernel_ringparam.tx_push_buf_len,
			 tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], &mod);
	if (!mod)
		return 0;

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else
		err_attr = NULL;
	if (err_attr) {
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		return -EINVAL;
	}

	if (kernel_ringparam.tx_push_buf_len > kernel_ringparam.tx_push_buf_max_len) {
		NL_SET_ERR_MSG_ATTR_FMT(info->extack,
					tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
					"Requested TX push buffer exceeds the maximum of %u",
					kernel_ringparam.tx_push_buf_max_len);

		return -EINVAL;
	}

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
					      &kernel_ringparam, info->extack);
	return ret < 0 ? ret : 1;
}

const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,

	.set_validate		= ethnl_set_rings_validate,
	.set			= ethnl_set_rings,
	.set_ntf_cmd		= ETHTOOL_MSG_RINGS_NTF,
};
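
/* Illustrative userspace flow exercising these ops (netlink-aware
 * ethtool assumed):
 *
 *   ethtool -g eth0         -> ETHTOOL_MSG_RINGS_GET
 *                              (rings_prepare_data + rings_fill_reply)
 *   ethtool -G eth0 rx 1024 -> ETHTOOL_MSG_RINGS_SET carrying
 *                              ETHTOOL_A_RINGS_RX
 *                              (ethnl_set_rings_validate, ethnl_set_rings,
 *                               then an ETHTOOL_MSG_RINGS_NTF notification
 *                               when the set() call reports a change)
 */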