// SPDX-License-Identifier: GPL-2.0-only

#include "netlink.h"
#include "common.h"

struct rings_req_info {
	struct ethnl_req_info		base;
};

struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
	struct kernel_ethtool_ringparam	kernel_ringparam;
};

#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)

const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]	=
		NLA_POLICY_NESTED(ethnl_header_policy),
};

/* Query the driver's ring parameters and cache them in the reply data. */
static int rings_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      struct genl_info *info)
{
	struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	struct netlink_ext_ack *extack = info ? info->extack : NULL;
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_ringparam)
		return -EOPNOTSUPP;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
					&data->kernel_ringparam, extack);
	ethnl_ops_complete(dev);

	return 0;
}

/* Upper bound on the attribute payload of a RINGS_GET reply. */
static int rings_reply_size(const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	return nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_CQE_SIZE */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TX_PUSH */
	       nla_total_size(sizeof(u8));	/* _RINGS_RX_PUSH */
}

/* Emit only the ring attributes the device actually supports (max > 0). */
static int rings_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
	const struct ethtool_ringparam *ringparam = &data->ringparam;

	WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);

	if ((ringparam->rx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
			  ringparam->rx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
			  ringparam->rx_pending))) ||
	    (ringparam->rx_mini_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
			  ringparam->rx_mini_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
			  ringparam->rx_mini_pending))) ||
	    (ringparam->rx_jumbo_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
			  ringparam->rx_jumbo_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
			  ringparam->rx_jumbo_pending))) ||
	    (ringparam->tx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
			  ringparam->tx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
			  ringparam->tx_pending))) ||
	    (kr->rx_buf_len &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
	    (kr->tcp_data_split &&
	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
			 kr->tcp_data_split))) ||
	    (kr->cqe_size &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push))
		return -EMSGSIZE;

	return 0;
}

/* RINGS_SET */

const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]	=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]	= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]	= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]	= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_CQE_SIZE]	= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TX_PUSH]	= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_RX_PUSH]	= NLA_POLICY_MAX(NLA_U8, 1),
};

/* Reject attributes the driver does not advertise in supported_ring_params;
 * return 1 (proceed with the set) only if both get and set ops exist.
 */
static int
ethnl_set_rings_validate(struct ethnl_req_info *req_info,
			 struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
	struct nlattr **tb = info->attrs;

	if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
				    "setting rx buf len not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
				    "setting cqe size not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH],
				    "setting tx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_PUSH],
				    "setting rx push not supported");
		return -EOPNOTSUPP;
	}

	return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}

/* Apply requested changes on top of the current settings; return 1 if
 * something was changed (triggers a notification), 0 if nothing changed.
 */
static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
	struct kernel_ethtool_ringparam kernel_ringparam = {};
	struct ethtool_ringparam ringparam = {};
	struct net_device *dev = req_info->dev;
	struct nlattr **tb = info->attrs;
	const struct nlattr *err_attr;
	bool mod = false;
	int ret;

	dev->ethtool_ops->get_ringparam(dev, &ringparam,
					&kernel_ringparam, info->extack);

	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
	ethnl_update_u32(&kernel_ringparam.cqe_size,
			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
	ethnl_update_u8(&kernel_ringparam.tx_push,
			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
	ethnl_update_u8(&kernel_ringparam.rx_push,
			tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
	if (!mod)
		return 0;

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else
		err_attr = NULL;
	if (err_attr) {
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		return -EINVAL;
	}

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
					      &kernel_ringparam, info->extack);
	return ret < 0 ? ret : 1;
}

const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,

	.set_validate		= ethnl_set_rings_validate,
	.set			= ethnl_set_rings,
	.set_ntf_cmd		= ETHTOOL_MSG_RINGS_NTF,
};
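
/*
 * Illustrative sketch, not part of this file: a minimal driver-side
 * get_ringparam/set_ringparam pair of the shape the code above invokes
 * through dev->ethtool_ops.  The callback signatures and the
 * supported_ring_params / ETHTOOL_RING_USE_RX_BUF_LEN usage mirror what
 * this file relies on; "example_priv", its fields and the fixed limits
 * are hypothetical and stand in for a real device's state.
 */
struct example_priv {
	u32 rx_count;		/* current RX ring length */
	u32 tx_count;		/* current TX ring length */
	u32 rx_buf_len;		/* current RX buffer length */
};

static void example_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct example_priv *priv = netdev_priv(dev);

	/* limits that ethnl_set_rings() checks requested values against */
	ring->rx_max_pending = 4096;
	ring->tx_max_pending = 4096;
	/* current settings reported in RINGS_GET replies */
	ring->rx_pending = priv->rx_count;
	ring->tx_pending = priv->tx_count;
	kernel_ring->rx_buf_len = priv->rx_buf_len;
}

static int example_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->rx_count = ring->rx_pending;
	priv->tx_count = ring->tx_pending;
	priv->rx_buf_len = kernel_ring->rx_buf_len;
	/* a real driver would reprogram its descriptor rings here */
	return 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	/* advertise optional parameters so ethnl_set_rings_validate()
	 * accepts ETHTOOL_A_RINGS_RX_BUF_LEN for this device
	 */
	.supported_ring_params	= ETHTOOL_RING_USE_RX_BUF_LEN,
	.get_ringparam		= example_get_ringparam,
	.set_ringparam		= example_set_ringparam,
};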