// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_matchall.h"
#include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

/* Validate an egress TC-MATCHALL offload request.
 *
 * Accepts only a rule with exactly one action, which must be
 * FLOW_ACTION_POLICE, on a port that is not part of a shared TC block.
 * Also rejects a policing rate that exceeds the link's maximum speed.
 *
 * Returns 0 on success or a negative errno (-EINVAL, -ERANGE,
 * -EOPNOTSUPP) with an extack message describing the failure.
 */
static int cxgb4_matchall_egress_validate(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct port_info *pi = netdev2pinfo(dev);
	struct flow_action_entry *entry;
	u64 max_link_rate;
	u32 i, speed;
	int ret;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload needs at least 1 policing action");
		return -EINVAL;
	} else if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload only supports 1 policing action");
		return -EINVAL;
	} else if (pi->tc_block_shared) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload not supported with shared blocks");
		return -EINVAL;
	}

	/* speed is reported in Mbps; NOTE(review): assuming the other two
	 * output parameters of t4_get_link_params() are optional and may
	 * be NULL — confirm against its definition.
	 */
	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to get max speed supported by the link");
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	flow_action_for_each(i, entry, actions) {
		switch (entry->id) {
		case FLOW_ACTION_POLICE:
			/* Convert bytes per second to bits per second */
			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Specified policing max rate is larger than underlying link speed");
				return -ERANGE;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only policing action supported with Egress MATCHALL offload");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

/* Allocate a hardware traffic class implementing the egress policing rate.
 *
 * Must only be called after cxgb4_matchall_egress_validate() succeeded,
 * which guarantees the rule holds exactly one action and that it is
 * FLOW_ACTION_POLICE — the lookup loop below relies on this to leave
 * @entry pointing at a valid police entry.
 *
 * On success records the class index, rule cookie, and ENABLED state in
 * the per-port egress matchall bookkeeping and returns 0; returns -ENOMEM
 * if no free traffic class is available.
 */
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
				   struct tc_cls_matchall_offload *cls)
{
	/* Channel-level rate limit class: absolute rate in bits, packet
	 * scheduling, no class bound yet (SCHED_CLS_NONE lets the
	 * allocator pick one).
	 */
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CH_RL,
		.u.params.mode = SCHED_CLASS_MODE_CLASS,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.minrate = 0,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *entry;
	struct sched_class *e;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	/* Find the (single, pre-validated) police action. */
	flow_action_for_each(i, entry, &cls->rule->action)
		if (entry->id == FLOW_ACTION_POLICE)
			break;

	/* Convert from bytes per second to Kbps */
	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
	p.u.params.channel = pi->tx_chan;
	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free traffic class available for policing action");
		return -ENOMEM;
	}

	tc_port_matchall->egress.hwtc = e->idx;
	tc_port_matchall->egress.cookie = cls->cookie;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;
}

/* Release the egress policing traffic class and reset the per-port
 * egress matchall state back to DISABLED.
 */
static void cxgb4_matchall_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);

	tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
	tc_port_matchall->egress.cookie = 0;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
}

/* Install the ingress MATCHALL rule as a wildcard LETCAM filter matching
 * all traffic destined to this port's virtual interface (PF/VF match).
 *
 * Returns 0 on success, -ENOMEM when no suitable TCAM slot exists at the
 * requested priority, or the error from cxgb4_set_filter().
 */
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Note that TC uses prio 0 to indicate stack to generate
	 * automatic prio and hence doesn't pass prio 0 to driver.
	 * However, the hardware TCAM index starts from 0. Hence, the
	 * -1 here. 1 slot is enough to create a wildcard matchall
	 * VIID rule.
	 */
	if (cls->common.prio <= adap->tids.nftids)
		fidx = cls->common.prio - 1;
	else
		fidx = cxgb4_get_free_ftid(dev, PF_INET);

	/* Only insert MATCHALL rule if its priority doesn't conflict
	 * with existing rules in the LETCAM.
	 */
	if (fidx < 0 ||
	    !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs;
	memset(fs, 0, sizeof(*fs));

	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->hitcnts = 1;	/* enable hit counters for stats reporting */

	/* Match every packet arriving on this port's virtual interface:
	 * PF/VF fields only, everything else wildcarded.
	 */
	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	/* Translate the TC actions into filter actions (shared with the
	 * flower offload path).
	 */
	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid = fidx;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;
}

/* Remove the ingress MATCHALL filter from hardware and clear the cached
 * stats/state. State is only reset after the hardware delete succeeds, so
 * on failure the offload remains marked ENABLED.
 */
static int cxgb4_matchall_free_filter(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
			       &tc_port_matchall->ingress.fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.packets = 0;
	tc_port_matchall->ingress.bytes = 0;
	tc_port_matchall->ingress.last_used = 0;
	tc_port_matchall->ingress.tid = 0;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
	return 0;
}

/* Offload a new TC-MATCHALL rule on @dev.
 *
 * @ingress selects the direction: ingress rules become a wildcard LETCAM
 * filter, egress rules become a channel rate-limit traffic class. At most
 * one rule per direction per port is supported; a second request returns
 * -ENOMEM.
 */
int cxgb4_tc_matchall_replace(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct netlink_ext_ack *extack = cls_matchall->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (tc_port_matchall->ingress.state ==
		    CXGB4_MATCHALL_STATE_ENABLED) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only 1 Ingress MATCHALL can be offloaded");
			return -ENOMEM;
		}

		/* Action validation is shared with the flower offload. */
		ret = cxgb4_validate_flow_actions(dev,
						  &cls_matchall->rule->action);
		if (ret)
			return ret;

		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
	}

	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 Egress MATCHALL can be offloaded");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
	if (ret)
		return ret;

	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
}

/* Tear down a previously offloaded TC-MATCHALL rule.
 *
 * The rule is identified by its cookie; -ENOENT is returned when the
 * cookie does not match the currently installed rule for the requested
 * direction.
 */
int cxgb4_tc_matchall_destroy(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (cls_matchall->cookie !=
		    tc_port_matchall->ingress.fs.tc_cookie)
			return -ENOENT;

		return cxgb4_matchall_free_filter(dev);
	}

	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
		return -ENOENT;

	cxgb4_matchall_free_tc(dev);
	return 0;
}

/* Report hit statistics for the offloaded ingress MATCHALL rule.
 *
 * Reads the hardware filter counters and pushes the delta since the last
 * call into the TC stats; the cached totals and last-used timestamp are
 * only refreshed when the packet count actually changed. Returns -ENOENT
 * if no ingress rule is installed.
 */
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u64 packets, bytes;
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
					&packets, &bytes,
					tc_port_matchall->ingress.fs.hash);
	if (ret)
		return ret;

	if (tc_port_matchall->ingress.packets != packets) {
		/* Report only the increments since the previous poll. */
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  tc_port_matchall->ingress.last_used);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}

/* Tear down any active matchall offloads (both directions) on @dev.
 * Used during module/adapter cleanup.
 */
static void cxgb4_matchall_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_tc(dev);

	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_filter(dev);
}

/* Allocate the adapter-wide matchall context: one zeroed per-port state
 * slot for each port. Returns 0 on success or -ENOMEM.
 */
int cxgb4_init_tc_matchall(struct adapter *adap)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_tc_matchall *tc_matchall;
	int ret;

	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
	if (!tc_matchall)
		return -ENOMEM;

	tc_port_matchall = kcalloc(adap->params.nports,
				   sizeof(*tc_port_matchall),
				   GFP_KERNEL);
	if (!tc_port_matchall) {
		ret = -ENOMEM;
		goto out_free_matchall;
	}

	tc_matchall->port_matchall = tc_port_matchall;
	adap->tc_matchall = tc_matchall;
	return 0;

out_free_matchall:
	kfree(tc_matchall);
	return ret;
}

/* Free the adapter-wide matchall context, first disabling any offloads
 * still active on each registered port.
 */
void cxgb4_cleanup_tc_matchall(struct adapter *adap)
{
	u8 i;

	if (adap->tc_matchall) {
		if (adap->tc_matchall->port_matchall) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_matchall_disable_offload(dev);
			}
			kfree(adap->tc_matchall->port_matchall);
		}
		kfree(adap->tc_matchall);
	}
}