1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2019 Chelsio Communications. All rights reserved. */ 3 4 #include "cxgb4.h" 5 #include "cxgb4_tc_matchall.h" 6 #include "sched.h" 7 #include "cxgb4_uld.h" 8 #include "cxgb4_filter.h" 9 #include "cxgb4_tc_flower.h" 10 11 static int cxgb4_matchall_egress_validate(struct net_device *dev, 12 struct tc_cls_matchall_offload *cls) 13 { 14 struct netlink_ext_ack *extack = cls->common.extack; 15 struct flow_action *actions = &cls->rule->action; 16 struct port_info *pi = netdev2pinfo(dev); 17 struct flow_action_entry *entry; 18 struct ch_sched_queue qe; 19 struct sched_class *e; 20 u64 max_link_rate; 21 u32 i, speed; 22 int ret; 23 24 if (!flow_action_has_entries(actions)) { 25 NL_SET_ERR_MSG_MOD(extack, 26 "Egress MATCHALL offload needs at least 1 policing action"); 27 return -EINVAL; 28 } else if (!flow_offload_has_one_action(actions)) { 29 NL_SET_ERR_MSG_MOD(extack, 30 "Egress MATCHALL offload only supports 1 policing action"); 31 return -EINVAL; 32 } else if (pi->tc_block_shared) { 33 NL_SET_ERR_MSG_MOD(extack, 34 "Egress MATCHALL offload not supported with shared blocks"); 35 return -EINVAL; 36 } 37 38 ret = t4_get_link_params(pi, NULL, &speed, NULL); 39 if (ret) { 40 NL_SET_ERR_MSG_MOD(extack, 41 "Failed to get max speed supported by the link"); 42 return -EINVAL; 43 } 44 45 /* Convert from Mbps to bps */ 46 max_link_rate = (u64)speed * 1000 * 1000; 47 48 flow_action_for_each(i, entry, actions) { 49 switch (entry->id) { 50 case FLOW_ACTION_POLICE: 51 /* Convert bytes per second to bits per second */ 52 if (entry->police.rate_bytes_ps * 8 > max_link_rate) { 53 NL_SET_ERR_MSG_MOD(extack, 54 "Specified policing max rate is larger than underlying link speed"); 55 return -ERANGE; 56 } 57 break; 58 default: 59 NL_SET_ERR_MSG_MOD(extack, 60 "Only policing action supported with Egress MATCHALL offload"); 61 return -EOPNOTSUPP; 62 } 63 } 64 65 for (i = 0; i < pi->nqsets; i++) { 66 memset(&qe, 0, sizeof(qe)); 67 qe.queue = i; 68 69 e = cxgb4_sched_queue_lookup(dev, &qe); 70 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) { 71 NL_SET_ERR_MSG_MOD(extack, 72 "Some queues are already bound to different class"); 73 return -EBUSY; 74 } 75 } 76 77 return 0; 78 } 79 80 static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc) 81 { 82 struct port_info *pi = netdev2pinfo(dev); 83 struct ch_sched_queue qe; 84 int ret; 85 u32 i; 86 87 for (i = 0; i < pi->nqsets; i++) { 88 qe.queue = i; 89 qe.class = tc; 90 ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE); 91 if (ret) 92 goto out_free; 93 } 94 95 return 0; 96 97 out_free: 98 while (i--) { 99 qe.queue = i; 100 qe.class = SCHED_CLS_NONE; 101 cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); 102 } 103 104 return ret; 105 } 106 107 static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev) 108 { 109 struct port_info *pi = netdev2pinfo(dev); 110 struct ch_sched_queue qe; 111 u32 i; 112 113 for (i = 0; i < pi->nqsets; i++) { 114 qe.queue = i; 115 qe.class = SCHED_CLS_NONE; 116 cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE); 117 } 118 } 119 120 static int cxgb4_matchall_alloc_tc(struct net_device *dev, 121 struct tc_cls_matchall_offload *cls) 122 { 123 struct ch_sched_params p = { 124 .type = SCHED_CLASS_TYPE_PACKET, 125 .u.params.level = SCHED_CLASS_LEVEL_CH_RL, 126 .u.params.mode = SCHED_CLASS_MODE_CLASS, 127 .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS, 128 .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS, 129 .u.params.class = SCHED_CLS_NONE, 130 .u.params.minrate = 0, 131 .u.params.weight = 0, 132 .u.params.pktsize = dev->mtu, 133 }; 134 struct netlink_ext_ack *extack = cls->common.extack; 135 struct cxgb4_tc_port_matchall *tc_port_matchall; 136 struct port_info *pi = netdev2pinfo(dev); 137 struct adapter *adap = netdev2adap(dev); 138 struct flow_action_entry *entry; 139 struct sched_class *e; 140 int ret; 141 u32 i; 142 143 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 144 145 flow_action_for_each(i, entry, &cls->rule->action) 146 if (entry->id == FLOW_ACTION_POLICE) 147 break; 148 149 /* Convert from bytes per second to Kbps */ 150 p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000); 151 p.u.params.channel = pi->tx_chan; 152 e = cxgb4_sched_class_alloc(dev, &p); 153 if (!e) { 154 NL_SET_ERR_MSG_MOD(extack, 155 "No free traffic class available for policing action"); 156 return -ENOMEM; 157 } 158 159 ret = cxgb4_matchall_tc_bind_queues(dev, e->idx); 160 if (ret) { 161 NL_SET_ERR_MSG_MOD(extack, 162 "Could not bind queues to traffic class"); 163 goto out_free; 164 } 165 166 tc_port_matchall->egress.hwtc = e->idx; 167 tc_port_matchall->egress.cookie = cls->cookie; 168 tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED; 169 return 0; 170 171 out_free: 172 cxgb4_sched_class_free(dev, e->idx); 173 return ret; 174 } 175 176 static void cxgb4_matchall_free_tc(struct net_device *dev) 177 { 178 struct cxgb4_tc_port_matchall *tc_port_matchall; 179 struct port_info *pi = netdev2pinfo(dev); 180 struct adapter *adap = netdev2adap(dev); 181 182 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 183 cxgb4_matchall_tc_unbind_queues(dev); 184 cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc); 185 186 tc_port_matchall->egress.hwtc = SCHED_CLS_NONE; 187 tc_port_matchall->egress.cookie = 0; 188 tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED; 189 } 190 191 static int cxgb4_matchall_alloc_filter(struct net_device *dev, 192 struct tc_cls_matchall_offload *cls) 193 { 194 struct netlink_ext_ack *extack = cls->common.extack; 195 struct cxgb4_tc_port_matchall *tc_port_matchall; 196 struct port_info *pi = netdev2pinfo(dev); 197 struct adapter *adap = netdev2adap(dev); 198 struct ch_filter_specification *fs; 199 int ret, fidx; 200 201 /* Note that TC uses prio 0 to indicate stack to generate 202 * automatic prio and hence doesn't pass prio 0 to driver. 203 * However, the hardware TCAM index starts from 0. Hence, the 204 * -1 here. 1 slot is enough to create a wildcard matchall 205 * VIID rule. 206 */ 207 if (cls->common.prio <= adap->tids.nftids) 208 fidx = cls->common.prio - 1; 209 else 210 fidx = cxgb4_get_free_ftid(dev, PF_INET); 211 212 /* Only insert MATCHALL rule if its priority doesn't conflict 213 * with existing rules in the LETCAM. 214 */ 215 if (fidx < 0 || 216 !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) { 217 NL_SET_ERR_MSG_MOD(extack, 218 "No free LETCAM index available"); 219 return -ENOMEM; 220 } 221 222 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 223 fs = &tc_port_matchall->ingress.fs; 224 memset(fs, 0, sizeof(*fs)); 225 226 fs->tc_prio = cls->common.prio; 227 fs->tc_cookie = cls->cookie; 228 fs->hitcnts = 1; 229 230 fs->val.pfvf_vld = 1; 231 fs->val.pf = adap->pf; 232 fs->val.vf = pi->vin; 233 234 cxgb4_process_flow_actions(dev, &cls->rule->action, fs); 235 236 ret = cxgb4_set_filter(dev, fidx, fs); 237 if (ret) 238 return ret; 239 240 tc_port_matchall->ingress.tid = fidx; 241 tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED; 242 return 0; 243 } 244 245 static int cxgb4_matchall_free_filter(struct net_device *dev) 246 { 247 struct cxgb4_tc_port_matchall *tc_port_matchall; 248 struct port_info *pi = netdev2pinfo(dev); 249 struct adapter *adap = netdev2adap(dev); 250 int ret; 251 252 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 253 254 ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid, 255 &tc_port_matchall->ingress.fs); 256 if (ret) 257 return ret; 258 259 tc_port_matchall->ingress.packets = 0; 260 tc_port_matchall->ingress.bytes = 0; 261 tc_port_matchall->ingress.last_used = 0; 262 tc_port_matchall->ingress.tid = 0; 263 tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED; 264 return 0; 265 } 266 267 int cxgb4_tc_matchall_replace(struct net_device *dev, 268 struct tc_cls_matchall_offload *cls_matchall, 269 bool ingress) 270 { 271 struct netlink_ext_ack *extack = cls_matchall->common.extack; 272 struct cxgb4_tc_port_matchall *tc_port_matchall; 273 struct port_info *pi = netdev2pinfo(dev); 274 struct adapter *adap = netdev2adap(dev); 275 int ret; 276 277 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 278 if (ingress) { 279 if (tc_port_matchall->ingress.state == 280 CXGB4_MATCHALL_STATE_ENABLED) { 281 NL_SET_ERR_MSG_MOD(extack, 282 "Only 1 Ingress MATCHALL can be offloaded"); 283 return -ENOMEM; 284 } 285 286 ret = cxgb4_validate_flow_actions(dev, 287 &cls_matchall->rule->action); 288 if (ret) 289 return ret; 290 291 return cxgb4_matchall_alloc_filter(dev, cls_matchall); 292 } 293 294 if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) { 295 NL_SET_ERR_MSG_MOD(extack, 296 "Only 1 Egress MATCHALL can be offloaded"); 297 return -ENOMEM; 298 } 299 300 ret = cxgb4_matchall_egress_validate(dev, cls_matchall); 301 if (ret) 302 return ret; 303 304 return cxgb4_matchall_alloc_tc(dev, cls_matchall); 305 } 306 307 int cxgb4_tc_matchall_destroy(struct net_device *dev, 308 struct tc_cls_matchall_offload *cls_matchall, 309 bool ingress) 310 { 311 struct cxgb4_tc_port_matchall *tc_port_matchall; 312 struct port_info *pi = netdev2pinfo(dev); 313 struct adapter *adap = netdev2adap(dev); 314 315 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 316 if (ingress) { 317 if (cls_matchall->cookie != 318 tc_port_matchall->ingress.fs.tc_cookie) 319 return -ENOENT; 320 321 return cxgb4_matchall_free_filter(dev); 322 } 323 324 if (cls_matchall->cookie != tc_port_matchall->egress.cookie) 325 return -ENOENT; 326 327 cxgb4_matchall_free_tc(dev); 328 return 0; 329 } 330 331 int cxgb4_tc_matchall_stats(struct net_device *dev, 332 struct tc_cls_matchall_offload *cls_matchall) 333 { 334 struct cxgb4_tc_port_matchall *tc_port_matchall; 335 struct port_info *pi = netdev2pinfo(dev); 336 struct adapter *adap = netdev2adap(dev); 337 u64 packets, bytes; 338 int ret; 339 340 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 341 if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED) 342 return -ENOENT; 343 344 ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid, 345 &packets, &bytes, 346 tc_port_matchall->ingress.fs.hash); 347 if (ret) 348 return ret; 349 350 if (tc_port_matchall->ingress.packets != packets) { 351 flow_stats_update(&cls_matchall->stats, 352 bytes - tc_port_matchall->ingress.bytes, 353 packets - tc_port_matchall->ingress.packets, 354 tc_port_matchall->ingress.last_used); 355 356 tc_port_matchall->ingress.packets = packets; 357 tc_port_matchall->ingress.bytes = bytes; 358 tc_port_matchall->ingress.last_used = jiffies; 359 } 360 361 return 0; 362 } 363 364 static void cxgb4_matchall_disable_offload(struct net_device *dev) 365 { 366 struct cxgb4_tc_port_matchall *tc_port_matchall; 367 struct port_info *pi = netdev2pinfo(dev); 368 struct adapter *adap = netdev2adap(dev); 369 370 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; 371 if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) 372 cxgb4_matchall_free_tc(dev); 373 374 if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED) 375 cxgb4_matchall_free_filter(dev); 376 } 377 378 int cxgb4_init_tc_matchall(struct adapter *adap) 379 { 380 struct cxgb4_tc_port_matchall *tc_port_matchall; 381 struct cxgb4_tc_matchall *tc_matchall; 382 int ret; 383 384 tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL); 385 if (!tc_matchall) 386 return -ENOMEM; 387 388 tc_port_matchall = kcalloc(adap->params.nports, 389 sizeof(*tc_port_matchall), 390 GFP_KERNEL); 391 if (!tc_port_matchall) { 392 ret = -ENOMEM; 393 goto out_free_matchall; 394 } 395 396 tc_matchall->port_matchall = tc_port_matchall; 397 adap->tc_matchall = tc_matchall; 398 return 0; 399 400 out_free_matchall: 401 kfree(tc_matchall); 402 return ret; 403 } 404 405 void cxgb4_cleanup_tc_matchall(struct adapter *adap) 406 { 407 u8 i; 408 409 if (adap->tc_matchall) { 410 if (adap->tc_matchall->port_matchall) { 411 for (i = 0; i < adap->params.nports; i++) { 412 struct net_device *dev = adap->port[i]; 413 414 if (dev) 415 cxgb4_matchall_disable_offload(dev); 416 } 417 kfree(adap->tc_matchall->port_matchall); 418 } 419 kfree(adap->tc_matchall); 420 } 421 } 422