/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	/* kzalloc() can fail; only initialize the lock if it didn't. */
	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	struct ch_tc_flower_entry *flower_entry;

	hash_for_each_possible_rcu(adap->flower_anymatch_tbl, flower_entry,
				   link, flower_cookie)
		if (flower_entry->tc_flower_cookie == flower_cookie)
			return flower_entry;
	return NULL;
}
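
/* Translate the flower match supplied by the TC core (dissector keys in
 * cls->key, masked by cls->mask) into a Chelsio hardware filter
 * specification. Only the keys accepted by cxgb4_validate_flow_match()
 * are examined here.
 */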
static void cxgb4_process_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  cls->key);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		u16 ethtype_key = ntohs(key->n_proto);
		u16 ethtype_mask = ntohs(mask->n_proto);

		/* ETH_P_ALL means "match any ethertype": clear both the
		 * value and the mask so the hardware ignores the field.
		 */
		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = key->ip_proto;
		fs->mask.proto = mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->mask);

		fs->type = 0;
		memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
		memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->mask);

		fs->type = 1;
		memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
		memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_PORTS,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_PORTS,
						 cls->mask);
		/* The dissector port fields are big endian; the filter
		 * specification expects host order.
		 */
		fs->val.lport = be16_to_cpu(key->dst);
		fs->mask.lport = be16_to_cpu(mask->dst);
		fs->val.fport = be16_to_cpu(key->src);
		fs->mask.fport = be16_to_cpu(mask->src);
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls)
{
	if (cls->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    cls->dissector->used_keys);
		return -EOPNOTSUPP;
	}
	return 0;
}

static void cxgb4_process_flow_actions(struct net_device *in,
				       struct tc_cls_flower_offload *cls,
				       struct ch_filter_specification *fs)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			fs->action = FILTER_DROP;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out = __dev_get_by_index(dev_net(in),
								    ifindex);
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
		} else if (is_tcf_vlan(a)) {
			u32 vlan_action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case TCA_VLAN_ACT_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case TCA_VLAN_ACT_MODIFY:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
		}
	}
}
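
/* Reject any rule whose action list contains something the hardware
 * cannot perform: only drop, redirect to another port on the same
 * adapter, and 802.1Q vlan pop/push/modify are offloadable.
 */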
static int cxgb4_validate_flow_actions(struct net_device *dev,
				       struct tc_cls_flower_offload *cls)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			/* Do nothing */
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev;
			unsigned int i, ifindex;
			bool found = false;

			ifindex = tcf_mirred_ifindex(a);
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (ifindex == n_dev->ifindex) {
					found = true;
					break;
				}
			}

			/* If the interface doesn't belong to our hw, then
			 * the provided output port is not valid.
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 vlan_action = tcf_vlan_action(a);

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				break;
			case TCA_VLAN_ACT_PUSH:
			case TCA_VLAN_ACT_MODIFY:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
		} else {
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
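
/* Offload a new flower rule: validate the actions and the match, build
 * the filter specification, grab a free filter index for the address
 * family, program the filter asynchronously and wait for the firmware
 * reply, then record the rule in the cookie hash table so that
 * destroy/stats can find it later.
 *
 * For illustration only (assuming eth0 is a cxgb4 port and an iproute2
 * with flower support), a rule of roughly this shape would arrive here:
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 action drop
 */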
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int fidx;
	int ret;

	if (cxgb4_validate_flow_actions(dev, cls))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_actions(dev, cls, fs);
	cxgb4_process_flow_match(dev, cls, fs);

	fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
	if (fidx < 0) {
		netdev_err(dev, "%s: No fidx for offload.\n", __func__);
		ret = -ENOMEM;
		goto free_entry;
	}

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	INIT_HLIST_NODE(&ch_flower->link);
	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	hash_add_rcu(adap->flower_anymatch_tbl, &ch_flower->link, cls->cookie);

	return ret;

free_entry:
	kfree(ch_flower);
	return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id);
	if (ret)
		goto err;

	hash_del_rcu(&ch_flower->link);
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

/* Timer callback: walk all offloaded rules and refresh each rule's
 * last-used timestamp whenever its hardware packet count has advanced,
 * so cxgb4_tc_flower_stats() can report accurate idle times.
 */
static void ch_flower_stats_cb(unsigned long data)
{
	struct adapter *adap = (struct adapter *)data;
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	unsigned int i;
	u64 packets;
	u64 bytes;
	int ret;

	rcu_read_lock();
	hash_for_each_rcu(adap->flower_anymatch_tbl, i, flower_entry, link) {
		ret = cxgb4_get_filter_counters(adap->port[0],
						flower_entry->filter_id,
						&packets, &bytes);
		if (!ret) {
			spin_lock(&flower_entry->lock);
			ofld_stats = &flower_entry->stats;

			if (ofld_stats->prev_packet_count != packets) {
				ofld_stats->prev_packet_count = packets;
				ofld_stats->last_used = jiffies;
			}
			spin_unlock(&flower_entry->lock);
		}
	}
	rcu_read_unlock();
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
				      packets - ofld_stats->packet_count,
				      ofld_stats->last_used);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}
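
/* Set up the cookie hash table and start the periodic stats timer
 * (every STATS_CHECK_PERIOD, i.e. twice a second) that keeps the
 * per-rule last-used timestamps up to date.
 */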
void cxgb4_init_tc_flower(struct adapter *adap)
{
	hash_init(adap->flower_anymatch_tbl);
	setup_timer(&adap->flower_stats_timer, ch_flower_stats_cb,
		    (unsigned long)adap);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
}