/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"

/* Fill ch_filter_specification with parsed match value/mask pair. */
static int fill_match_fields(struct adapter *adap,
			     struct ch_filter_specification *fs,
			     struct tc_cls_u32_offload *cls,
			     const struct cxgb4_match_field *entry,
			     bool next_header)
{
	unsigned int i, j;
	u32 val, mask;
	int off, err;
	bool found;

	for (i = 0; i < cls->knode.sel->nkeys; i++) {
		off = cls->knode.sel->keys[i].off;
		val = cls->knode.sel->keys[i].val;
		mask = cls->knode.sel->keys[i].mask;

		if (next_header) {
			/* For next headers, parse only keys with offmask */
			if (!cls->knode.sel->keys[i].offmask)
				continue;
		} else {
			/* For the remaining, parse only keys without offmask */
			if (cls->knode.sel->keys[i].offmask)
				continue;
		}

		found = false;

		for (j = 0; entry[j].val; j++) {
			if (off == entry[j].off) {
				found = true;
				err = entry[j].val(fs, val, mask);
				if (err)
					return err;
				break;
			}
		}

		if (!found)
			return -EINVAL;
	}

	return 0;
}

/* Fill ch_filter_specification with parsed action. */
static int fill_action_fields(struct adapter *adap,
			      struct ch_filter_specification *fs,
			      struct tc_cls_u32_offload *cls)
{
	unsigned int num_actions = 0;
	const struct tc_action *a;
	struct tcf_exts *exts;
	int i;

	exts = cls->knode.exts;
	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	tcf_exts_for_each_action(i, a, exts) {
		/* Don't allow more than one action per rule. */
		if (num_actions)
			return -EINVAL;

		/* Drop in hardware. */
		if (is_tcf_gact_shot(a)) {
			fs->action = FILTER_DROP;
			num_actions++;
			continue;
		}

		/* Re-direct to specified port in hardware. */
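		/* For example, a rule whose action is
		 *   action mirred egress redirect dev <egress dev>
		 * takes this branch and is offloaded as FILTER_SWITCH
		 * with fs->eport set to the adapter port index of
		 * <egress dev>, which must be a port of this adapter.
		 */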
		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *n_dev, *target_dev;
			bool found = false;
			unsigned int i;

			target_dev = tcf_mirred_dev(a);
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					fs->action = FILTER_SWITCH;
					fs->eport = i;
					found = true;
					break;
				}
			}

			/* Interface doesn't belong to any port of
			 * the underlying hardware.
			 */
			if (!found)
				return -EINVAL;

			num_actions++;
			continue;
		}

		/* Unsupported action. */
		return -EINVAL;
	}

	return 0;
}

int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
	const struct cxgb4_match_field *start, *link_start = NULL;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adapter = netdev2adap(dev);
	__be16 protocol = cls->common.protocol;
	struct ch_filter_specification fs;
	struct cxgb4_tc_u32_table *t;
	struct cxgb4_link *link;
	unsigned int filter_id;
	u32 uhtid, link_uhtid;
	bool is_ipv6 = false;
	int ret;

	if (!can_tc_u32_offload(dev))
		return -EOPNOTSUPP;

	if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
		return -EOPNOTSUPP;

	/* Note that TC uses prio 0 to indicate that the stack should
	 * generate an automatic prio, and hence never passes prio 0 to
	 * the driver. However, the hardware TCAM index starts from 0.
	 * Hence, the -1 here.
	 */
	filter_id = TC_U32_NODE(cls->knode.handle) - 1;

	/* Only insert U32 rule if its priority doesn't conflict with
	 * existing rules in the LETCAM.
	 */
	if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids ||
	    !cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	t = adapter->tc_u32;
	uhtid = TC_U32_USERHTID(cls->knode.handle);
	link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);

	/* Ensure that uhtid is either root u32 (i.e. 0x800)
	 * or a valid linked bucket.
	 */
	if (uhtid != 0x800 && uhtid >= t->size)
		return -EINVAL;

	/* Ensure link handle uhtid is sane, if specified. */
	if (link_uhtid >= t->size)
		return -EINVAL;

	memset(&fs, 0, sizeof(fs));

	if (filter_id < adapter->tids.nhpftids)
		fs.prio = 1;
	fs.tc_prio = cls->common.prio;
	fs.tc_cookie = cls->knode.handle;

	if (protocol == htons(ETH_P_IPV6)) {
		start = cxgb4_ipv6_fields;
		is_ipv6 = true;
	} else {
		start = cxgb4_ipv4_fields;
		is_ipv6 = false;
	}

	if (uhtid != 0x800) {
		/* Link must exist from root node before insertion. */
		if (!t->table[uhtid - 1].link_handle)
			return -EINVAL;

		/* Link must have a valid supported next header. */
		link_start = t->table[uhtid - 1].match_field;
		if (!link_start)
			return -EINVAL;
	}

	/* Parse links and record them for subsequent jumps to valid
	 * next headers.
	 */
	if (link_uhtid) {
		const struct cxgb4_next_header *next;
		bool found = false;
		unsigned int i, j;
		u32 val, mask;
		int off;

		if (t->table[link_uhtid - 1].link_handle) {
			dev_err(adapter->pdev_dev,
				"Link handle exists for: 0x%x\n",
				link_uhtid);
			return -EINVAL;
		}

		next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;

		/* Try to find matches that allow jumps to next header. */
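		/* A linking rule is identified by the u32 selector's
		 * variable-offset fields (off, offoff, offshift, offmask),
		 * e.g. the IPv4 header-length (IHL) based offset used to
		 * reach the L4 header, plus one key that pins the next
		 * protocol (e.g. the IPv4 protocol field). A candidate
		 * entry in the jump table must match all of these exactly
		 * before the link is recorded.
		 */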
		for (i = 0; next[i].jump; i++) {
			if (next[i].offoff != cls->knode.sel->offoff ||
			    next[i].shift != cls->knode.sel->offshift ||
			    next[i].mask != cls->knode.sel->offmask ||
			    next[i].offset != cls->knode.sel->off)
				continue;

			/* Found a possible candidate. Find a key that
			 * matches the corresponding offset, value, and
			 * mask to jump to next header.
			 */
			for (j = 0; j < cls->knode.sel->nkeys; j++) {
				off = cls->knode.sel->keys[j].off;
				val = cls->knode.sel->keys[j].val;
				mask = cls->knode.sel->keys[j].mask;

				if (next[i].match_off == off &&
				    next[i].match_val == val &&
				    next[i].match_mask == mask) {
					found = true;
					break;
				}
			}

			if (!found)
				continue; /* Try next candidate. */

			/* Candidate to jump to next header found.
			 * Translate all keys to internal specification
			 * and store them in jump table. This spec is copied
			 * later to set the actual filters.
			 */
			ret = fill_match_fields(adapter, &fs, cls,
						start, false);
			if (ret)
				goto out;

			link = &t->table[link_uhtid - 1];
			link->match_field = next[i].jump;
			link->link_handle = cls->knode.handle;
			memcpy(&link->fs, &fs, sizeof(fs));
			break;
		}

		/* No candidate found to jump to next header. */
		if (!found)
			return -EINVAL;

		return 0;
	}

	/* Fill ch_filter_specification match fields to be shipped to
	 * hardware. Copy the linked spec (if any) first, and then
	 * update the spec as needed.
	 */
	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
		/* Copy linked ch_filter_specification */
		memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
		ret = fill_match_fields(adapter, &fs, cls,
					link_start, true);
		if (ret)
			goto out;
	}

	ret = fill_match_fields(adapter, &fs, cls, start, false);
	if (ret)
		goto out;

	/* Fill ch_filter_specification action fields to be shipped to
	 * hardware.
	 */
	ret = fill_action_fields(adapter, &fs, cls);
	if (ret)
		goto out;

	/* The filter spec has been completely built from the info
	 * provided from u32. We now set some default fields in the
	 * spec for sanity.
	 */

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs.val.iport = netdev2pinfo(dev)->port_id;
	fs.mask.iport = ~0;

	/* Enable filter hit counts. */
	fs.hitcnts = 1;

	/* Set type of filter - IPv6 or IPv4 */
	fs.type = is_ipv6 ? 1 : 0;

	/* Set the filter */
	ret = cxgb4_set_filter(dev, filter_id, &fs);
	if (ret)
		goto out;

	/* If this is a linked bucket, then set the corresponding
	 * entry in the bitmap to mark it as belonging to this linked
	 * bucket.
	 */
	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
		set_bit(filter_id, t->table[uhtid - 1].tid_map);

out:
	return ret;
}

int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int filter_id, max_tids, i, j;
	struct cxgb4_link *link = NULL;
	struct cxgb4_tc_u32_table *t;
	struct filter_entry *f;
	u32 handle, uhtid;
	int ret;

	if (!can_tc_u32_offload(dev))
		return -EOPNOTSUPP;

	/* Fetch the location to delete the filter. */
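	/* As in cxgb4_config_knode(), a valid u32 node id is never 0
	 * while hardware filter indices start at 0, hence the -1 below.
	 */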
	filter_id = TC_U32_NODE(cls->knode.handle) - 1;
	if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids)
		return -ERANGE;

	if (filter_id < adapter->tids.nhpftids)
		f = &adapter->tids.hpftid_tab[filter_id];
	else
		f = &adapter->tids.ftid_tab[filter_id -
					    adapter->tids.nhpftids];

	if (cls->knode.handle != f->fs.tc_cookie)
		return -ERANGE;

	t = adapter->tc_u32;
	handle = cls->knode.handle;
	uhtid = TC_U32_USERHTID(cls->knode.handle);

	/* Ensure that uhtid is either root u32 (i.e. 0x800)
	 * or a valid linked bucket.
	 */
	if (uhtid != 0x800 && uhtid >= t->size)
		return -EINVAL;

	/* Delete the specified filter */
	if (uhtid != 0x800) {
		link = &t->table[uhtid - 1];
		if (!link->link_handle)
			return -EINVAL;

		if (!test_bit(filter_id, link->tid_map))
			return -EINVAL;
	}

	ret = cxgb4_del_filter(dev, filter_id, NULL);
	if (ret)
		goto out;

	if (link)
		clear_bit(filter_id, link->tid_map);

	/* If a link is being deleted, then delete all filters
	 * associated with the link.
	 */
	max_tids = adapter->tids.nftids;
	for (i = 0; i < t->size; i++) {
		link = &t->table[i];

		if (link->link_handle == handle) {
			for (j = 0; j < max_tids; j++) {
				if (!test_bit(j, link->tid_map))
					continue;

				ret = __cxgb4_del_filter(dev, j, NULL, NULL);
				if (ret)
					goto out;

				clear_bit(j, link->tid_map);
			}

			/* Clear the link state */
			link->match_field = NULL;
			link->link_handle = 0;
			memset(&link->fs, 0, sizeof(link->fs));
			break;
		}
	}

out:
	return ret;
}

void cxgb4_cleanup_tc_u32(struct adapter *adap)
{
	struct cxgb4_tc_u32_table *t;
	unsigned int i;

	if (!adap->tc_u32)
		return;

	/* Free up all allocated memory. */
	t = adap->tc_u32;
	for (i = 0; i < t->size; i++) {
		struct cxgb4_link *link = &t->table[i];

		kvfree(link->tid_map);
	}
	kvfree(adap->tc_u32);
}

struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
{
	unsigned int max_tids = adap->tids.nftids + adap->tids.nhpftids;
	struct cxgb4_tc_u32_table *t;
	unsigned int i;

	if (!max_tids)
		return NULL;

	t = kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL);
	if (!t)
		return NULL;

	t->size = max_tids;

	for (i = 0; i < t->size; i++) {
		struct cxgb4_link *link = &t->table[i];
		unsigned int bmap_size;

		bmap_size = BITS_TO_LONGS(max_tids);
		link->tid_map = kvcalloc(bmap_size, sizeof(unsigned long),
					 GFP_KERNEL);
		if (!link->tid_map)
			goto out_no_mem;
		bitmap_zero(link->tid_map, max_tids);
	}

	return t;

out_no_mem:
	for (i = 0; i < t->size; i++) {
		struct cxgb4_link *link = &t->table[i];

		if (link->tid_map)
			kvfree(link->tid_map);
	}

	if (t)
		kvfree(t);

	return NULL;
}