// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/netdevice.h>
#include <linux/dynamic_debug.h>
#include <linux/etherdevice.h>
#include <linux/list.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_rx_filter.h"

/* Unlink @f from both the by_id and by_hash lists and free it.
 * Every call site in this file holds lif->rx_filters.lock around
 * this call, so the list manipulation here is lock-protected.
 */
void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
{
	struct device *dev = lif->ionic->dev;

	hlist_del(&f->by_id);
	hlist_del(&f->by_hash);
	devm_kfree(dev, f);
}

/* Re-post every saved filter command to the firmware, presumably after
 * a FW restart has invalidated the old filter ids.  Filters whose replay
 * fails are logged and dropped.  Survivors receive a new filter id from
 * the FW completion, so they are parked on a temporary list and the
 * by_id hash lists are rebuilt from it at the end.
 */
void ionic_rx_filter_replay(struct ionic_lif *lif)
{
	struct ionic_rx_filter_add_cmd *ac;
	struct hlist_head new_id_list;
	struct ionic_admin_ctx ctx;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int key;
	unsigned int i;
	int err;

	INIT_HLIST_HEAD(&new_id_list);
	ac = &ctx.cmd.rx_filter_add;

	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			/* re-arm the completion for each post; the saved
			 * cmd is replayed verbatim from f->cmd
			 */
			ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
			memcpy(ac, &f->cmd, sizeof(f->cmd));
			dev_dbg(&lif->netdev->dev, "replay filter command:\n");
			dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
					 &ctx.cmd, sizeof(ctx.cmd), true);

			err = ionic_adminq_post_wait(lif, &ctx);
			if (err) {
				/* log what we can identify about the filter,
				 * then drop it rather than retry
				 */
				switch (le16_to_cpu(ac->match)) {
				case IONIC_RX_FILTER_MATCH_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan));
					break;
				case IONIC_RX_FILTER_MATCH_MAC:
					netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n",
						    err, ac->mac.addr);
					break;
				case IONIC_RX_FILTER_MATCH_MAC_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan),
						    ac->mac.addr);
					break;
				}
				spin_lock_bh(&lif->rx_filters.lock);
				ionic_rx_filter_free(lif, f);
				spin_unlock_bh(&lif->rx_filters.lock);

				continue;
			}

			/* remove from old id list, save new id in tmp list */
			spin_lock_bh(&lif->rx_filters.lock);
			hlist_del(&f->by_id);
			spin_unlock_bh(&lif->rx_filters.lock);
			f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id);
			hlist_add_head(&f->by_id, &new_id_list);
		}
	}

	/* rebuild the by_id hash lists with the new filter ids */
	spin_lock_bh(&lif->rx_filters.lock);
	hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) {
		key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
		head = &lif->rx_filters.by_id[key];
		hlist_add_head(&f->by_id, head);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
}

/* Initialize the filter lock and both sets of hash-list heads.
 * Always returns 0; the int return keeps parity with other init calls.
 */
int ionic_rx_filters_init(struct ionic_lif *lif)
{
	unsigned int i;

	spin_lock_init(&lif->rx_filters.lock);

	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
		INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
	}
	spin_unlock_bh(&lif->rx_filters.lock);

	return 0;
}

/* Free all saved filters.  Walking only by_id is sufficient because
 * ionic_rx_filter_free() unlinks each entry from both by_id and by_hash.
 */
void ionic_rx_filters_deinit(struct ionic_lif *lif)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id)
			ionic_rx_filter_free(lif, f);
	}
	spin_unlock_bh(&lif->rx_filters.lock);

	return;
}

/* Record (or refresh) a filter entry from an admin command/completion pair.
 * The saved cmd allows the filter to be replayed later; the filter id from
 * ctx->comp indexes the by_id lists.  Callers are expected to hold
 * lif->rx_filters.lock (GFP_ATOMIC is used here for that reason).
 * Returns 0 on success or a negative errno.
 */
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
			 u32 hash, struct ionic_admin_ctx *ctx,
			 enum ionic_filter_state state)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_rx_filter_add_cmd *ac;
	struct ionic_rx_filter *f = NULL;
	struct hlist_head *head;
	unsigned int key;

	ac = &ctx->cmd.rx_filter_add;

	/* derive the hash key from the match criteria, and look for an
	 * existing entry to refresh where a lookup helper exists
	 */
	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		key = le16_to_cpu(ac->vlan.vlan);
		f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		/* first 4 bytes of the MAC serve as the hash key */
		key = *(u32 *)ac->mac.addr;
		f = ionic_rx_filter_by_addr(lif, ac->mac.addr);
		break;
	case IONIC_RX_FILTER_MATCH_MAC_VLAN:
		key = le16_to_cpu(ac->mac_vlan.vlan);
		break;
	case IONIC_RX_FILTER_STEER_PKTCLASS:
		key = 0;
		break;
	default:
		return -EINVAL;
	}

	if (f) {
		/* remove from current linking so we can refresh it */
		hlist_del(&f->by_id);
		hlist_del(&f->by_hash);
	} else {
		f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;
	}

	f->flow_id = flow_id;
	f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
	f->state = state;
	f->rxq_index = rxq_index;
	memcpy(&f->cmd, ac, sizeof(f->cmd));
	netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);

	INIT_HLIST_NODE(&f->by_hash);
	INIT_HLIST_NODE(&f->by_id);

	key = hash_32(key, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];
	hlist_add_head(&f->by_hash, head);

	key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
	head = &lif->rx_filters.by_id[key];
	hlist_add_head(&f->by_id, head);

	return 0;
}

/* Find a saved VLAN-match filter for @vid, or NULL.
 * Caller is expected to hold lif->rx_filters.lock (all call sites
 * in this file do) -- confirm for external callers.
 */
struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_VLAN)
			continue;
		if (le16_to_cpu(f->cmd.vlan.vlan) == vid)
			return f;
	}

	return NULL;
}

/* Find a saved MAC-match filter for @addr, or NULL.
 * Hash key uses the first 4 bytes of the MAC, mirroring
 * ionic_rx_filter_save(); the final match compares all ETH_ALEN bytes.
 */
struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif,
						const u8 *addr)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_MAC)
			continue;
		if (memcmp(addr, f->cmd.mac.addr, ETH_ALEN) == 0)
			return f;
	}

	return NULL;
}

/* Find the saved packet-class steering filter, or NULL.  These are
 * stored under key 0 (see IONIC_RX_FILTER_STEER_PKTCLASS in
 * ionic_rx_filter_save()), so only that one bucket is searched.
 */
struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(0, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_STEER_PKTCLASS)
			continue;
		return f;
	}

	return NULL;
}

/* Record an add/delete request for @addr without touching the hardware:
 * state transitions are made on the local filter list and the actual FW
 * update is deferred to the next sync (IONIC_LIF_F_FILTER_SYNC_NEEDED).
 * @mode is ADD_ADDR or DEL_ADDR.  Returns 0, -ENOENT on delete of an
 * unknown address, or a save error.
 */
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
{
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_addr(lif, addr);
	if (mode == ADD_ADDR && !f) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_filter_add = {
				.opcode = IONIC_CMD_RX_FILTER_ADD,
				.lif_index = cpu_to_le16(lif->index),
				.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
			},
		};

		/* ctx is never posted here; it only carries the cmd that
		 * ionic_rx_filter_save() records for the later sync
		 */
		memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_NEW);
		if (err) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return err;
		}

	} else if (mode == ADD_ADDR && f) {
		/* a pending delete is cancelled by re-adding */
		if (f->state == IONIC_FILTER_STATE_OLD)
			f->state = IONIC_FILTER_STATE_SYNCED;

	} else if (mode == DEL_ADDR && f) {
		/* NEW means FW never saw it, so just drop it locally;
		 * SYNCED becomes OLD so the sync pass deletes it from FW
		 */
		if (f->state == IONIC_FILTER_STATE_NEW)
			ionic_rx_filter_free(lif, f);
		else if (f->state == IONIC_FILTER_STATE_SYNCED)
			f->state = IONIC_FILTER_STATE_OLD;
	} else if (mode == DEL_ADDR && !f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	return 0;
}

/* Add @addr as a MAC filter in both the local list and the firmware.
 * The local entry is marked SYNCED before the adminq post (done outside
 * the lock) to block parallel add attempts; on FW failure it is reset to
 * NEW and a re-sync is scheduled.  -ENOSPC is swallowed (returns 0) so
 * the caller treats the filter as deferred, not failed.
 */
int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	bool mc = is_multicast_ether_addr(addr);
	struct ionic_rx_filter *f;
	int err = 0;

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (f) {
		/* don't bother if we already have it and it is sync'd */
		if (f->state == IONIC_FILTER_STATE_SYNCED) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return 0;
		}

		/* mark preemptively as sync'd to block any parallel attempts */
		f->state = IONIC_FILTER_STATE_SYNCED;
	} else {
		/* save as SYNCED to catch any DEL requests while processing */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
	if (err)
		return err;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	/* Don't bother with the write to FW if we know there's no room,
	 * we can try again on the next sync attempt.
	 */
	if ((lif->nucast + lif->nmcast) >= nfilters)
		err = -ENOSPC;
	else
		err = ionic_adminq_post_wait(lif, &ctx);

	spin_lock_bh(&lif->rx_filters.lock);
	if (err && err != -EEXIST) {
		/* set the state back to NEW so we can try again later */
		f = ionic_rx_filter_by_addr(lif, addr);
		if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
			f->state = IONIC_FILTER_STATE_NEW;
			set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
		}

		spin_unlock_bh(&lif->rx_filters.lock);

		if (err == -ENOSPC)
			return 0;
		else
			return err;
	}

	if (mc)
		lif->nmcast++;
	else
		lif->nucast++;

	f = ionic_rx_filter_by_addr(lif, addr);
	if (f && f->state == IONIC_FILTER_STATE_OLD) {
		/* Someone requested a delete while we were adding
		 * so update the filter info with the results from the add
		 * and the data will be there for the delete on the next
		 * sync cycle.
		 */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_OLD);
	} else {
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

/* Remove the MAC filter for @addr from the local list, and from the FW
 * if the FW ever saw it (state != NEW).  The filter id is captured into
 * the delete cmd before the local entry is freed; the adminq post happens
 * outside the lock.  -EEXIST from the FW (already gone) is not an error.
 */
int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int state;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	state = f->state;
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);

	if (is_multicast_ether_addr(addr) && lif->nmcast)
		lif->nmcast--;
	else if (!is_multicast_ether_addr(addr) && lif->nucast)
		lif->nucast--;

	spin_unlock_bh(&lif->rx_filters.lock);

	if (state != IONIC_FILTER_STATE_NEW) {
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err && err != -EEXIST)
			return err;
	}

	return 0;
}

/* Work-list wrapper used by ionic_rx_filter_sync() to snapshot a filter
 * (by value) outside of the rx_filters lock.
 */
struct sync_item {
	struct list_head list;
	struct ionic_rx_filter f;
};

/* Push pending local filter changes out to the firmware: NEW entries are
 * added, OLD entries are deleted.  The pending filters are first copied
 * under the lock into local lists, then processed unlocked through
 * ionic_lif_addr_add()/_del(), which do their own locking and state
 * bookkeeping.
 */
void ionic_rx_filter_sync(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct list_head sync_add_list;
	struct list_head sync_del_list;
	struct sync_item *sync_item;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	struct sync_item *spos;
	unsigned int i;

	INIT_LIST_HEAD(&sync_add_list);
	INIT_LIST_HEAD(&sync_del_list);

	clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	/* Copy the filters to be added and deleted
	 * into a separate local list that needs no locking.
	 */
	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			if (f->state == IONIC_FILTER_STATE_NEW ||
			    f->state == IONIC_FILTER_STATE_OLD) {
				sync_item = devm_kzalloc(dev, sizeof(*sync_item),
							 GFP_ATOMIC);
				if (!sync_item)
					goto loop_out;

				sync_item->f = *f;

				if (f->state == IONIC_FILTER_STATE_NEW)
					list_add(&sync_item->list, &sync_add_list);
				else
					list_add(&sync_item->list, &sync_del_list);
			}
		}
	}
loop_out:
	spin_unlock_bh(&lif->rx_filters.lock);

	/* If the add or delete fails, it won't get marked as sync'd
	 * and will be tried again in the next sync action.
	 * Do the deletes first in case we're in an overflow state and
	 * they can clear room for some new filters
	 */
	list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
		(void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}

	list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
		(void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}
}