Lines matching refs: macsec (each hit shows the in-file line number, the code fragment, the enclosing function, and whether macsec is an argument or a local variable there)
313 static bool macsec_is_offloaded(struct macsec_dev *macsec) in macsec_is_offloaded() argument
315 if (macsec->offload == MACSEC_OFFLOAD_MAC || in macsec_is_offloaded()
316 macsec->offload == MACSEC_OFFLOAD_PHY) in macsec_is_offloaded()
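The two fragments at 315-316 give the whole predicate; a minimal sketch of macsec_is_offloaded() reconstructed from them (the exact body in the tree may be phrased slightly differently):

/* A device counts as offloaded when MACsec processing is delegated to
 * either the MAC or the PHY; MACSEC_OFFLOAD_OFF means software MACsec.
 */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}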
324 struct macsec_dev *macsec) in macsec_check_offload() argument
326 if (!macsec || !macsec->real_dev) in macsec_check_offload()
330 return macsec->real_dev->phydev && in macsec_check_offload()
331 macsec->real_dev->phydev->macsec_ops; in macsec_check_offload()
333 return macsec->real_dev->features & NETIF_F_HW_MACSEC && in macsec_check_offload()
334 macsec->real_dev->macsec_ops; in macsec_check_offload()
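Lines 326-334 show the NULL guard and the two return expressions; the branch on the offload type below is an assumption inferred from the PHY/MAC pairing, not shown in the listing:

/* Check whether the requested offload mode is actually backed by the
 * lower device: PHY offload needs a phydev exposing macsec_ops, MAC
 * offload needs NETIF_F_HW_MACSEC plus macsec_ops on the netdev.
 */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}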
340 struct macsec_dev *macsec, in __macsec_get_ops() argument
348 ctx->phydev = macsec->real_dev->phydev; in __macsec_get_ops()
350 ctx->netdev = macsec->real_dev; in __macsec_get_ops()
354 return macsec->real_dev->phydev->macsec_ops; in __macsec_get_ops()
356 return macsec->real_dev->macsec_ops; in __macsec_get_ops()
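A sketch of __macsec_get_ops() built around the fragments at 348-356; the context initialisation (memset and ctx->offload) is an assumption, since only the phydev/netdev assignments appear in the listing:

/* Resolve the macsec_ops table for the requested offload mode and,
 * when a context is supplied, prime it with the device the driver
 * callbacks will operate on.
 */
static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;

	return macsec->real_dev->macsec_ops;
}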
362 static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec, in macsec_get_ops() argument
365 if (!macsec_check_offload(macsec->offload, macsec)) in macsec_get_ops()
368 return __macsec_get_ops(macsec->offload, macsec, ctx); in macsec_get_ops()
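The wrapper at 362-368 is short enough to reconstruct almost verbatim; only the NULL return on a failed check is assumed:

/* Convenience wrapper: validate the configured offload mode first,
 * then resolve the ops table for it.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}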
471 struct macsec_dev *macsec = netdev_priv(dev); in macsec_encrypt_finish() local
473 skb->dev = macsec->real_dev; in macsec_encrypt_finish()
480 struct macsec_dev *macsec = macsec_priv(skb->dev); in macsec_msdu_len() local
481 struct macsec_secy *secy = &macsec->secy; in macsec_msdu_len()
516 struct macsec_dev *macsec = macsec_priv(dev); in macsec_encrypt_done() local
523 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); in macsec_encrypt_done()
579 struct macsec_dev *macsec = macsec_priv(dev); in macsec_encrypt() local
583 secy = &macsec->secy; in macsec_encrypt()
634 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); in macsec_encrypt()
818 struct macsec_dev *macsec = macsec_priv(dev); in macsec_decrypt_done() local
831 if (!macsec_post_decrypt(skb, &macsec->secy, pn)) { in macsec_decrypt_done()
837 macsec_finalize_skb(skb, macsec->secy.icv_len, in macsec_decrypt_done()
840 macsec_reset_skb(skb, macsec->secy.netdev); in macsec_decrypt_done()
842 if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) in macsec_decrypt_done()
985 struct macsec_dev *macsec; in handle_not_macsec() local
993 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { in handle_not_macsec()
995 struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); in handle_not_macsec()
996 struct net_device *ndev = macsec->secy.netdev; in handle_not_macsec()
1001 if (macsec_is_offloaded(macsec) && netif_running(ndev)) { in handle_not_macsec()
1004 ops = macsec_get_ops(macsec, NULL); in handle_not_macsec()
1017 rx_sc = find_rx_sc(&macsec->secy, in handle_not_macsec()
1070 if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { in handle_not_macsec()
1074 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); in handle_not_macsec()
1106 struct macsec_dev *macsec; in macsec_handle_frame() local
1160 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { in macsec_handle_frame()
1161 struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); in macsec_handle_frame()
1166 secy = &macsec->secy; in macsec_handle_frame()
1176 macsec = macsec_priv(dev); in macsec_handle_frame()
1177 secy_stats = this_cpu_ptr(macsec->stats); in macsec_handle_frame()
1230 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); in macsec_handle_frame()
1267 ret = gro_cells_receive(&macsec->gro_cells, skb); in macsec_handle_frame()
1271 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); in macsec_handle_frame()
1295 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { in macsec_handle_frame()
1298 secy_stats = this_cpu_ptr(macsec->stats); in macsec_handle_frame()
1304 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { in macsec_handle_frame()
1308 DEV_STATS_INC(macsec->secy.netdev, rx_errors); in macsec_handle_frame()
1319 macsec_reset_skb(nskb, macsec->secy.netdev); in macsec_handle_frame()
1327 DEV_STATS_INC(macsec->secy.netdev, rx_dropped); in macsec_handle_frame()
1426 struct macsec_dev *macsec; in create_rx_sc() local
1431 list_for_each_entry(macsec, &rxd->secys, secys) { in create_rx_sc()
1432 if (find_rx_sc_rtnl(&macsec->secy, sci)) in create_rx_sc()
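The loop at 1431-1432 is the duplicate-SCI guard at the top of create_rx_sc(). A sketch of just that check, assuming rxd is the per-real-device macsec_rxh_data and that a duplicate is rejected with ERR_PTR(-EEXIST):

	/* Refuse to create an RX SC if any SecY bound to the same lower
	 * device already has one with this SCI (assumed -EEXIST return).
	 */
	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}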
2578 static bool macsec_is_configured(struct macsec_dev *macsec) in macsec_is_configured() argument
2580 struct macsec_secy *secy = &macsec->secy; in macsec_is_configured()
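Only the prototype and the secy pointer appear above; the sketch below assumes the usual definition of "configured", i.e. at least one RX SC or one TX SA installed, with the MACSEC_NUM_AN loop over tx_sc.sa[]:

static bool macsec_is_configured(struct macsec_dev *macsec)
{
	struct macsec_secy *secy = &macsec->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	int i;

	if (secy->rx_sc)
		return true;

	for (i = 0; i < MACSEC_NUM_AN; i++)
		if (tx_sc->sa[i])
			return true;

	return false;
}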
2599 struct macsec_dev *macsec; in macsec_update_offload() local
2602 macsec = macsec_priv(dev); in macsec_update_offload()
2606 !macsec_check_offload(offload, macsec)) in macsec_update_offload()
2616 if (macsec_is_configured(macsec)) in macsec_update_offload()
2619 prev_offload = macsec->offload; in macsec_update_offload()
2622 macsec, &ctx); in macsec_update_offload()
2626 macsec->offload = offload; in macsec_update_offload()
2628 ctx.secy = &macsec->secy; in macsec_update_offload()
2632 macsec->offload = prev_offload; in macsec_update_offload()
2642 struct macsec_dev *macsec; in macsec_upd_offload() local
2664 macsec = macsec_priv(dev); in macsec_upd_offload()
2673 if (macsec->offload != offload) in macsec_upd_offload()
2684 struct macsec_dev *macsec = macsec_priv(dev); in get_tx_sa_stats() local
2688 if (macsec_is_offloaded(macsec)) { in get_tx_sa_stats()
2692 ops = macsec_get_ops(macsec, &ctx); in get_tx_sa_stats()
2728 struct macsec_dev *macsec = macsec_priv(dev); in get_rx_sa_stats() local
2732 if (macsec_is_offloaded(macsec)) { in get_rx_sa_stats()
2736 ops = macsec_get_ops(macsec, &ctx); in get_rx_sa_stats()
2781 struct macsec_dev *macsec = macsec_priv(dev); in get_rx_sc_stats() local
2785 if (macsec_is_offloaded(macsec)) { in get_rx_sc_stats()
2789 ops = macsec_get_ops(macsec, &ctx); in get_rx_sc_stats()
2863 struct macsec_dev *macsec = macsec_priv(dev); in get_tx_sc_stats() local
2867 if (macsec_is_offloaded(macsec)) { in get_tx_sc_stats()
2871 ops = macsec_get_ops(macsec, &ctx); in get_tx_sc_stats()
2919 struct macsec_dev *macsec = macsec_priv(dev); in get_secy_stats() local
2923 if (macsec_is_offloaded(macsec)) { in get_secy_stats()
2927 ops = macsec_get_ops(macsec, &ctx); in get_secy_stats()
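The get_*_stats() helpers at 2684-2927 all follow the same dispatch pattern: when the device is offloaded, resolve the ops table and hand the query to the driver, otherwise fall back to the software per-CPU counters. A hedged sketch of that shared shape for get_secy_stats(); the mdo_get_dev_stats callback, the macsec_offload() wrapper, and the ctx.stats.dev_stats field are assumed names from the macsec offload API:

static void get_secy_stats(struct net_device *dev,
			   struct macsec_dev_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	/* Offloaded: let the hardware driver fill in the counters. */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.dev_stats = sum;
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_get_dev_stats, &ctx);
		}
		return;
	}

	/* Software path: fold the per-CPU pcpu_secy_stats counters into
	 * *sum (summation loop elided in this sketch).
	 */
}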
3047 struct macsec_dev *macsec = netdev_priv(dev); in dump_secy() local
3069 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) in dump_secy()
3393 struct macsec_dev *macsec = netdev_priv(dev); in macsec_start_xmit() local
3394 struct macsec_secy *secy = &macsec->secy; in macsec_start_xmit()
3404 skb->dev = macsec->real_dev; in macsec_start_xmit()
3410 secy_stats = this_cpu_ptr(macsec->stats); in macsec_start_xmit()
3414 skb->dev = macsec->real_dev; in macsec_start_xmit()
3435 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); in macsec_start_xmit()
3448 struct macsec_dev *macsec = macsec_priv(dev); in macsec_dev_init() local
3449 struct net_device *real_dev = macsec->real_dev; in macsec_dev_init()
3456 err = gro_cells_init(&macsec->gro_cells, dev); in macsec_dev_init()
3476 netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL); in macsec_dev_init()
3483 struct macsec_dev *macsec = macsec_priv(dev); in macsec_dev_uninit() local
3485 gro_cells_destroy(&macsec->gro_cells); in macsec_dev_uninit()
3492 struct macsec_dev *macsec = macsec_priv(dev); in macsec_fix_features() local
3493 struct net_device *real_dev = macsec->real_dev; in macsec_fix_features()
3504 struct macsec_dev *macsec = macsec_priv(dev); in macsec_dev_open() local
3505 struct net_device *real_dev = macsec->real_dev; in macsec_dev_open()
3525 if (macsec_is_offloaded(macsec)) { in macsec_dev_open()
3535 ctx.secy = &macsec->secy; in macsec_dev_open()
3556 struct macsec_dev *macsec = macsec_priv(dev); in macsec_dev_stop() local
3557 struct net_device *real_dev = macsec->real_dev; in macsec_dev_stop()
3562 if (macsec_is_offloaded(macsec)) { in macsec_dev_stop()
3566 ops = macsec_get_ops(macsec, &ctx); in macsec_dev_stop()
3568 ctx.secy = &macsec->secy; in macsec_dev_stop()
3612 struct macsec_dev *macsec = macsec_priv(dev); in macsec_set_mac_address() local
3613 struct net_device *real_dev = macsec->real_dev; in macsec_set_mac_address()
3633 if (macsec_is_offloaded(macsec)) { in macsec_set_mac_address()
3637 ops = macsec_get_ops(macsec, &ctx); in macsec_set_mac_address()
3639 ctx.secy = &macsec->secy; in macsec_set_mac_address()
3649 struct macsec_dev *macsec = macsec_priv(dev); in macsec_change_mtu() local
3650 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); in macsec_change_mtu()
3652 if (macsec->real_dev->mtu - extra < new_mtu) in macsec_change_mtu()
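Lines 3649-3652 show the MTU headroom check; the MACsec SecTAG plus ICV must still fit within the lower device's MTU, with macsec_extra_len(true) accounting for a SecTAG that carries the SCI. The error code and the final MTU assignment below are assumptions:

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	/* new_mtu plus MACsec overhead must not exceed the lower MTU */
	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}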
3716 struct macsec_dev *macsec = macsec_priv(dev); in macsec_free_netdev() local
3718 dst_release(&macsec->secy.tx_sc.md_dst->dst); in macsec_free_netdev()
3719 free_percpu(macsec->stats); in macsec_free_netdev()
3720 free_percpu(macsec->secy.tx_sc.stats); in macsec_free_netdev()
3723 netdev_put(macsec->real_dev, &macsec->dev_tracker); in macsec_free_netdev()
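The fragments at 3716-3723 cover essentially the whole destructor; a sketch assuming nothing else happens between the percpu frees and the netdev_put():

/* ndo destructor: drop the metadata dst reference, free the per-CPU
 * statistics, then release the tracked reference on the lower device.
 */
static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	dst_release(&macsec->secy.tx_sc.md_dst->dst);
	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	netdev_put(macsec->real_dev, &macsec->dev_tracker);
}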
3820 struct macsec_dev *macsec = macsec_priv(dev); in macsec_changelink() local
3839 memcpy(&secy, &macsec->secy, sizeof(secy)); in macsec_changelink()
3840 memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc)); in macsec_changelink()
3848 if (macsec->offload != offload) { in macsec_changelink()
3857 if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) { in macsec_changelink()
3867 ctx.secy = &macsec->secy; in macsec_changelink()
3876 memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc)); in macsec_changelink()
3877 memcpy(&macsec->secy, &secy, sizeof(secy)); in macsec_changelink()
3882 static void macsec_del_dev(struct macsec_dev *macsec) in macsec_del_dev() argument
3886 while (macsec->secy.rx_sc) { in macsec_del_dev()
3887 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); in macsec_del_dev()
3889 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); in macsec_del_dev()
3894 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); in macsec_del_dev()
3897 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); in macsec_del_dev()
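Lines 3886-3897 outline the teardown of all RX SCs and TX SAs; the sketch below fills in the loop bodies, with free_rx_sc() and clear_tx_sa() used as assumed names for the respective cleanup helpers:

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	/* Unlink and release every RX SC still hanging off the SecY. */
	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc =
			rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	/* Clear every installed TX SA. */
	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa =
			rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}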
3905 struct macsec_dev *macsec = macsec_priv(dev); in macsec_common_dellink() local
3906 struct net_device *real_dev = macsec->real_dev; in macsec_common_dellink()
3909 if (macsec_is_offloaded(macsec)) { in macsec_common_dellink()
3915 ctx.secy = &macsec->secy; in macsec_common_dellink()
3921 list_del_rcu(&macsec->secys); in macsec_common_dellink()
3922 macsec_del_dev(macsec); in macsec_common_dellink()
3930 struct macsec_dev *macsec = macsec_priv(dev); in macsec_dellink() local
3931 struct net_device *real_dev = macsec->real_dev; in macsec_dellink()
3945 struct macsec_dev *macsec = macsec_priv(dev); in register_macsec_dev() local
3965 list_add_tail_rcu(&macsec->secys, &rxd->secys); in register_macsec_dev()
3972 struct macsec_dev *macsec; in sci_exists() local
3974 list_for_each_entry(macsec, &rxd->secys, secys) { in sci_exists()
3975 if (macsec->secy.sci == sci) in sci_exists()
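sci_exists() at 3972-3975 is a simple uniqueness check across every SecY on the same lower device; the macsec_data_rtnl() lookup of the per-device rx-handler data is assumed:

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}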
3989 struct macsec_dev *macsec = macsec_priv(dev); in macsec_add_dev() local
3990 struct macsec_secy *secy = &macsec->secy; in macsec_add_dev()
3992 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); in macsec_add_dev()
3993 if (!macsec->stats) in macsec_add_dev()
4037 struct macsec_dev *macsec = macsec_priv(dev); in macsec_newlink() local
4054 macsec->real_dev = real_dev; in macsec_newlink()
4057 macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); in macsec_newlink()
4060 macsec->offload = MACSEC_OFFLOAD_OFF; in macsec_newlink()
4063 if (macsec->offload != MACSEC_OFFLOAD_OFF && in macsec_newlink()
4064 !macsec_check_offload(macsec->offload, macsec)) in macsec_newlink()
4126 if (macsec_is_offloaded(macsec)) { in macsec_newlink()
4130 ops = macsec_get_ops(macsec, &ctx); in macsec_newlink()
4132 ctx.secy = &macsec->secy; in macsec_newlink()
4151 macsec_del_dev(macsec); in macsec_newlink()
4274 struct macsec_dev *macsec; in macsec_fill_info() local
4278 macsec = macsec_priv(dev); in macsec_fill_info()
4279 secy = &macsec->secy; in macsec_fill_info()
4306 nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) || in macsec_fill_info()