/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31 * 32 */ 33 34 #ifndef __MLX5E_IPSEC_RXTX_H__ 35 #define __MLX5E_IPSEC_RXTX_H__ 36 37 #include <linux/skbuff.h> 38 #include <net/xfrm.h> 39 #include "en.h" 40 #include "en/txrx.h" 41 42 /* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */ 43 #define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1) 44 #define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0)) 45 #define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0)) 46 47 struct mlx5e_accel_tx_ipsec_state { 48 struct xfrm_offload *xo; 49 struct xfrm_state *x; 50 u32 tailen; 51 u32 plen; 52 }; 53 54 #ifdef CONFIG_MLX5_EN_IPSEC 55 56 struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, 57 struct sk_buff *skb, u32 *cqe_bcnt); 58 59 void mlx5e_ipsec_inverse_table_init(void); 60 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, 61 struct xfrm_offload *xo); 62 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x, 63 struct xfrm_offload *xo); 64 bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, 65 struct sk_buff *skb, 66 struct mlx5e_accel_tx_ipsec_state *ipsec_st); 67 void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe, 68 struct mlx5e_accel_tx_ipsec_state *ipsec_st, 69 struct mlx5_wqe_inline_seg *inlseg); 70 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, 71 struct sk_buff *skb, 72 struct mlx5_cqe64 *cqe); 73 static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st) 74 { 75 return ipsec_st->tailen; 76 } 77 78 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) 79 { 80 return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata)); 81 } 82 83 static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st) 84 { 85 return ipsec_st->x; 86 } 87 88 static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg) 89 { 90 return eseg->flow_table_metadata & 
cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC); 91 } 92 93 void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb, 94 struct mlx5_wqe_eth_seg *eseg); 95 96 static inline netdev_features_t 97 mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features) 98 { 99 struct xfrm_offload *xo = xfrm_offload(skb); 100 struct sec_path *sp = skb_sec_path(skb); 101 102 if (sp && sp->len && xo) { 103 struct xfrm_state *x = sp->xvec[0]; 104 105 if (!x || !x->xso.offload_handle) 106 goto out_disable; 107 108 if (xo->inner_ipproto) { 109 /* Cannot support tunnel packet over IPsec tunnel mode 110 * because we cannot offload three IP header csum 111 */ 112 if (x->props.mode == XFRM_MODE_TUNNEL) 113 goto out_disable; 114 115 /* Only support UDP or TCP L4 checksum */ 116 if (xo->inner_ipproto != IPPROTO_UDP && 117 xo->inner_ipproto != IPPROTO_TCP) 118 goto out_disable; 119 } 120 121 return features; 122 123 } 124 125 /* Disable CSUM and GSO for software IPsec */ 126 out_disable: 127 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 128 } 129 130 static inline bool 131 mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, 132 struct mlx5_wqe_eth_seg *eseg) 133 { 134 struct xfrm_offload *xo = xfrm_offload(skb); 135 136 if (!mlx5e_ipsec_eseg_meta(eseg)) 137 return false; 138 139 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; 140 if (xo->inner_ipproto) { 141 eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM; 142 } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 143 eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; 144 sq->stats->csum_partial_inner++; 145 } 146 147 return true; 148 } 149 #else 150 static inline 151 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, 152 struct sk_buff *skb, 153 struct mlx5_cqe64 *cqe) 154 {} 155 156 static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg) 157 { 158 return false; 159 } 160 161 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) 
{ return false; } 162 static inline netdev_features_t 163 mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features) 164 { return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } 165 166 static inline bool 167 mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, 168 struct mlx5_wqe_eth_seg *eseg) 169 { 170 return false; 171 } 172 #endif /* CONFIG_MLX5_EN_IPSEC */ 173 174 #endif /* __MLX5E_IPSEC_RXTX_H__ */ 175