/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef __MLX5E_EN_ACCEL_H__
#define __MLX5E_EN_ACCEL_H__

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/macsec.h"
43 #include "en.h"
44 #include "en/txrx.h"
45
46 #if IS_ENABLED(CONFIG_GENEVE)
47 #include <net/geneve.h>
48
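/* GENEVE TX offload relies on the HW software parser (SWP), so only
 * allow it when the device reports SWP support.
 */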
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return mlx5_tx_swp_supported(mdev);
}

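/* Fill the software-parser (SWP) offsets in the eth segment for a
 * GENEVE-encapsulated skb so the HW can locate the inner headers.
 * Bails out early for anything that is not UDP to GENEVE_UDP_PORT.
 */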
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	struct mlx5e_swp_spec swp_spec = {};
	unsigned int offset = 0;
	__be16 l3_proto;
	u8 l4_proto;

	l3_proto = vlan_get_protocol(skb);
	switch (l3_proto) {
	case htons(ETH_P_IP):
		l4_proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		return;
	}

	if (l4_proto != IPPROTO_UDP ||
	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
		return;
	swp_spec.l3_proto = l3_proto;
	swp_spec.l4_proto = l4_proto;
	swp_spec.is_tun = true;
	if (inner_ip_hdr(skb)->version == 6) {
		swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
	} else {
		swp_spec.tun_l3_proto = htons(ETH_P_IP);
		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
	}

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
	if (skb_vlan_tag_present(skb) && ihs)
		mlx5e_eseg_swp_offsets_add_vlan(eseg);
}

#else
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return false;
}

#endif /* CONFIG_GENEVE */

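/* For UDP GSO (SKB_GSO_UDP_L4), stamp the UDP header length with the
 * per-segment payload length; use the inner header when encapsulated.
 */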
static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
	struct udphdr *udphdr;

	if (skb->encapsulation)
		udphdr = (struct udphdr *)skb_inner_transport_header(skb);
	else
		udphdr = udp_hdr(skb);

	udphdr->len = htons(payload_len);
}

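/* Per-skb state carried between the TX acceleration stages (begin, eseg
 * build, finish); members exist only when the offload is compiled in.
 */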
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
};

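/* First TX accel stage, called before building the WQE: fix up UDP GSO
 * and let kTLS/IPsec/MACsec inspect the skb. A false return tells the
 * caller to drop the skb.
 */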
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_accel_tx_state *state)
{
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		mlx5e_udp_gso_handle_tx_skb(skb);

#ifdef CONFIG_MLX5_EN_TLS
	/* May send WQEs. */
	if (tls_is_skb_tx_device_offloaded(skb))
		if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
						       &state->tls)))
			return false;
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
			return false;
	}
#endif

#ifdef CONFIG_MLX5_MACSEC
	if (unlikely(mlx5e_macsec_skb_is_offload(skb))) {
		struct mlx5e_priv *priv = netdev_priv(dev);

		if (unlikely(!mlx5e_macsec_handle_tx_skb(priv->macsec, skb)))
			return false;
	}
#endif

	return true;
}

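/* Extra inline bytes the active offloads will add to the WQE for this
 * packet; 0 when no offload state applies.
 */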
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
						  struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
#endif

	return 0;
}

/* Part of the eseg touched by TX offloads */
#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)

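/* Second TX accel stage: fill the offload-owned part of the eth segment
 * (IPsec, MACsec, and GENEVE SWP offsets) before the WQE is posted.
 */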
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
				       struct sk_buff *skb,
				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (xfrm_offload(skb))
		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif

#ifdef CONFIG_MLX5_MACSEC
	if (unlikely(mlx5e_macsec_skb_is_offload(skb)))
		mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg);
#endif

#if IS_ENABLED(CONFIG_GENEVE)
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
}

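/* Last TX accel stage, called once the WQE is built: patch the control
 * segment for kTLS and append the IPsec trailer when needed.
 */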
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
					 struct mlx5e_tx_wqe *wqe,
					 struct mlx5e_accel_tx_state *state,
					 struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
	mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
	    state->ipsec.xo && state->ipsec.tailen)
		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
}

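/* RX/TX acceleration setup and teardown; currently these only wrap the
 * kTLS init/cleanup paths.
 */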
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
	return mlx5e_ktls_init_rx(priv);
}

static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_rx(priv);
}

static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
{
	return mlx5e_ktls_init_tx(priv);
}

static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_tx(priv);
}
#endif /* __MLX5E_EN_ACCEL_H__ */