// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */

#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"
#include "fs_core.h"
#include "eswitch.h"

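/* Data offsets, in 64-bit words, into the IPsec ASO context. They are
 * written into data_offset_condition_operand of the ASO WQEs built below
 * to select which word a bitwise update targets: the hard-lifetime packet
 * counter or the soft-lifetime threshold.
 */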
enum {
        MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
        MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET,
};

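/* Report which IPsec offload modes the device supports. Returns a bitmask
 * of MLX5_IPSEC_CAP_* flags, or 0 when a mandatory capability (IPsec
 * offload, DEK objects, the IPSEC general object or ESP AES-GCM-128) is
 * missing.
 */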
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
        u32 caps = 0;

        if (!MLX5_CAP_GEN(mdev, ipsec_offload))
                return 0;

        if (!MLX5_CAP_GEN(mdev, log_max_dek))
                return 0;

        if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
            MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
                return 0;

        if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
            !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
                return 0;

        if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
            !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
                return 0;

        if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
            MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
                caps |= MLX5_IPSEC_CAP_CRYPTO;

        if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
            (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
             (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS &&
              is_mdev_legacy_mode(mdev)))) {
                if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
                                              reformat_add_esp_trasport) &&
                    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                              reformat_del_esp_trasport) &&
                    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
                        caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

                if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
                    ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
                      MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
                     MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
                        caps |= MLX5_IPSEC_CAP_PRIO;

                if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
                                              reformat_l2_to_l3_esp_tunnel) &&
                    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                              reformat_l3_esp_tunnel_to_l2))
                        caps |= MLX5_IPSEC_CAP_TUNNEL;

                if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
                                              reformat_add_esp_transport_over_udp) &&
                    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                              reformat_del_esp_transport_over_udp))
                        caps |= MLX5_IPSEC_CAP_ESPINUDP;
        }

        if (mlx5_get_roce_state(mdev) &&
            MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
            MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
                caps |= MLX5_IPSEC_CAP_ROCE;

        if (!caps)
                return 0;

        if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
                caps |= MLX5_IPSEC_CAP_ESN;
        /* We can accommodate up to 2^24 different IPsec objects because we
         * use up to 24 bits of flow table metadata to hold the unique
         * IPsec object handle.
         */
        WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
        return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);

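/* Fill the parts of the IPSEC object that are specific to packet offload:
 * the ASO context used for replay protection, sequence number tracking and
 * soft/hard lifetime enforcement.
 */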
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
                                     struct mlx5_accel_esp_xfrm_attrs *attrs)
{
        void *aso_ctx;

        aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
        if (attrs->replay_esn.trigger) {
                MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

                if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
                        MLX5_SET(ipsec_aso, aso_ctx, window_sz,
                                 attrs->replay_esn.replay_window);
                        MLX5_SET(ipsec_aso, aso_ctx, mode,
                                 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
                }
                MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
                         attrs->replay_esn.esn);
        }

        /* ASO context */
        MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
        MLX5_SET(ipsec_obj, obj, full_offload, 1);
        MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
        /* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register used by flow
         * steering to match against. Note that this register was chosen
         * arbitrarily, and it can't be used elsewhere as long as IPsec
         * packet offload is active.
         */
        MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
        if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
                MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);

        if (attrs->lft.hard_packet_limit != XFRM_INF) {
                MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
                         attrs->lft.hard_packet_limit);
                MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
        }

        if (attrs->lft.soft_packet_limit != XFRM_INF) {
                MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
                         attrs->lft.soft_packet_limit);

                MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
        }
}

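/* Create the IPSEC general object in the device through the command
 * interface and store the returned object id in the SA entry.
 */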
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
        u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
        void *obj, *salt_p, *salt_iv_p;
        struct mlx5e_hw_objs *res;
        int err;

        obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

        /* salt and seq_iv */
        salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
        memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

        MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
        salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
        memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
        /* esn */
        if (attrs->replay_esn.trigger) {
                MLX5_SET(ipsec_obj, obj, esn_en, 1);
                MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
                MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
        }

        MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

        /* general object fields set */
        MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
                 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
                 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

        res = &mdev->mlx5e_res.hw_objs;
        if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
                mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);

        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
        if (!err)
                sa_entry->ipsec_obj_id =
                        MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

        return err;
}

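/* Destroy the IPSEC general object created by mlx5_create_ipsec_obj(). */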
static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

        MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
                 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
                 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

        mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

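/* Build the HW context of an SA: first the DEK holding the AES-GCM key,
 * then the IPSEC object referencing it. The key is released if the object
 * creation fails.
 */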
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        int err;

        /* key */
        err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
                                         aes_gcm->key_len / BITS_PER_BYTE,
                                         MLX5_ACCEL_OBJ_IPSEC_KEY,
                                         &sa_entry->enc_key_id);
        if (err) {
                mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
                return err;
        }

        err = mlx5_create_ipsec_obj(sa_entry);
        if (err) {
                mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
                goto err_enc_key;
        }

        return 0;

err_enc_key:
        mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
        return err;
}

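/* Tear down the SA HW context in reverse order of creation. */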
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

        mlx5_destroy_ipsec_obj(sa_entry);
        mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}

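/* Update the ESN MSB and overlap bit of an existing IPSEC object. The
 * object is queried first to verify, via modify_field_select, that the
 * firmware allows modifying these fields.
 */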
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
                                 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
        u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
        u64 modify_field_select = 0;
        u64 general_obj_types;
        void *obj;
        int err;

        general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
        if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
                return -EINVAL;

        /* general object fields set */
        MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
        if (err) {
                mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
                              sa_entry->ipsec_obj_id, err);
                return err;
        }

        obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
        modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

        /* esn */
        if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
            !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
                return -EOPNOTSUPP;

        obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
        MLX5_SET64(ipsec_obj, obj, modify_field_select,
                   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
                   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
        MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
        MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);

        /* general object fields set */
        MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

        return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
                                const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
        int err;

        err = mlx5_modify_ipsec_obj(sa_entry, attrs);
        if (err)
                return;

        memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

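/* Issue a bitwise 64-bit ASO modification. Both conditions are set to
 * ALWAYS_TRUE, so the update described by @data is applied unconditionally.
 */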
static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
                                   struct mlx5_wqe_aso_ctrl_seg *data)
{
        data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
        data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
                                      MLX5_ASO_ALWAYS_TRUE << 4;

        mlx5e_ipsec_aso_query(sa_entry, data);
}

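/* Advance the local ESN state after an ESN event: either move to the next
 * MSB window or mark the overlap period, propagate the new state to the HW
 * object, and re-arm the ESN event with a bitwise ASO update.
 */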
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
                                         u32 mode_param)
{
        struct mlx5_accel_esp_xfrm_attrs attrs = {};
        struct mlx5_wqe_aso_ctrl_seg data = {};

        if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
                sa_entry->esn_state.esn_msb++;
                sa_entry->esn_state.overlap = 0;
        } else {
                sa_entry->esn_state.overlap = 1;
        }

        mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);

        /* It is safe to execute the modify below unlocked since the only
         * flows that could affect this HW object are create, destroy and
         * this work.
         *
         * The creation flow can't co-exist with this modify work, the
         * destruction flow would cancel this work, and this work is a
         * single entity that can't conflict with itself.
         */
        spin_unlock_bh(&sa_entry->x->lock);
        mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
        spin_lock_bh(&sa_entry->x->lock);

        data.data_offset_condition_operand =
                MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
        data.bitwise_data = cpu_to_be64(BIT_ULL(54));
        data.data_mask = data.bitwise_data;

        mlx5e_ipsec_aso_update(sa_entry, &data);
}

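/* Start the next hard-lifetime round: one bitwise update sets bits 57 and
 * 31 of the first ASO word, which appear to re-arm the hard-limit event
 * and reload the high bit of the packet counter, so each round counts at
 * most 2^31 packets.
 */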
static void mlx5e_ipsec_aso_update_hard(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_wqe_aso_ctrl_seg data = {};

        data.data_offset_condition_operand =
                MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
        data.bitwise_data = cpu_to_be64(BIT_ULL(57) + BIT_ULL(31));
        data.data_mask = data.bitwise_data;
        mlx5e_ipsec_aso_update(sa_entry, &data);
}

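/* Overwrite the 32-bit soft-lifetime threshold in the ASO context with
 * @val.
 */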
static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry,
                                        u32 val)
{
        struct mlx5_wqe_aso_ctrl_seg data = {};

        data.data_offset_condition_operand =
                MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET;
        data.bitwise_data = cpu_to_be64(val);
        data.data_mask = cpu_to_be64(U32_MAX);
        mlx5e_ipsec_aso_update(sa_entry, &data);
}

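/* Handle a lifetime event. Packet limits above 2^31 are implemented as
 * multiple HW "rounds" of at most 2^31 packets, so this function re-arms
 * the ASO for the next round, raises soft/hard expiry notifications to the
 * xfrm stack at the configured round boundaries, and compensates for the
 * high-bit corner case described below.
 */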
static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5e_ipsec_aso *aso = ipsec->aso;
        bool soft_arm, hard_arm;
        u64 hard_cnt;

        lockdep_assert_held(&sa_entry->x->lock);

        soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm);
        hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm);
        if (!soft_arm && !hard_arm)
                /* Not a lifetime event */
                return;

        hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
        if (!hard_cnt || hard_arm) {
                /* The packet counter can be zero without the hard-limit
                 * event being armed: the counter may have dropped to zero
                 * while we were handling a soft-limit event.
                 *
                 * However, it would be a HW/FW bug for a hard-limit event
                 * to be raised while the packet counter is non-zero.
                 */
                WARN_ON_ONCE(hard_arm && hard_cnt);

                /* Notify about hard limit */
                xfrm_state_check_expire(sa_entry->x);
                return;
        }

        /* We are handling a soft limit event. */
        if (!sa_entry->limits.soft_limit_hit &&
            sa_entry->limits.round == attrs->lft.numb_rounds_soft) {
                sa_entry->limits.soft_limit_hit = true;
                /* Notify about soft limit */
                xfrm_state_check_expire(sa_entry->x);

                if (sa_entry->limits.round == attrs->lft.numb_rounds_hard)
                        goto hard;

                if (attrs->lft.soft_packet_limit > BIT_ULL(31)) {
                        /* We cannot avoid a soft_value that has the high
                         * bit set. For instance, soft_value = 2^31 + 1
                         * cannot be adjusted to the low-bit-clear version
                         * of soft_value = 1 because it is too close to 0.
                         *
                         * So there is a corner case where we can hit the
                         * soft limit with the high bit set but cannot
                         * adjust the counter. In that case, set a temporary
                         * interrupt value at least 2^30 away from here and
                         * do the adjustment then.
                         */
                        mlx5e_ipsec_aso_update_soft(sa_entry,
                                                    BIT_ULL(31) - BIT_ULL(30));
                        sa_entry->limits.fix_limit = true;
                        return;
                }

                sa_entry->limits.fix_limit = true;
        }

hard:
        if (sa_entry->limits.round == attrs->lft.numb_rounds_hard) {
                mlx5e_ipsec_aso_update_soft(sa_entry, 0);
                attrs->lft.soft_packet_limit = XFRM_INF;
                return;
        }

        mlx5e_ipsec_aso_update_hard(sa_entry);
        sa_entry->limits.round++;
        if (sa_entry->limits.round == attrs->lft.numb_rounds_soft)
                mlx5e_ipsec_aso_update_soft(sa_entry,
                                            attrs->lft.soft_packet_limit);
        if (sa_entry->limits.fix_limit) {
                sa_entry->limits.fix_limit = false;
                mlx5e_ipsec_aso_update_soft(sa_entry, BIT_ULL(31) - 1);
        }
}

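/* Work handler for an IPSEC object change event: re-read the ASO context
 * from the device and run the ESN and lifetime handlers under the xfrm
 * state lock.
 */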
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
        struct mlx5e_ipsec_work *work =
                container_of(_work, struct mlx5e_ipsec_work, work);
        struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
        struct mlx5_accel_esp_xfrm_attrs *attrs;
        struct mlx5e_ipsec_aso *aso;
        int ret;

        aso = sa_entry->ipsec->aso;
        attrs = &sa_entry->attrs;

        spin_lock_bh(&sa_entry->x->lock);
        ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
        if (ret)
                goto unlock;

        if (attrs->replay_esn.trigger &&
            !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
                u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

                mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
        }

        if (attrs->lft.soft_packet_limit != XFRM_INF)
                mlx5e_ipsec_handle_limits(sa_entry);

unlock:
        spin_unlock_bh(&sa_entry->x->lock);
        kfree(work);
}

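/* Notifier callback for firmware object-change events. It runs in atomic
 * context, hence the GFP_ATOMIC allocation and the deferral of the actual
 * handling to a workqueue.
 */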
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
                             void *data)
{
        struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct mlx5_eqe_obj_change *object;
        struct mlx5e_ipsec_work *work;
        struct mlx5_eqe *eqe = data;
        u16 type;

        if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
                return NOTIFY_DONE;

        object = &eqe->data.obj_change;
        type = be16_to_cpu(object->obj_type);

        if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
                return NOTIFY_DONE;

        sa_entry = xa_load(&ipsec->sadb, be32_to_cpu(object->obj_id));
        if (!sa_entry)
                return NOTIFY_DONE;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;

        INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
        work->data = sa_entry;

        queue_work(ipsec->wq, &work->work);
        return NOTIFY_OK;
}

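/* Allocate the ASO instance shared by all SAs of this IPsec instance,
 * DMA-map the scratch context used for query responses and register the
 * object-change notifier.
 */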
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5e_ipsec_aso *aso;
        struct mlx5e_hw_objs *res;
        struct device *pdev;
        int err;

        aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
        if (!aso)
                return -ENOMEM;

        res = &mdev->mlx5e_res.hw_objs;

        pdev = mlx5_core_dma_dev(mdev);
        aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
                                       DMA_BIDIRECTIONAL);
        err = dma_mapping_error(pdev, aso->dma_addr);
        if (err)
                goto err_dma;

        aso->aso = mlx5_aso_create(mdev, res->pdn);
        if (IS_ERR(aso->aso)) {
                err = PTR_ERR(aso->aso);
                goto err_aso_create;
        }

        spin_lock_init(&aso->lock);
        ipsec->nb.notifier_call = mlx5e_ipsec_event;
        mlx5_notifier_register(mdev, &ipsec->nb);

        ipsec->aso = aso;
        return 0;

err_aso_create:
        dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
                         DMA_BIDIRECTIONAL);
err_dma:
        kfree(aso);
        return err;
}

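/* Unwind mlx5e_ipsec_aso_init(): notifier, ASO instance, DMA mapping and
 * memory.
 */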
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5e_ipsec_aso *aso;
        struct device *pdev;

        aso = ipsec->aso;
        pdev = mlx5_core_dma_dev(mdev);

        mlx5_notifier_unregister(mdev, &ipsec->nb);
        mlx5_aso_destroy(aso->aso);
        dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
                         DMA_BIDIRECTIONAL);
        kfree(aso);
        ipsec->aso = NULL;
}

static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
                                 struct mlx5_wqe_aso_ctrl_seg *data)
{
        if (!data)
                return;

        ctrl->data_mask_mode = data->data_mask_mode;
        ctrl->condition_1_0_operand = data->condition_1_0_operand;
        ctrl->condition_1_0_offset = data->condition_1_0_offset;
        ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
        ctrl->condition_0_data = data->condition_0_data;
        ctrl->condition_0_mask = data->condition_0_mask;
        ctrl->condition_1_data = data->condition_1_data;
        ctrl->condition_1_mask = data->condition_1_mask;
        ctrl->bitwise_data = data->bitwise_data;
        ctrl->data_mask = data->data_mask;
}

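/* Post a query (optionally combined with a modification taken from @data)
 * on the IPsec ASO and busy-poll its completion for up to 10ms. On success
 * the freshly read ASO context is available in aso->ctx. Called under the
 * xfrm state lock, hence the atomic-context busy wait.
 */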
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
                          struct mlx5_wqe_aso_ctrl_seg *data)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5e_ipsec_aso *aso = ipsec->aso;
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5_wqe_aso_ctrl_seg *ctrl;
        struct mlx5e_hw_objs *res;
        struct mlx5_aso_wqe *wqe;
        unsigned long expires;
        u8 ds_cnt;
        int ret;

        lockdep_assert_held(&sa_entry->x->lock);
        res = &mdev->mlx5e_res.hw_objs;

        spin_lock_bh(&aso->lock);
        memset(aso->ctx, 0, sizeof(aso->ctx));
        wqe = mlx5_aso_get_wqe(aso->aso);
        ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
        mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
                           MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

        ctrl = &wqe->aso_ctrl;
        ctrl->va_l =
                cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
        ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
        ctrl->l_key = cpu_to_be32(res->mkey);
        mlx5e_ipsec_aso_copy(ctrl, data);

        mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
        expires = jiffies + msecs_to_jiffies(10);
        do {
                ret = mlx5_aso_poll_cq(aso->aso, false);
                if (ret)
                        /* We are in atomic context */
                        udelay(10);
        } while (ret && time_is_after_jiffies(expires));
        spin_unlock_bh(&aso->lock);
        return ret;
}
629