// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */

#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/mlx5.h"

enum {
	MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};

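/* Report which IPsec offload modes this device supports, as a bitmask
 * of MLX5_IPSEC_CAP_* flags. Returns 0 when any mandatory capability
 * (IPsec general object, DEKs, AES-GCM-128 crypto, RX/TX steering
 * hooks) is missing. Crypto offload additionally requires trailer
 * insertion and the software parser; packet offload requires the ESP
 * transport reformat and decap steering capabilities.
 */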
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	u32 caps = 0;

	if (!MLX5_CAP_GEN(mdev, ipsec_offload))
		return 0;

	if (!MLX5_CAP_GEN(mdev, log_max_dek))
		return 0;

	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return 0;

	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
		return 0;

	if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
	    !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
	    MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
		caps |= MLX5_IPSEC_CAP_CRYPTO;

	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
	    MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
		caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

	if (!caps)
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
		caps |= MLX5_IPSEC_CAP_ESN;

	/* We can accommodate up to 2^24 different IPsec objects
	 * because we use up to 24 bits of flow table metadata
	 * to hold the unique IPsec object handle.
	 */
	WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
	return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);

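/* Fill the ASO context of an IPsec object for packet offload mode:
 * arm the ESN event and, for RX, the anti-replay window; program the
 * hard/soft packet-lifetime arms when the limits are finite; and
 * select the flow-steering register that carries the ASO result.
 */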
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
				     struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	void *aso_ctx;

	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
	if (attrs->esn_trigger) {
		MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

		if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
			MLX5_SET(ipsec_aso, aso_ctx, window_sz,
				 attrs->replay_window / 64);
			MLX5_SET(ipsec_aso, aso_ctx, mode,
				 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
		}
	}

	/* ASO context */
	MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
	MLX5_SET(ipsec_obj, obj, full_offload, 1);
	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
	/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that flow
	 * steering matches against. Note that this register was chosen
	 * arbitrarily and must not be used elsewhere as long as IPsec
	 * packet offload is active.
	 */
	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);

	if (attrs->hard_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
			 lower_32_bits(attrs->hard_packet_limit));
		MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
	}

	if (attrs->soft_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
			 lower_32_bits(attrs->soft_packet_limit));

		MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
	}
}

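/* Create the IPsec general object in firmware from the SA attributes:
 * salt, implicit IV, ICV length, optional ESN state and the index of
 * the previously created DEK. On success the returned object ID is
 * stored in the SA entry for later steering and modify commands.
 */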
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
	void *obj, *salt_p, *salt_iv_p;
	struct mlx5e_hw_objs *res;
	int err;

	obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

	/* salt and seq_iv */
	salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
	memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

	MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
	/* esn */
	if (attrs->esn_trigger) {
		MLX5_SET(ipsec_obj, obj, esn_en, 1);
		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
	}

	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

	res = &mdev->mlx5e_res.hw_objs;
	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sa_entry->ipsec_obj_id =
			MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

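/* Destroy the firmware IPsec object that backs this SA entry. */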
static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

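/* Build the full hardware context of an SA: create the DEK from the
 * AES-GCM key material, then the IPsec object that references it.
 * The key is destroyed again if object creation fails.
 */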
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	int err;

	/* key */
	err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
					 aes_gcm->key_len / BITS_PER_BYTE,
					 MLX5_ACCEL_OBJ_IPSEC_KEY,
					 &sa_entry->enc_key_id);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	err = mlx5_create_ipsec_obj(sa_entry);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
		goto err_enc_key;
	}

	return 0;

err_enc_key:
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
	return err;
}

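/* Tear down the SA hardware context in reverse creation order. */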
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_destroy_ipsec_obj(sa_entry);
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}

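/* Update the ESN state (msb and overlap bit) of an existing IPsec
 * object; a no-op for SAs without ESN. The object is queried first to
 * verify that firmware allows modifying these fields, and -EOPNOTSUPP
 * is returned when it does not.
 */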
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
				 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
	u64 modify_field_select = 0;
	u64 general_obj_types;
	void *obj;
	int err;

	if (!attrs->esn_trigger)
		return 0;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return -EINVAL;

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
			      sa_entry->ipsec_obj_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
	modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

	/* esn */
	if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
	MLX5_SET64(ipsec_obj, obj, modify_field_select,
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

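/* Push new xfrm attributes to hardware and, only on success, make
 * them the SA entry's current attributes.
 */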
void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
				const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	int err;

	err = mlx5_modify_ipsec_obj(sa_entry, attrs);
	if (err)
		return;

	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

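/* Re-arm the ESN event once it has fired: post a bitwise 64-bit ASO
 * modify that sets a single bit (BIT_ULL(54)) in the ASO data so that
 * subsequent ESN rollovers raise new events, alongside the usual
 * context read-back.
 */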
static void
mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry,
			   const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
	data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
				     MLX5_ASO_ALWAYS_TRUE << 4;
	data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(54));
	data.data_mask = data.bitwise_data;

	mlx5e_ipsec_aso_query(sa_entry, &data);
}

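/* Fold the mode parameter reported by hardware back into the local
 * ESN state: a value below the scope midpoint suggests the 32-bit SN
 * wrapped, so the ESN is advanced and the overlap flag cleared;
 * otherwise only the overlap flag is set. The new state is then
 * pushed to the IPsec object and the ASO event is re-armed.
 */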
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
					 u32 mode_param)
{
	struct mlx5_accel_esp_xfrm_attrs attrs = {};

	if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
		sa_entry->esn_state.esn++;
		sa_entry->esn_state.overlap = 0;
	} else {
		sa_entry->esn_state.overlap = 1;
	}

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
	mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
	mlx5e_ipsec_aso_update_esn(sa_entry, &attrs);
}

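/* Work handler for IPsec object-change events: look the SA up by
 * object ID, read back its ASO context, update the ESN state if the
 * ESN event fired, and kick the xfrm expiry machinery if hardware
 * cleared one of the lifetime arm bits.
 */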
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec_aso *aso;
	struct mlx5e_ipsec *ipsec;
	int ret;

	sa_entry = xa_load(&work->ipsec->sadb, work->id);
	if (!sa_entry)
		goto out;

	ipsec = sa_entry->ipsec;
	aso = ipsec->aso;
	attrs = &sa_entry->attrs;

	spin_lock(&sa_entry->x->lock);
	ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (ret)
		goto unlock;

	if (attrs->esn_trigger &&
	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

		mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
	}

	if (attrs->soft_packet_limit != XFRM_INF)
		if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
		    !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
		    !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
			xfrm_state_check_expire(sa_entry->x);

unlock:
	spin_unlock(&sa_entry->x->lock);
out:
	kfree(work);
}

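/* Notifier callback, called in atomic context: filter for
 * object-change events on IPsec objects and defer the actual
 * handling, which posts ASO work and polls for its completion,
 * to the IPsec workqueue.
 */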
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
	struct mlx5_eqe_obj_change *object;
	struct mlx5e_ipsec_work *work;
	struct mlx5_eqe *eqe = data;
	u16 type;

	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
		return NOTIFY_DONE;

	object = &eqe->data.obj_change;
	type = be16_to_cpu(object->obj_type);

	if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
		return NOTIFY_DONE;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
	work->ipsec = ipsec;
	work->id = be32_to_cpu(object->obj_id);

	queue_work(ipsec->wq, &work->work);
	return NOTIFY_OK;
}

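/* Set up the single ASO context shared by all SAs of this IPsec
 * instance: DMA-map the response buffer, create the ASO send queue
 * and register for object-change events.
 */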
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct mlx5e_hw_objs *res;
	struct device *pdev;
	int err;

	aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
	if (!aso)
		return -ENOMEM;

	res = &mdev->mlx5e_res.hw_objs;

	pdev = mlx5_core_dma_dev(mdev);
	aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
				       DMA_BIDIRECTIONAL);
	err = dma_mapping_error(pdev, aso->dma_addr);
	if (err)
		goto err_dma;

	aso->aso = mlx5_aso_create(mdev, res->pdn);
	if (IS_ERR(aso->aso)) {
		err = PTR_ERR(aso->aso);
		goto err_aso_create;
	}

	spin_lock_init(&aso->lock);
	ipsec->nb.notifier_call = mlx5e_ipsec_event;
	mlx5_notifier_register(mdev, &ipsec->nb);

	ipsec->aso = aso;
	return 0;

err_aso_create:
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
err_dma:
	kfree(aso);
	return err;
}

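/* Undo mlx5e_ipsec_aso_init() in reverse order. */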
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct device *pdev;

	aso = ipsec->aso;
	pdev = mlx5_core_dma_dev(mdev);

	mlx5_notifier_unregister(mdev, &ipsec->nb);
	mlx5_aso_destroy(aso->aso);
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
	kfree(aso);
}

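/* Copy the caller-supplied modify parameters into the WQE ASO control
 * segment; with a NULL @data the WQE stays a plain read-back query.
 */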
static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
				 struct mlx5_wqe_aso_ctrl_seg *data)
{
	if (!data)
		return;

	ctrl->data_mask_mode = data->data_mask_mode;
	ctrl->condition_1_0_operand = data->condition_1_0_operand;
	ctrl->condition_1_0_offset = data->condition_1_0_offset;
	ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
	ctrl->condition_0_data = data->condition_0_data;
	ctrl->condition_0_mask = data->condition_0_mask;
	ctrl->condition_1_data = data->condition_1_data;
	ctrl->condition_1_mask = data->condition_1_mask;
	ctrl->bitwise_data = data->bitwise_data;
	ctrl->data_mask = data->data_mask;
}

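/* Post a single ASO WQE for this SA and poll for its completion.
 * Hardware writes the current ASO context back into aso->ctx through
 * the DMA address programmed in the control segment; @data, when
 * non-NULL, additionally carries a modify operation. Serialized by
 * aso->lock because the ASO SQ and response buffer are shared.
 */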
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
			  struct mlx5_wqe_aso_ctrl_seg *data)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_wqe_aso_ctrl_seg *ctrl;
	struct mlx5e_hw_objs *res;
	struct mlx5_aso_wqe *wqe;
	u8 ds_cnt;
	int ret;

	lockdep_assert_held(&sa_entry->x->lock);
	res = &mdev->mlx5e_res.hw_objs;

	spin_lock_bh(&aso->lock);
	memset(aso->ctx, 0, sizeof(aso->ctx));
	wqe = mlx5_aso_get_wqe(aso->aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

	ctrl = &wqe->aso_ctrl;
	ctrl->va_l =
		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
	ctrl->l_key = cpu_to_be32(res->mkey);
	mlx5e_ipsec_aso_copy(ctrl, data);

	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
	ret = mlx5_aso_poll_cq(aso->aso, false);
	spin_unlock_bh(&aso->lock);
	return ret;
}

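/* Convert the hardware's remaining-packets counter, read back by the
 * last ASO query, into the cumulative packet count that the xfrm
 * lifetime accounting expects.
 */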
void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
				   u64 *packets)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	u64 hard_cnt;

	hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
	/* HW decreases the limit till it reaches zero to fire an event.
	 * We need to fix the calculations, so the returned count is the
	 * total number of passed packets and not how many are left.
	 */
	*packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
}