1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 
4 #include "fs_core.h"
5 #include "eswitch.h"
6 #include "en_accel/ipsec.h"
7 #include "esw/ipsec_fs.h"
8 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
9 #include "en/tc_priv.h"
10 #endif
11 
/* Flow-table levels for the eswitch (FDB) IPsec RX pipeline, all within
 * the FDB_CRYPTO_INGRESS priority. Packets traverse policy -> SA (ESP
 * decrypt) -> status-check tables in increasing level order.
 */
enum {
	MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
	MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
	MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
};
17 
/* Flow-table levels for the eswitch (FDB) IPsec TX pipeline, all within
 * the FDB_CRYPTO_EGRESS priority. Packets traverse policy -> SA (ESP
 * encrypt) -> counter tables in increasing level order.
 */
enum {
	MLX5_ESW_IPSEC_TX_POL_FT_LEVEL,
	MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL,
	MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
};
23 
mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec * ipsec,struct mlx5e_ipsec_rx_create_attr * attr)24 void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
25 				       struct mlx5e_ipsec_rx_create_attr *attr)
26 {
27 	attr->prio = FDB_CRYPTO_INGRESS;
28 	attr->pol_level = MLX5_ESW_IPSEC_RX_POL_FT_LEVEL;
29 	attr->sa_level = MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL;
30 	attr->status_level = MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL;
31 	attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
32 }
33 
mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec * ipsec,struct mlx5_flow_destination * dest)34 int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
35 					   struct mlx5_flow_destination *dest)
36 {
37 	dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
38 	dest->ft = mlx5_chains_get_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
39 
40 	return 0;
41 }
42 
/* Allocate a compact mapped ID for @sa_entry's IPsec object and attach a
 * modify-header action to @flow_act that writes that ID into metadata
 * register REG_C_1, so RX packets can later be matched back to the SA.
 *
 * Returns 0 on success and records the ID in sa_entry->rx_mapped_id;
 * returns a negative errno on failure (nothing left allocated).
 */
int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
					  struct mlx5_flow_act *flow_act)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_modify_hdr *modify_hdr;
	u32 mapped_id;
	int err;

	/* IDs start at 1, so 0 can mean "no mapping" (see
	 * mlx5_esw_ipsec_rx_id_mapping_remove()). The xarray value maps
	 * mapped_id -> ipsec_obj_id for the reverse lookup.
	 */
	err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
			  xa_mk_value(sa_entry->ipsec_obj_id),
			  XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
	if (err)
		return err;

	/* reuse tunnel bits for ipsec,
	 * tun_id is always 0 and tun_opts is mapped to ipsec_obj_id.
	 */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	/* Write above the zone-ID bits, covering the tunnel id+opts field */
	MLX5_SET(set_action_in, action, offset, ESW_ZONE_ID_BITS);
	MLX5_SET(set_action_in, action, length,
		 ESW_TUN_ID_BITS + ESW_TUN_OPTS_BITS);
	MLX5_SET(set_action_in, action, data, mapped_id);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		goto err_header_alloc;
	}

	sa_entry->rx_mapped_id = mapped_id;
	flow_act->modify_hdr = modify_hdr;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	return 0;

err_header_alloc:
	/* Undo the ID allocation so the slot can be reused */
	xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
	return err;
}
87 
mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry * sa_entry)88 void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
89 {
90 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
91 
92 	if (sa_entry->rx_mapped_id)
93 		xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
94 			    sa_entry->rx_mapped_id);
95 }
96 
/* Reverse lookup: translate a mapped ID @id (as written into REG_C_1 on
 * RX) back to the IPsec object ID it was allocated for.
 *
 * Returns 0 and stores the result in *@ipsec_obj_id, or -ENOENT if no
 * such mapping exists.
 */
int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
					  u32 *ipsec_obj_id)
{
	void *entry = xa_load(&priv->ipsec->rx_esw->ipsec_obj_id_map, id);

	if (!entry)
		return -ENOENT;

	*ipsec_obj_id = xa_to_value(entry);
	return 0;
}
111 
mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec * ipsec,struct mlx5e_ipsec_tx_create_attr * attr)112 void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
113 				       struct mlx5e_ipsec_tx_create_attr *attr)
114 {
115 	attr->prio = FDB_CRYPTO_EGRESS;
116 	attr->pol_level = MLX5_ESW_IPSEC_TX_POL_FT_LEVEL;
117 	attr->sa_level = MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL;
118 	attr->cnt_level = MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL;
119 	attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
120 }
121 
122 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
mlx5_esw_ipsec_modify_flow_dests(struct mlx5_eswitch * esw,struct mlx5e_tc_flow * flow)123 static int mlx5_esw_ipsec_modify_flow_dests(struct mlx5_eswitch *esw,
124 					    struct mlx5e_tc_flow *flow)
125 {
126 	struct mlx5_esw_flow_attr *esw_attr;
127 	struct mlx5_flow_attr *attr;
128 	int err;
129 
130 	attr = flow->attr;
131 	esw_attr = attr->esw_attr;
132 	if (esw_attr->out_count - esw_attr->split_count > 1)
133 		return 0;
134 
135 	err = mlx5_eswitch_restore_ipsec_rule(esw, flow->rule[0], esw_attr,
136 					      esw_attr->out_count - 1);
137 
138 	return err;
139 }
140 #endif
141 
/* Walk every vport representor's TC flow table and restore each flow's
 * destination for IPsec (see mlx5_esw_ipsec_modify_flow_dests()).
 * Failures are logged once and the walk continues; this is best-effort.
 * Compiled out (no-op) without CONFIG_MLX5_CLS_ACT.
 */
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep *rep;
	struct mlx5e_rep_priv *rpriv;
	struct rhashtable_iter iter;
	struct mlx5e_tc_flow *flow;
	unsigned long i;
	int err;

	xa_for_each(&esw->offloads.vport_reps, i, rep) {
		rpriv = rep->rep_data[REP_ETH].priv;
		/* skip representors that are not (or no longer) set up */
		if (!rpriv || !rpriv->netdev)
			continue;

		rhashtable_walk_enter(&rpriv->tc_ht, &iter);
		rhashtable_walk_start(&iter);
		while ((flow = rhashtable_walk_next(&iter)) != NULL) {
			/* walk_next() yields ERR_PTR(-EAGAIN) on a concurrent
			 * resize; skip and keep iterating.
			 */
			if (IS_ERR(flow))
				continue;

			err = mlx5_esw_ipsec_modify_flow_dests(esw, flow);
			if (err)
				mlx5_core_warn_once(mdev,
						    "Failed to modify flow dests for IPsec");
		}
		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	}
#endif
}
174