// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/eswitch.h>
#include <net/inet_ecn.h>
#include "mlx5_ib.h"
#include "counters.h"
#include "devx.h"
#include "fs.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

enum {
	MATCH_CRITERIA_ENABLE_OUTER_BIT,
	MATCH_CRITERIA_ENABLE_MISC_BIT,
	MATCH_CRITERIA_ENABLE_INNER_BIT,
	MATCH_CRITERIA_ENABLE_MISC2_BIT
};

#define HEADER_IS_ZERO(match_criteria, headers)				      \
	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
		     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

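/* Report which fte_match_param sections carry a non-zero match mask. */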
static u8 get_match_criteria_enable(u32 *match_criteria)
{
	u8 match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MATCH_CRITERIA_ENABLE_INNER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
		MATCH_CRITERIA_ENABLE_MISC2_BIT;

	return match_criteria_enable;
}

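/* Program the ip_protocol match; refuse to overwrite an existing, conflicting value. */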
static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	u8 entry_mask;
	u8 entry_val;
	int err = 0;

	if (!mask)
		goto out;

	entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
			      ip_protocol);
	entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
			     ip_protocol);
	if (!entry_mask) {
		MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
		MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
		goto out;
	}
	/* Don't override existing ip protocol */
	if (mask != entry_mask || val != entry_val)
		err = -EINVAL;
out:
	return err;
}

static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
			   bool inner)
{
	if (inner) {
		MLX5_SET(fte_match_set_misc,
			 misc_c, inner_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, inner_ipv6_flow_label, val);
	} else {
		MLX5_SET(fte_match_set_misc,
			 misc_c, outer_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, outer_ipv6_flow_label, val);
	}
}

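/* Split the ToS/traffic-class byte into the ip_ecn (low 2 bits) and ip_dscp (high 6 bits) match fields. */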
static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}

static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
{
	if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
		return -EOPNOTSUPP;

	return 0;
}

#define LAST_ETH_FIELD vlan_tag
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id
#define LAST_DROP_FIELD size
#define LAST_COUNTERS_FIELD counters

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)				\
	memchr_inv((void *)&filter.field + sizeof(filter.field), 0,	\
		   sizeof(filter) - offsetofend(typeof(filter), field))

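/*
 * Fold a raw ib_flow_action (modify header, decap or packet reformat) into
 * the mlx5_flow_act, rejecting a second action of the same kind.
 */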
int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action)
{

	switch (maction->ib_action.type) {
	case IB_FLOW_ACTION_UNSPECIFIED:
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
			if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
				return -EINVAL;
			action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			action->modify_hdr =
				maction->flow_action_raw.modify_hdr;
			return 0;
		}
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_DECAP) {
			if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
				return -EINVAL;
			action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			return 0;
		}
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
			if (action->action &
			    MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
				return -EINVAL;
			action->action |=
				MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			action->pkt_reformat =
				maction->flow_action_raw.pkt_reformat;
			return 0;
		}
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}

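/*
 * Translate a single ib_flow_spec into the mlx5 match criteria/value buffers
 * and, for action specs, into the flow context and mlx5_flow_act.
 */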
static int parse_flow_attr(struct mlx5_core_dev *mdev,
			   struct mlx5_flow_spec *spec,
			   const union ib_flow_spec *ib_spec,
			   const struct ib_flow_attr *flow_attr,
			   struct mlx5_flow_act *action, u32 prev_type)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	u32 *match_c = spec->match_criteria;
	u32 *match_v = spec->match_value;
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
					   misc_parameters);
	void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
					    misc_parameters_2);
	void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
					    misc_parameters_2);
	void *headers_c;
	void *headers_v;
	int match_ipv;
	int ret;

	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 inner_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version);
	} else {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 outer_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	}

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -EOPNOTSUPP;

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				ib_spec->eth.mask.dst_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				ib_spec->eth.val.dst_mac);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				ib_spec->eth.mask.src_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				ib_spec->eth.val.src_mac);

		if (ib_spec->eth.mask.vlan_tag) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_cfi,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_cfi,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_prio,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_prio,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
		}
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ntohs(ib_spec->eth.val.ether_type));
		break;
	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -EOPNOTSUPP;

		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, MLX5_FS_IPV4_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IP);
		}

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.src_ip,
		       sizeof(ib_spec->ipv4.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.src_ip,
		       sizeof(ib_spec->ipv4.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.dst_ip,
		       sizeof(ib_spec->ipv4.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.dst_ip,
		       sizeof(ib_spec->ipv4.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);

		if (set_proto(headers_c, headers_v,
			      ib_spec->ipv4.mask.proto,
			      ib_spec->ipv4.val.proto))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_IPV6:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
			return -EOPNOTSUPP;

		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, MLX5_FS_IPV6_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IPV6);
		}

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.src_ip,
		       sizeof(ib_spec->ipv6.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.src_ip,
		       sizeof(ib_spec->ipv6.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.dst_ip,
		       sizeof(ib_spec->ipv6.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.dst_ip,
		       sizeof(ib_spec->ipv6.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv6.mask.traffic_class,
			ib_spec->ipv6.val.traffic_class);

		if (set_proto(headers_c, headers_v,
			      ib_spec->ipv6.mask.next_hdr,
			      ib_spec->ipv6.val.next_hdr))
			return -EINVAL;

		set_flow_label(misc_params_c, misc_params_v,
			       ntohl(ib_spec->ipv6.mask.flow_label),
			       ntohl(ib_spec->ipv6.val.flow_label),
			       ib_spec->type & IB_FLOW_SPEC_INNER);
		break;
	case IB_FLOW_SPEC_ESP:
		return -EOPNOTSUPP;
	case IB_FLOW_SPEC_TCP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_GRE:
		if (ib_spec->gre.mask.c_ks_res0_ver)
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_GRE);

		MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
			 ntohs(ib_spec->gre.mask.protocol));
		MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
			 ntohs(ib_spec->gre.val.protocol));

		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				    gre_key.nvgre.hi),
		       &ib_spec->gre.mask.key,
		       sizeof(ib_spec->gre.mask.key));
		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
				    gre_key.nvgre.hi),
		       &ib_spec->gre.val.key,
		       sizeof(ib_spec->gre.val.key));
		break;
	case IB_FLOW_SPEC_MPLS:
		switch (prev_type) {
		case IB_FLOW_SPEC_UDP:
			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
						   ft_field_support.outer_first_mpls_over_udp),
						   &ib_spec->mpls.mask.tag))
				return -EOPNOTSUPP;

			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
					    outer_first_mpls_over_udp),
			       &ib_spec->mpls.val.tag,
			       sizeof(ib_spec->mpls.val.tag));
			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
					    outer_first_mpls_over_udp),
			       &ib_spec->mpls.mask.tag,
			       sizeof(ib_spec->mpls.mask.tag));
			break;
		case IB_FLOW_SPEC_GRE:
			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
						   ft_field_support.outer_first_mpls_over_gre),
						   &ib_spec->mpls.mask.tag))
				return -EOPNOTSUPP;

			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
					    outer_first_mpls_over_gre),
			       &ib_spec->mpls.val.tag,
			       sizeof(ib_spec->mpls.val.tag));
			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
					    outer_first_mpls_over_gre),
			       &ib_spec->mpls.mask.tag,
			       sizeof(ib_spec->mpls.mask.tag));
			break;
		default:
			if (ib_spec->type & IB_FLOW_SPEC_INNER) {
				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
							   ft_field_support.inner_first_mpls),
							   &ib_spec->mpls.mask.tag))
					return -EOPNOTSUPP;

				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
						    inner_first_mpls),
				       &ib_spec->mpls.val.tag,
				       sizeof(ib_spec->mpls.val.tag));
				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
						    inner_first_mpls),
				       &ib_spec->mpls.mask.tag,
				       sizeof(ib_spec->mpls.mask.tag));
			} else {
				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
							   ft_field_support.outer_first_mpls),
							   &ib_spec->mpls.mask.tag))
					return -EOPNOTSUPP;

				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
						    outer_first_mpls),
				       &ib_spec->mpls.val.tag,
				       sizeof(ib_spec->mpls.val.tag));
				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
						    outer_first_mpls),
				       &ib_spec->mpls.mask.tag,
				       sizeof(ib_spec->mpls.mask.tag));
			}
		}
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
					 LAST_TUNNEL_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
			 ntohl(ib_spec->tunnel.mask.tunnel_id));
		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
			 ntohl(ib_spec->tunnel.val.tunnel_id));
		break;
	case IB_FLOW_SPEC_ACTION_TAG:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
					 LAST_FLOW_TAG_FIELD))
			return -EOPNOTSUPP;
		if (ib_spec->flow_tag.tag_id >= BIT(24))
			return -EINVAL;

		flow_context->flow_tag = ib_spec->flow_tag.tag_id;
		flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
					 LAST_DROP_FIELD))
			return -EOPNOTSUPP;
		action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
		break;
	case IB_FLOW_SPEC_ACTION_HANDLE:
		ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
			flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
		if (ret)
			return ret;
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
					 LAST_COUNTERS_FIELD))
			return -EOPNOTSUPP;

		/* for now support only one counters spec per flow */
		if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
			return -EINVAL;

		action->counters = ib_spec->flow_count.counters;
		action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table and this rule
 * could steal other multicast packets.
 */
static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
{
	union ib_flow_spec *flow_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->num_of_specs < 1)
		return false;

	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
		struct ib_flow_spec_ipv4 *ipv4_spec;

		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
			return true;

		return false;
	}

	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
		struct ib_flow_spec_eth *eth_spec;

		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
		       is_multicast_ether_addr(eth_spec->val.dst_mac);
	}

	return false;
}

static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
			       const struct ib_flow_attr *flow_attr,
			       bool check_inner)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	int match_ipv = check_inner ?
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version) :
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
	bool ipv4_spec_valid, ipv6_spec_valid;
	unsigned int ip_spec_type = 0;
	bool has_ethertype = false;
	unsigned int spec_index;
	bool mask_valid = true;
	u16 eth_type = 0;
	bool type_valid;

	/* Validate that ethertype is correct */
	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
		    ib_spec->eth.mask.ether_type) {
			mask_valid = (ib_spec->eth.mask.ether_type ==
				      htons(0xffff));
			has_ethertype = true;
			eth_type = ntohs(ib_spec->eth.val.ether_type);
		} else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
			   (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
			ip_spec_type = ib_spec->type;
		}
		ib_spec = (void *)ib_spec + ib_spec->size;
	}

	type_valid = (!has_ethertype) || (!ip_spec_type);
	if (!type_valid && mask_valid) {
		ipv4_spec_valid = (eth_type == ETH_P_IP) &&
			(ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
		ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
			(ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));

		type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
			(((eth_type == ETH_P_MPLS_UC) ||
			  (eth_type == ETH_P_MPLS_MC)) && match_ipv);
	}

	return type_valid;
}

static bool is_valid_attr(struct mlx5_core_dev *mdev,
			  const struct ib_flow_attr *flow_attr)
{
	return is_valid_ethertype(mdev, flow_attr, false) &&
	       is_valid_ethertype(mdev, flow_attr, true);
}

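/*
 * Drop a reference (when the caller added a rule to this table) and destroy
 * the prio's flow table once the last reference is gone.
 */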
static void put_flow_table(struct mlx5_ib_dev *dev,
			   struct mlx5_ib_flow_prio *prio, bool ft_added)
{
	prio->refcount -= !!ft_added;
	if (!prio->refcount) {
		mlx5_destroy_flow_table(prio->flow_table);
		prio->flow_table = NULL;
	}
}

static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
							     struct mlx5_ib_flow_handler,
							     ibflow);
	struct mlx5_ib_flow_handler *iter, *tmp;
	struct mlx5_ib_dev *dev = handler->dev;

	mutex_lock(&dev->flow_db->lock);

	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
		mlx5_del_flow_rules(iter->rule);
		put_flow_table(dev, iter->prio, true);
		list_del(&iter->list);
		kfree(iter);
	}

	mlx5_del_flow_rules(handler->rule);
	put_flow_table(dev, handler->prio, true);
	mlx5_ib_counters_clear_description(handler->ibcounters);
	mutex_unlock(&dev->flow_db->lock);
	if (handler->flow_matcher)
		atomic_dec(&handler->flow_matcher->usecnt);
	kfree(handler);

	return 0;
}

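/*
 * Each IB flow priority spans two core priorities: don't-trap rules take the
 * first slot, normal rules the second.
 */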
static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
{
	priority *= 2;
	if (!dont_trap)
		priority++;
	return priority;
}

enum flow_table_type {
	MLX5_IB_FT_RX,
	MLX5_IB_FT_TX
};

#define MLX5_FS_MAX_TYPES	 6
#define MLX5_FS_MAX_ENTRIES	 BIT(16)

static bool mlx5_ib_shared_ft_allowed(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, shared_object_to_user_object_allowed);
}

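/* Back the prio with a freshly created auto-grouped flow table. */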
static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
					   struct mlx5_flow_namespace *ns,
					   struct mlx5_ib_flow_prio *prio,
					   int priority,
					   int num_entries, int num_groups,
					   u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ft_attr.prio = priority;
	ft_attr.max_fte = num_entries;
	ft_attr.flags = flags;
	ft_attr.autogroup.max_num_groups = num_groups;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ERR_CAST(ft);

	prio->flow_table = ft;
	prio->refcount = 0;
	return prio;
}

static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
						struct ib_flow_attr *flow_attr,
						enum flow_table_type ft_type)
{
	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
	struct mlx5_flow_namespace *ns = NULL;
	enum mlx5_flow_namespace_type fn_type;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_table *ft;
	int max_table_size;
	int num_entries;
	int num_groups;
	bool esw_encap;
	u32 flags = 0;
	int priority;

	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
						       log_max_ft_size));
	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		if (flow_is_multicast_only(flow_attr) && !dont_trap)
			priority = MLX5_IB_FLOW_MCAST_PRIO;
		else
			priority = ib_prio_to_core_prio(flow_attr->priority,
							dont_trap);
		if (ft_type == MLX5_IB_FT_RX) {
			fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
			prio = &dev->flow_db->prios[priority];
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
					reformat_l3_tunnel_to_l2))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		} else {
			max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(
				dev->mdev, log_max_ft_size));
			fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
			prio = &dev->flow_db->egress_prios[priority];
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		}
		ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
		num_entries = MLX5_FS_MAX_ENTRIES;
		num_groups = MLX5_FS_MAX_TYPES;
		break;
	case IB_FLOW_ATTR_ALL_DEFAULT:
	case IB_FLOW_ATTR_MC_DEFAULT:
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
		build_leftovers_ft_param(&priority, &num_entries, &num_groups);
		prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
		break;
	case IB_FLOW_ATTR_SNIFFER:
		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
					allow_sniffer_and_nic_rx_shared_tir))
			return ERR_PTR(-EOPNOTSUPP);

		ns = mlx5_get_flow_namespace(
			dev->mdev, ft_type == MLX5_IB_FT_RX ?
					   MLX5_FLOW_NAMESPACE_SNIFFER_RX :
					   MLX5_FLOW_NAMESPACE_SNIFFER_TX);

		prio = &dev->flow_db->sniffer[ft_type];
		priority = 0;
		num_entries = 1;
		num_groups = 1;
		break;
	default:
		break;
	}

	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	max_table_size = min_t(int, num_entries, max_table_size);

	ft = prio->flow_table;
	if (!ft)
		return _get_prio(dev, ns, prio, priority, max_table_size,
				 num_groups, flags);

	return prio;
}

enum {
	RDMA_RX_ECN_OPCOUNTER_PRIO,
	RDMA_RX_CNP_OPCOUNTER_PRIO,
};

enum {
	RDMA_TX_CNP_OPCOUNTER_PRIO,
};

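/*
 * Restrict a match to traffic from the given vHCA port; needs
 * source_vhca_port support on both the RDMA RX and TX flow tables.
 */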
static int set_vhca_port_spec(struct mlx5_ib_dev *dev, u32 port_num,
			      struct mlx5_flow_spec *spec)
{
	if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
					ft_field_support.source_vhca_port) ||
	    !MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
					ft_field_support.source_vhca_port))
		return -EOPNOTSUPP;

	MLX5_SET_TO_ONES(fte_match_param, &spec->match_criteria,
			 misc_parameters.source_vhca_port);
	MLX5_SET(fte_match_param, &spec->match_value,
		 misc_parameters.source_vhca_port, port_num);

	return 0;
}

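/* Build a match for ECN CE-marked packets of the requested IP version on the given port. */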
static int set_ecn_ce_spec(struct mlx5_ib_dev *dev, u32 port_num,
			   struct mlx5_flow_spec *spec, int ipv)
{
	if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
					ft_field_support.outer_ip_version))
		return -EOPNOTSUPP;

	if (mlx5_core_mp_enabled(dev->mdev) &&
	    set_vhca_port_spec(dev, port_num, spec))
		return -EOPNOTSUPP;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ip_ecn);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_ecn,
		 INET_ECN_CE);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
		 ipv);

	spec->match_criteria_enable =
		get_match_criteria_enable(spec->match_criteria);

	return 0;
}

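/* Build a match for congestion notification packets (BTH opcode CNP) on the given port. */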
static int set_cnp_spec(struct mlx5_ib_dev *dev, u32 port_num,
			struct mlx5_flow_spec *spec)
{
	if (mlx5_core_mp_enabled(dev->mdev) &&
	    set_vhca_port_spec(dev, port_num, spec))
		return -EOPNOTSUPP;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters.bth_opcode);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters.bth_opcode,
		 IB_BTH_OPCODE_CNP);

	spec->match_criteria_enable =
		get_match_criteria_enable(spec->match_criteria);

	return 0;
}

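/*
 * Install the steering rules that feed an optional flow counter of the given
 * type in the RDMA RX/TX counter namespaces.
 */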
int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
			 struct mlx5_ib_op_fc *opfc,
			 enum mlx5_ib_optional_counter_type type)
{
	enum mlx5_flow_namespace_type fn_type;
	int priority, i, err, spec_num;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_destination dst;
	struct mlx5_flow_namespace *ns;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_spec *spec;

	spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	switch (type) {
	case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS:
		if (set_ecn_ce_spec(dev, port_num, &spec[0],
				    MLX5_FS_IPV4_VERSION) ||
		    set_ecn_ce_spec(dev, port_num, &spec[1],
				    MLX5_FS_IPV6_VERSION)) {
			err = -EOPNOTSUPP;
			goto free;
		}
		spec_num = 2;
		fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
		priority = RDMA_RX_ECN_OPCOUNTER_PRIO;
		break;

	case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS:
		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
					ft_field_support_2_nic_receive_rdma.bth_opcode) ||
		    set_cnp_spec(dev, port_num, &spec[0])) {
			err = -EOPNOTSUPP;
			goto free;
		}
		spec_num = 1;
		fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
		priority = RDMA_RX_CNP_OPCOUNTER_PRIO;
		break;

	case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS:
		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
					ft_field_support_2_nic_transmit_rdma.bth_opcode) ||
		    set_cnp_spec(dev, port_num, &spec[0])) {
			err = -EOPNOTSUPP;
			goto free;
		}
		spec_num = 1;
		fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
		priority = RDMA_TX_CNP_OPCOUNTER_PRIO;
		break;

	default:
		err = -EOPNOTSUPP;
		goto free;
	}

	ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
	if (!ns) {
		err = -EOPNOTSUPP;
		goto free;
	}

	prio = &dev->flow_db->opfcs[type];
	if (!prio->flow_table) {
		prio = _get_prio(dev, ns, prio, priority,
				 dev->num_ports * MAX_OPFC_RULES, 1, 0);
		if (IS_ERR(prio)) {
			err = PTR_ERR(prio);
			goto free;
		}
	}

	dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dst.counter_id = mlx5_fc_id(opfc->fc);

	flow_act.action =
		MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;

	for (i = 0; i < spec_num; i++) {
		opfc->rule[i] = mlx5_add_flow_rules(prio->flow_table, &spec[i],
						    &flow_act, &dst, 1);
		if (IS_ERR(opfc->rule[i])) {
			err = PTR_ERR(opfc->rule[i]);
			goto del_rules;
		}
	}
	prio->refcount += spec_num;
	kfree(spec);

	return 0;

del_rules:
	for (i -= 1; i >= 0; i--)
		mlx5_del_flow_rules(opfc->rule[i]);
	put_flow_table(dev, prio, false);
free:
	kfree(spec);
	return err;
}

void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_op_fc *opfc,
			     enum mlx5_ib_optional_counter_type type)
{
	int i;

	for (i = 0; i < MAX_OPFC_RULES && opfc->rule[i]; i++) {
		mlx5_del_flow_rules(opfc->rule[i]);
		put_flow_table(dev, &dev->flow_db->opfcs[type], true);
	}
}

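/*
 * When an underlay QPN is provided and bth_dst_qp matching is supported,
 * restrict the rule to that QP.
 */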
static void set_underlay_qp(struct mlx5_ib_dev *dev,
			    struct mlx5_flow_spec *spec,
			    u32 underlay_qpn)
{
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
					   spec->match_criteria,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					   misc_parameters);

	if (underlay_qpn &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
				      ft_field_support.bth_dst_qp)) {
		MLX5_SET(fte_match_set_misc,
			 misc_params_v, bth_dst_qp, underlay_qpn);
		MLX5_SET(fte_match_set_misc,
			 misc_params_c, bth_dst_qp, 0xffffff);
	}
}

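/*
 * Match traffic coming from the representor's vport, using register C0
 * metadata when the eswitch provides it and the source_port field otherwise.
 */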
mlx5_ib_set_rule_source_port(struct mlx5_ib_dev * dev,struct mlx5_flow_spec * spec,struct mlx5_eswitch_rep * rep)1005f7c4ffdaSLeon Romanovsky static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
1006f7c4ffdaSLeon Romanovsky struct mlx5_flow_spec *spec,
1007f7c4ffdaSLeon Romanovsky struct mlx5_eswitch_rep *rep)
1008f7c4ffdaSLeon Romanovsky {
1009f7c4ffdaSLeon Romanovsky struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
1010f7c4ffdaSLeon Romanovsky void *misc;
1011f7c4ffdaSLeon Romanovsky
1012f7c4ffdaSLeon Romanovsky if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1013f7c4ffdaSLeon Romanovsky misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1014f7c4ffdaSLeon Romanovsky misc_parameters_2);
1015f7c4ffdaSLeon Romanovsky
1016f7c4ffdaSLeon Romanovsky MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1017658cfcebSMark Bloch mlx5_eswitch_get_vport_metadata_for_match(rep->esw,
1018f7c4ffdaSLeon Romanovsky rep->vport));
1019f7c4ffdaSLeon Romanovsky misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1020f7c4ffdaSLeon Romanovsky misc_parameters_2);
1021f7c4ffdaSLeon Romanovsky
1022f7c4ffdaSLeon Romanovsky MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1023f7c4ffdaSLeon Romanovsky mlx5_eswitch_get_vport_metadata_mask());
1024f7c4ffdaSLeon Romanovsky } else {
1025f7c4ffdaSLeon Romanovsky misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1026f7c4ffdaSLeon Romanovsky misc_parameters);
1027f7c4ffdaSLeon Romanovsky
1028f7c4ffdaSLeon Romanovsky MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
1029f7c4ffdaSLeon Romanovsky
1030f7c4ffdaSLeon Romanovsky misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1031f7c4ffdaSLeon Romanovsky misc_parameters);
1032f7c4ffdaSLeon Romanovsky
1033f7c4ffdaSLeon Romanovsky MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1034f7c4ffdaSLeon Romanovsky }
1035f7c4ffdaSLeon Romanovsky }
1036f7c4ffdaSLeon Romanovsky
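/*
 * Translate a verbs ib_flow_attr into a single steering rule: parse each
 * ib_flow_spec into the match parameters, derive the action bits
 * (forward/allow/drop/count), and add the rule to the priority's flow
 * table.  Called with dev->flow_db->lock held.
 */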
1037f7c4ffdaSLeon Romanovsky static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
1038f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_prio *ft_prio,
1039f7c4ffdaSLeon Romanovsky const struct ib_flow_attr *flow_attr,
1040f7c4ffdaSLeon Romanovsky struct mlx5_flow_destination *dst,
1041f7c4ffdaSLeon Romanovsky u32 underlay_qpn,
1042f7c4ffdaSLeon Romanovsky struct mlx5_ib_create_flow *ucmd)
1043f7c4ffdaSLeon Romanovsky {
1044f7c4ffdaSLeon Romanovsky struct mlx5_flow_table *ft = ft_prio->flow_table;
1045f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_handler *handler;
1046f7c4ffdaSLeon Romanovsky struct mlx5_flow_act flow_act = {};
1047f7c4ffdaSLeon Romanovsky struct mlx5_flow_spec *spec;
1048f7c4ffdaSLeon Romanovsky struct mlx5_flow_destination dest_arr[2] = {};
1049f7c4ffdaSLeon Romanovsky struct mlx5_flow_destination *rule_dst = dest_arr;
1050f7c4ffdaSLeon Romanovsky const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
1051f7c4ffdaSLeon Romanovsky unsigned int spec_index;
1052f7c4ffdaSLeon Romanovsky u32 prev_type = 0;
1053f7c4ffdaSLeon Romanovsky int err = 0;
1054f7c4ffdaSLeon Romanovsky int dest_num = 0;
1055f7c4ffdaSLeon Romanovsky bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
1056f7c4ffdaSLeon Romanovsky
1057f7c4ffdaSLeon Romanovsky if (!is_valid_attr(dev->mdev, flow_attr))
1058f7c4ffdaSLeon Romanovsky return ERR_PTR(-EINVAL);
1059f7c4ffdaSLeon Romanovsky
1060f7c4ffdaSLeon Romanovsky if (dev->is_rep && is_egress)
1061f7c4ffdaSLeon Romanovsky return ERR_PTR(-EINVAL);
1062f7c4ffdaSLeon Romanovsky
1063f7c4ffdaSLeon Romanovsky spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1064f7c4ffdaSLeon Romanovsky handler = kzalloc(sizeof(*handler), GFP_KERNEL);
1065f7c4ffdaSLeon Romanovsky if (!handler || !spec) {
1066f7c4ffdaSLeon Romanovsky err = -ENOMEM;
1067f7c4ffdaSLeon Romanovsky goto free;
1068f7c4ffdaSLeon Romanovsky }
1069f7c4ffdaSLeon Romanovsky
1070f7c4ffdaSLeon Romanovsky INIT_LIST_HEAD(&handler->list);
1071f7c4ffdaSLeon Romanovsky
1072f7c4ffdaSLeon Romanovsky for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
1073f7c4ffdaSLeon Romanovsky err = parse_flow_attr(dev->mdev, spec,
1074f7c4ffdaSLeon Romanovsky ib_flow, flow_attr, &flow_act,
1075f7c4ffdaSLeon Romanovsky prev_type);
1076f7c4ffdaSLeon Romanovsky if (err < 0)
1077f7c4ffdaSLeon Romanovsky goto free;
1078f7c4ffdaSLeon Romanovsky
1079f7c4ffdaSLeon Romanovsky prev_type = ((union ib_flow_spec *)ib_flow)->type;
1080f7c4ffdaSLeon Romanovsky ib_flow += ((union ib_flow_spec *)ib_flow)->size;
1081f7c4ffdaSLeon Romanovsky }
1082f7c4ffdaSLeon Romanovsky
1083f7c4ffdaSLeon Romanovsky if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
1084f7c4ffdaSLeon Romanovsky memcpy(&dest_arr[0], dst, sizeof(*dst));
1085f7c4ffdaSLeon Romanovsky dest_num++;
1086f7c4ffdaSLeon Romanovsky }
1087f7c4ffdaSLeon Romanovsky
1088f7c4ffdaSLeon Romanovsky if (!flow_is_multicast_only(flow_attr))
1089f7c4ffdaSLeon Romanovsky set_underlay_qp(dev, spec, underlay_qpn);
1090f7c4ffdaSLeon Romanovsky
1091e6ac9f60SMaor Gottlieb if (dev->is_rep && flow_attr->type != IB_FLOW_ATTR_SNIFFER) {
1092f7c4ffdaSLeon Romanovsky struct mlx5_eswitch_rep *rep;
1093f7c4ffdaSLeon Romanovsky
1094f7c4ffdaSLeon Romanovsky rep = dev->port[flow_attr->port - 1].rep;
1095f7c4ffdaSLeon Romanovsky if (!rep) {
1096f7c4ffdaSLeon Romanovsky err = -EINVAL;
1097f7c4ffdaSLeon Romanovsky goto free;
1098f7c4ffdaSLeon Romanovsky }
1099f7c4ffdaSLeon Romanovsky
1100f7c4ffdaSLeon Romanovsky mlx5_ib_set_rule_source_port(dev, spec, rep);
1101f7c4ffdaSLeon Romanovsky }
1102f7c4ffdaSLeon Romanovsky
1103f7c4ffdaSLeon Romanovsky spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
1104f7c4ffdaSLeon Romanovsky
1105f7c4ffdaSLeon Romanovsky if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1106f7c4ffdaSLeon Romanovsky struct mlx5_ib_mcounters *mcounters;
1107f7c4ffdaSLeon Romanovsky
1108f7c4ffdaSLeon Romanovsky err = mlx5_ib_flow_counters_set_data(flow_act.counters, ucmd);
1109f7c4ffdaSLeon Romanovsky if (err)
1110f7c4ffdaSLeon Romanovsky goto free;
1111f7c4ffdaSLeon Romanovsky
1112f7c4ffdaSLeon Romanovsky mcounters = to_mcounters(flow_act.counters);
1113f7c4ffdaSLeon Romanovsky handler->ibcounters = flow_act.counters;
1114f7c4ffdaSLeon Romanovsky dest_arr[dest_num].type =
1115f7c4ffdaSLeon Romanovsky MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1116f7c4ffdaSLeon Romanovsky dest_arr[dest_num].counter_id =
1117f7c4ffdaSLeon Romanovsky mlx5_fc_id(mcounters->hw_cntrs_hndl);
1118f7c4ffdaSLeon Romanovsky dest_num++;
1119f7c4ffdaSLeon Romanovsky }
1120f7c4ffdaSLeon Romanovsky
1121f7c4ffdaSLeon Romanovsky if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
1122f7c4ffdaSLeon Romanovsky if (!dest_num)
1123f7c4ffdaSLeon Romanovsky rule_dst = NULL;
1124f7c4ffdaSLeon Romanovsky } else {
1125f7c4ffdaSLeon Romanovsky if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)
1126f7c4ffdaSLeon Romanovsky flow_act.action |=
1127f7c4ffdaSLeon Romanovsky MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1128f7c4ffdaSLeon Romanovsky if (is_egress)
1129f7c4ffdaSLeon Romanovsky flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1130f7c4ffdaSLeon Romanovsky else if (dest_num)
1131f7c4ffdaSLeon Romanovsky flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1132f7c4ffdaSLeon Romanovsky }
1133f7c4ffdaSLeon Romanovsky
1134f7c4ffdaSLeon Romanovsky if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
1135f7c4ffdaSLeon Romanovsky (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
1136f7c4ffdaSLeon Romanovsky flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
1137f7c4ffdaSLeon Romanovsky mlx5_ib_warn(dev, "Flow tag %u and attribute type %x aren't allowed in leftovers\n",
1138f7c4ffdaSLeon Romanovsky spec->flow_context.flow_tag, flow_attr->type);
1139f7c4ffdaSLeon Romanovsky err = -EINVAL;
1140f7c4ffdaSLeon Romanovsky goto free;
1141f7c4ffdaSLeon Romanovsky }
1142f7c4ffdaSLeon Romanovsky handler->rule = mlx5_add_flow_rules(ft, spec,
1143f7c4ffdaSLeon Romanovsky &flow_act,
1144f7c4ffdaSLeon Romanovsky rule_dst, dest_num);
1145f7c4ffdaSLeon Romanovsky
1146f7c4ffdaSLeon Romanovsky if (IS_ERR(handler->rule)) {
1147f7c4ffdaSLeon Romanovsky err = PTR_ERR(handler->rule);
1148f7c4ffdaSLeon Romanovsky goto free;
1149f7c4ffdaSLeon Romanovsky }
1150f7c4ffdaSLeon Romanovsky
1151f7c4ffdaSLeon Romanovsky ft_prio->refcount++;
1152f7c4ffdaSLeon Romanovsky handler->prio = ft_prio;
1153f7c4ffdaSLeon Romanovsky handler->dev = dev;
1154f7c4ffdaSLeon Romanovsky
1155f7c4ffdaSLeon Romanovsky ft_prio->flow_table = ft;
1156f7c4ffdaSLeon Romanovsky free:
1157f7c4ffdaSLeon Romanovsky if (err && handler) {
1158f7c4ffdaSLeon Romanovsky mlx5_ib_counters_clear_description(handler->ibcounters);
1159f7c4ffdaSLeon Romanovsky kfree(handler);
1160f7c4ffdaSLeon Romanovsky }
1161f7c4ffdaSLeon Romanovsky kvfree(spec);
1162f7c4ffdaSLeon Romanovsky return err ? ERR_PTR(err) : handler;
1163f7c4ffdaSLeon Romanovsky }
1164f7c4ffdaSLeon Romanovsky
1165f7c4ffdaSLeon Romanovsky static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
1166f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_prio *ft_prio,
1167f7c4ffdaSLeon Romanovsky const struct ib_flow_attr *flow_attr,
1168f7c4ffdaSLeon Romanovsky struct mlx5_flow_destination *dst)
1169f7c4ffdaSLeon Romanovsky {
1170f7c4ffdaSLeon Romanovsky return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
1171f7c4ffdaSLeon Romanovsky }
1172f7c4ffdaSLeon Romanovsky
1173f7c4ffdaSLeon Romanovsky enum {
1174f7c4ffdaSLeon Romanovsky LEFTOVERS_MC,
1175f7c4ffdaSLeon Romanovsky LEFTOVERS_UC,
1176f7c4ffdaSLeon Romanovsky };
1177f7c4ffdaSLeon Romanovsky
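/*
 * Install catch-all ("leftovers") rules for packets that matched nothing
 * else: one rule for multicast destination MACs and, for
 * IB_FLOW_ATTR_ALL_DEFAULT, a second rule for unicast.  The unicast handler
 * is linked onto the multicast handler so both are removed together.
 */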
1178f7c4ffdaSLeon Romanovsky static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
1179f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_prio *ft_prio,
1180f7c4ffdaSLeon Romanovsky struct ib_flow_attr *flow_attr,
1181f7c4ffdaSLeon Romanovsky struct mlx5_flow_destination *dst)
1182f7c4ffdaSLeon Romanovsky {
1183f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_handler *handler_ucast = NULL;
1184f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_handler *handler = NULL;
1185f7c4ffdaSLeon Romanovsky
1186f7c4ffdaSLeon Romanovsky static struct {
1187f7c4ffdaSLeon Romanovsky struct ib_flow_attr flow_attr;
1188f7c4ffdaSLeon Romanovsky struct ib_flow_spec_eth eth_flow;
1189f7c4ffdaSLeon Romanovsky } leftovers_specs[] = {
1190f7c4ffdaSLeon Romanovsky [LEFTOVERS_MC] = {
1191f7c4ffdaSLeon Romanovsky .flow_attr = {
1192f7c4ffdaSLeon Romanovsky .num_of_specs = 1,
1193f7c4ffdaSLeon Romanovsky .size = sizeof(leftovers_specs[0])
1194f7c4ffdaSLeon Romanovsky },
1195f7c4ffdaSLeon Romanovsky .eth_flow = {
1196f7c4ffdaSLeon Romanovsky .type = IB_FLOW_SPEC_ETH,
1197f7c4ffdaSLeon Romanovsky .size = sizeof(struct ib_flow_spec_eth),
1198f7c4ffdaSLeon Romanovsky .mask = {.dst_mac = {0x1} },
1199f7c4ffdaSLeon Romanovsky .val = {.dst_mac = {0x1} }
1200f7c4ffdaSLeon Romanovsky }
1201f7c4ffdaSLeon Romanovsky },
1202f7c4ffdaSLeon Romanovsky [LEFTOVERS_UC] = {
1203f7c4ffdaSLeon Romanovsky .flow_attr = {
1204f7c4ffdaSLeon Romanovsky .num_of_specs = 1,
1205f7c4ffdaSLeon Romanovsky .size = sizeof(leftovers_specs[0])
1206f7c4ffdaSLeon Romanovsky },
1207f7c4ffdaSLeon Romanovsky .eth_flow = {
1208f7c4ffdaSLeon Romanovsky .type = IB_FLOW_SPEC_ETH,
1209f7c4ffdaSLeon Romanovsky .size = sizeof(struct ib_flow_spec_eth),
1210f7c4ffdaSLeon Romanovsky .mask = {.dst_mac = {0x1} },
1211f7c4ffdaSLeon Romanovsky .val = {.dst_mac = {} }
1212f7c4ffdaSLeon Romanovsky }
1213f7c4ffdaSLeon Romanovsky }
1214f7c4ffdaSLeon Romanovsky };
1215f7c4ffdaSLeon Romanovsky
1216f7c4ffdaSLeon Romanovsky handler = create_flow_rule(dev, ft_prio,
1217f7c4ffdaSLeon Romanovsky &leftovers_specs[LEFTOVERS_MC].flow_attr,
1218f7c4ffdaSLeon Romanovsky dst);
1219f7c4ffdaSLeon Romanovsky if (!IS_ERR(handler) &&
1220f7c4ffdaSLeon Romanovsky flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
1221f7c4ffdaSLeon Romanovsky handler_ucast = create_flow_rule(dev, ft_prio,
1222f7c4ffdaSLeon Romanovsky &leftovers_specs[LEFTOVERS_UC].flow_attr,
1223f7c4ffdaSLeon Romanovsky dst);
1224f7c4ffdaSLeon Romanovsky if (IS_ERR(handler_ucast)) {
1225f7c4ffdaSLeon Romanovsky mlx5_del_flow_rules(handler->rule);
1226f7c4ffdaSLeon Romanovsky ft_prio->refcount--;
1227f7c4ffdaSLeon Romanovsky kfree(handler);
1228f7c4ffdaSLeon Romanovsky handler = handler_ucast;
1229f7c4ffdaSLeon Romanovsky } else {
1230f7c4ffdaSLeon Romanovsky list_add(&handler_ucast->list, &handler->list);
1231f7c4ffdaSLeon Romanovsky }
1232f7c4ffdaSLeon Romanovsky }
1233f7c4ffdaSLeon Romanovsky
1234f7c4ffdaSLeon Romanovsky return handler;
1235f7c4ffdaSLeon Romanovsky }
1236f7c4ffdaSLeon Romanovsky
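/*
 * A sniffer attachment mirrors all traffic: add one match-all rule in the
 * RX table and one in the TX table, chained on a single handler list so
 * they are destroyed as a pair.
 */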
1237f7c4ffdaSLeon Romanovsky static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
1238f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_prio *ft_rx,
1239f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_prio *ft_tx,
1240f7c4ffdaSLeon Romanovsky struct mlx5_flow_destination *dst)
1241f7c4ffdaSLeon Romanovsky {
1242f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_handler *handler_rx;
1243f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_handler *handler_tx;
1244f7c4ffdaSLeon Romanovsky int err;
1245f7c4ffdaSLeon Romanovsky static const struct ib_flow_attr flow_attr = {
1246f7c4ffdaSLeon Romanovsky .num_of_specs = 0,
1247e6ac9f60SMaor Gottlieb .type = IB_FLOW_ATTR_SNIFFER,
1248f7c4ffdaSLeon Romanovsky .size = sizeof(flow_attr)
1249f7c4ffdaSLeon Romanovsky };
1250f7c4ffdaSLeon Romanovsky
1251f7c4ffdaSLeon Romanovsky handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
1252f7c4ffdaSLeon Romanovsky if (IS_ERR(handler_rx)) {
1253f7c4ffdaSLeon Romanovsky err = PTR_ERR(handler_rx);
1254f7c4ffdaSLeon Romanovsky goto err;
1255f7c4ffdaSLeon Romanovsky }
1256f7c4ffdaSLeon Romanovsky
1257f7c4ffdaSLeon Romanovsky handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
1258f7c4ffdaSLeon Romanovsky if (IS_ERR(handler_tx)) {
1259f7c4ffdaSLeon Romanovsky err = PTR_ERR(handler_tx);
1260f7c4ffdaSLeon Romanovsky goto err_tx;
1261f7c4ffdaSLeon Romanovsky }
1262f7c4ffdaSLeon Romanovsky
1263f7c4ffdaSLeon Romanovsky list_add(&handler_tx->list, &handler_rx->list);
1264f7c4ffdaSLeon Romanovsky
1265f7c4ffdaSLeon Romanovsky return handler_rx;
1266f7c4ffdaSLeon Romanovsky
1267f7c4ffdaSLeon Romanovsky err_tx:
1268f7c4ffdaSLeon Romanovsky mlx5_del_flow_rules(handler_rx->rule);
1269f7c4ffdaSLeon Romanovsky ft_rx->refcount--;
1270f7c4ffdaSLeon Romanovsky kfree(handler_rx);
1271f7c4ffdaSLeon Romanovsky err:
1272f7c4ffdaSLeon Romanovsky return ERR_PTR(err);
1273f7c4ffdaSLeon Romanovsky }
1274f7c4ffdaSLeon Romanovsky
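/*
 * ib_create_flow() entry point.  Validates the user command (at most one
 * counters data section, no unknown trailing bytes), resolves the RX or TX
 * flow table for the requested priority and, under flow_db->lock,
 * dispatches to the normal, leftovers or sniffer rule constructors.
 */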
1275f7c4ffdaSLeon Romanovsky static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
1276f7c4ffdaSLeon Romanovsky struct ib_flow_attr *flow_attr,
1277f7c4ffdaSLeon Romanovsky struct ib_udata *udata)
1278f7c4ffdaSLeon Romanovsky {
1279f7c4ffdaSLeon Romanovsky struct mlx5_ib_dev *dev = to_mdev(qp->device);
1280f7c4ffdaSLeon Romanovsky struct mlx5_ib_qp *mqp = to_mqp(qp);
1281f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_handler *handler = NULL;
1282f7c4ffdaSLeon Romanovsky struct mlx5_flow_destination *dst = NULL;
1283f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
1284f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_prio *ft_prio;
1285f7c4ffdaSLeon Romanovsky bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
1286f7c4ffdaSLeon Romanovsky struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
1287f7c4ffdaSLeon Romanovsky size_t min_ucmd_sz, required_ucmd_sz;
1288f7c4ffdaSLeon Romanovsky int err;
1289f7c4ffdaSLeon Romanovsky int underlay_qpn;
1290f7c4ffdaSLeon Romanovsky
1291f7c4ffdaSLeon Romanovsky if (udata && udata->inlen) {
129270c1430fSLeon Romanovsky min_ucmd_sz = offsetofend(struct mlx5_ib_create_flow, reserved);
1293f7c4ffdaSLeon Romanovsky if (udata->inlen < min_ucmd_sz)
1294f7c4ffdaSLeon Romanovsky return ERR_PTR(-EOPNOTSUPP);
1295f7c4ffdaSLeon Romanovsky
1296f7c4ffdaSLeon Romanovsky err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
1297f7c4ffdaSLeon Romanovsky if (err)
1298f7c4ffdaSLeon Romanovsky return ERR_PTR(err);
1299f7c4ffdaSLeon Romanovsky
1300f7c4ffdaSLeon Romanovsky /* currently, only one counters data section is supported */
1301f7c4ffdaSLeon Romanovsky if (ucmd_hdr.ncounters_data > 1)
1302f7c4ffdaSLeon Romanovsky return ERR_PTR(-EINVAL);
1303f7c4ffdaSLeon Romanovsky
1304f7c4ffdaSLeon Romanovsky required_ucmd_sz = min_ucmd_sz +
1305f7c4ffdaSLeon Romanovsky sizeof(struct mlx5_ib_flow_counters_data) *
1306f7c4ffdaSLeon Romanovsky ucmd_hdr.ncounters_data;
1307f7c4ffdaSLeon Romanovsky if (udata->inlen > required_ucmd_sz &&
1308f7c4ffdaSLeon Romanovsky !ib_is_udata_cleared(udata, required_ucmd_sz,
1309f7c4ffdaSLeon Romanovsky udata->inlen - required_ucmd_sz))
1310f7c4ffdaSLeon Romanovsky return ERR_PTR(-EOPNOTSUPP);
1311f7c4ffdaSLeon Romanovsky
1312f7c4ffdaSLeon Romanovsky ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
1313f7c4ffdaSLeon Romanovsky if (!ucmd)
1314f7c4ffdaSLeon Romanovsky return ERR_PTR(-ENOMEM);
1315f7c4ffdaSLeon Romanovsky
1316f7c4ffdaSLeon Romanovsky err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
1317f7c4ffdaSLeon Romanovsky if (err)
1318f7c4ffdaSLeon Romanovsky goto free_ucmd;
1319f7c4ffdaSLeon Romanovsky }
1320f7c4ffdaSLeon Romanovsky
1321f7c4ffdaSLeon Romanovsky if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
1322f7c4ffdaSLeon Romanovsky err = -ENOMEM;
1323f7c4ffdaSLeon Romanovsky goto free_ucmd;
1324f7c4ffdaSLeon Romanovsky }
1325f7c4ffdaSLeon Romanovsky
13262adcb4c5SMaor Gottlieb if (flow_attr->flags &
13272adcb4c5SMaor Gottlieb ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
1328f7c4ffdaSLeon Romanovsky err = -EINVAL;
1329f7c4ffdaSLeon Romanovsky goto free_ucmd;
1330f7c4ffdaSLeon Romanovsky }
1331f7c4ffdaSLeon Romanovsky
1332f7c4ffdaSLeon Romanovsky if (is_egress &&
1333f7c4ffdaSLeon Romanovsky (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
1334f7c4ffdaSLeon Romanovsky flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
1335f7c4ffdaSLeon Romanovsky err = -EINVAL;
1336f7c4ffdaSLeon Romanovsky goto free_ucmd;
1337f7c4ffdaSLeon Romanovsky }
1338f7c4ffdaSLeon Romanovsky
1339f7c4ffdaSLeon Romanovsky dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1340f7c4ffdaSLeon Romanovsky if (!dst) {
1341f7c4ffdaSLeon Romanovsky err = -ENOMEM;
1342f7c4ffdaSLeon Romanovsky goto free_ucmd;
1343f7c4ffdaSLeon Romanovsky }
1344f7c4ffdaSLeon Romanovsky
1345f7c4ffdaSLeon Romanovsky mutex_lock(&dev->flow_db->lock);
1346f7c4ffdaSLeon Romanovsky
1347f7c4ffdaSLeon Romanovsky ft_prio = get_flow_table(dev, flow_attr,
1348f7c4ffdaSLeon Romanovsky is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
1349f7c4ffdaSLeon Romanovsky if (IS_ERR(ft_prio)) {
1350f7c4ffdaSLeon Romanovsky err = PTR_ERR(ft_prio);
1351f7c4ffdaSLeon Romanovsky goto unlock;
1352f7c4ffdaSLeon Romanovsky }
1353f7c4ffdaSLeon Romanovsky if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
1354f7c4ffdaSLeon Romanovsky ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
1355f7c4ffdaSLeon Romanovsky if (IS_ERR(ft_prio_tx)) {
1356f7c4ffdaSLeon Romanovsky err = PTR_ERR(ft_prio_tx);
1357f7c4ffdaSLeon Romanovsky ft_prio_tx = NULL;
1358f7c4ffdaSLeon Romanovsky goto destroy_ft;
1359f7c4ffdaSLeon Romanovsky }
1360f7c4ffdaSLeon Romanovsky }
1361f7c4ffdaSLeon Romanovsky
1362f7c4ffdaSLeon Romanovsky if (is_egress) {
1363f7c4ffdaSLeon Romanovsky dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
1364f7c4ffdaSLeon Romanovsky } else {
1365f7c4ffdaSLeon Romanovsky dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1366f7c4ffdaSLeon Romanovsky if (mqp->is_rss)
1367f7c4ffdaSLeon Romanovsky dst->tir_num = mqp->rss_qp.tirn;
1368f7c4ffdaSLeon Romanovsky else
1369f7c4ffdaSLeon Romanovsky dst->tir_num = mqp->raw_packet_qp.rq.tirn;
1370f7c4ffdaSLeon Romanovsky }
1371f7c4ffdaSLeon Romanovsky
1372156f3789SLeon Romanovsky switch (flow_attr->type) {
1373156f3789SLeon Romanovsky case IB_FLOW_ATTR_NORMAL:
1374f7c4ffdaSLeon Romanovsky underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ?
1375f7c4ffdaSLeon Romanovsky mqp->underlay_qpn :
1376f7c4ffdaSLeon Romanovsky 0;
1377f7c4ffdaSLeon Romanovsky handler = _create_flow_rule(dev, ft_prio, flow_attr, dst,
1378f7c4ffdaSLeon Romanovsky underlay_qpn, ucmd);
1379156f3789SLeon Romanovsky break;
1380156f3789SLeon Romanovsky case IB_FLOW_ATTR_ALL_DEFAULT:
1381156f3789SLeon Romanovsky case IB_FLOW_ATTR_MC_DEFAULT:
1382156f3789SLeon Romanovsky handler = create_leftovers_rule(dev, ft_prio, flow_attr, dst);
1383156f3789SLeon Romanovsky break;
1384156f3789SLeon Romanovsky case IB_FLOW_ATTR_SNIFFER:
1385f7c4ffdaSLeon Romanovsky handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
1386156f3789SLeon Romanovsky break;
1387156f3789SLeon Romanovsky default:
1388f7c4ffdaSLeon Romanovsky err = -EINVAL;
1389f7c4ffdaSLeon Romanovsky goto destroy_ft;
1390f7c4ffdaSLeon Romanovsky }
1391f7c4ffdaSLeon Romanovsky
1392f7c4ffdaSLeon Romanovsky if (IS_ERR(handler)) {
1393f7c4ffdaSLeon Romanovsky err = PTR_ERR(handler);
1394f7c4ffdaSLeon Romanovsky handler = NULL;
1395f7c4ffdaSLeon Romanovsky goto destroy_ft;
1396f7c4ffdaSLeon Romanovsky }
1397f7c4ffdaSLeon Romanovsky
1398f7c4ffdaSLeon Romanovsky mutex_unlock(&dev->flow_db->lock);
1399f7c4ffdaSLeon Romanovsky kfree(dst);
1400f7c4ffdaSLeon Romanovsky kfree(ucmd);
1401f7c4ffdaSLeon Romanovsky
1402f7c4ffdaSLeon Romanovsky return &handler->ibflow;
1403f7c4ffdaSLeon Romanovsky
1404f7c4ffdaSLeon Romanovsky destroy_ft:
1405f7c4ffdaSLeon Romanovsky put_flow_table(dev, ft_prio, false);
1406f7c4ffdaSLeon Romanovsky if (ft_prio_tx)
1407f7c4ffdaSLeon Romanovsky put_flow_table(dev, ft_prio_tx, false);
1408f7c4ffdaSLeon Romanovsky unlock:
1409f7c4ffdaSLeon Romanovsky mutex_unlock(&dev->flow_db->lock);
1410f7c4ffdaSLeon Romanovsky kfree(dst);
1411f7c4ffdaSLeon Romanovsky free_ucmd:
1412f7c4ffdaSLeon Romanovsky kfree(ucmd);
1413f7c4ffdaSLeon Romanovsky return ERR_PTR(err);
1414f7c4ffdaSLeon Romanovsky }
1415f7c4ffdaSLeon Romanovsky
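/*
 * Resolve the flow priority used by the raw (matcher based) steering API
 * for @ns_type and @user_priority, creating its flow table on first use.
 * Table size and tunnel decap/reformat flags are derived from the
 * corresponding flow table capabilities.
 */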
1416f7c4ffdaSLeon Romanovsky static struct mlx5_ib_flow_prio *
1417e74d2e4dSMark Bloch _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
1418e74d2e4dSMark Bloch enum mlx5_flow_namespace_type ns_type,
1419f7c4ffdaSLeon Romanovsky bool mcast)
1420f7c4ffdaSLeon Romanovsky {
1421f7c4ffdaSLeon Romanovsky struct mlx5_flow_namespace *ns = NULL;
1422f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_prio *prio = NULL;
1423f7c4ffdaSLeon Romanovsky int max_table_size = 0;
1424f7c4ffdaSLeon Romanovsky bool esw_encap;
1425f7c4ffdaSLeon Romanovsky u32 flags = 0;
1426f7c4ffdaSLeon Romanovsky int priority;
1427f7c4ffdaSLeon Romanovsky
1428f7c4ffdaSLeon Romanovsky if (mcast)
1429f7c4ffdaSLeon Romanovsky priority = MLX5_IB_FLOW_MCAST_PRIO;
1430f7c4ffdaSLeon Romanovsky else
1431e74d2e4dSMark Bloch priority = ib_prio_to_core_prio(user_priority, false);
1432f7c4ffdaSLeon Romanovsky
1433f7c4ffdaSLeon Romanovsky esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
1434f7c4ffdaSLeon Romanovsky DEVLINK_ESWITCH_ENCAP_MODE_NONE;
1435e74d2e4dSMark Bloch switch (ns_type) {
1436156f3789SLeon Romanovsky case MLX5_FLOW_NAMESPACE_BYPASS:
1437156f3789SLeon Romanovsky max_table_size = BIT(
1438156f3789SLeon Romanovsky MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size));
1439f7c4ffdaSLeon Romanovsky if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
1440f7c4ffdaSLeon Romanovsky flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
1441f7c4ffdaSLeon Romanovsky if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
1442f7c4ffdaSLeon Romanovsky reformat_l3_tunnel_to_l2) &&
1443f7c4ffdaSLeon Romanovsky !esw_encap)
1444f7c4ffdaSLeon Romanovsky flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
1445156f3789SLeon Romanovsky break;
1446156f3789SLeon Romanovsky case MLX5_FLOW_NAMESPACE_EGRESS:
1447f7c4ffdaSLeon Romanovsky max_table_size = BIT(
1448f7c4ffdaSLeon Romanovsky MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
1449156f3789SLeon Romanovsky if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) &&
1450156f3789SLeon Romanovsky !esw_encap)
1451f7c4ffdaSLeon Romanovsky flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
1452156f3789SLeon Romanovsky break;
145322c3f2f5SMaor Gottlieb case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
1454f7c4ffdaSLeon Romanovsky max_table_size = BIT(
1455f7c4ffdaSLeon Romanovsky MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
1456f7c4ffdaSLeon Romanovsky if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
1457f7c4ffdaSLeon Romanovsky flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
1458156f3789SLeon Romanovsky if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev,
1459156f3789SLeon Romanovsky reformat_l3_tunnel_to_l2) &&
1460f7c4ffdaSLeon Romanovsky esw_encap)
1461f7c4ffdaSLeon Romanovsky flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
1462e74d2e4dSMark Bloch priority = user_priority;
1463156f3789SLeon Romanovsky break;
1464156f3789SLeon Romanovsky case MLX5_FLOW_NAMESPACE_RDMA_RX:
1465156f3789SLeon Romanovsky max_table_size = BIT(
1466156f3789SLeon Romanovsky MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size));
1467e74d2e4dSMark Bloch priority = user_priority;
1468156f3789SLeon Romanovsky break;
1469156f3789SLeon Romanovsky case MLX5_FLOW_NAMESPACE_RDMA_TX:
1470156f3789SLeon Romanovsky max_table_size = BIT(
1471156f3789SLeon Romanovsky MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size));
1472e74d2e4dSMark Bloch priority = user_priority;
1473156f3789SLeon Romanovsky break;
1474156f3789SLeon Romanovsky default:
1475156f3789SLeon Romanovsky break;
1476f7c4ffdaSLeon Romanovsky }
1477f7c4ffdaSLeon Romanovsky
1478f7c4ffdaSLeon Romanovsky max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
1479f7c4ffdaSLeon Romanovsky
1480e74d2e4dSMark Bloch ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
1481f7c4ffdaSLeon Romanovsky if (!ns)
1482f7c4ffdaSLeon Romanovsky return ERR_PTR(-EOPNOTSUPP);
1483f7c4ffdaSLeon Romanovsky
1484e74d2e4dSMark Bloch switch (ns_type) {
1485156f3789SLeon Romanovsky case MLX5_FLOW_NAMESPACE_BYPASS:
1486f7c4ffdaSLeon Romanovsky prio = &dev->flow_db->prios[priority];
1487156f3789SLeon Romanovsky break;
1488156f3789SLeon Romanovsky case MLX5_FLOW_NAMESPACE_EGRESS:
1489f7c4ffdaSLeon Romanovsky prio = &dev->flow_db->egress_prios[priority];
1490156f3789SLeon Romanovsky break;
149122c3f2f5SMaor Gottlieb case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
1492a973f86bSMaor Gottlieb prio = &dev->flow_db->fdb[priority];
1493156f3789SLeon Romanovsky break;
1494156f3789SLeon Romanovsky case MLX5_FLOW_NAMESPACE_RDMA_RX:
1495f7c4ffdaSLeon Romanovsky prio = &dev->flow_db->rdma_rx[priority];
1496156f3789SLeon Romanovsky break;
1497156f3789SLeon Romanovsky case MLX5_FLOW_NAMESPACE_RDMA_TX:
1498f7c4ffdaSLeon Romanovsky prio = &dev->flow_db->rdma_tx[priority];
1499156f3789SLeon Romanovsky break;
1500156f3789SLeon Romanovsky default: return ERR_PTR(-EINVAL);
1501156f3789SLeon Romanovsky }
1502f7c4ffdaSLeon Romanovsky
1503f7c4ffdaSLeon Romanovsky if (!prio)
1504f7c4ffdaSLeon Romanovsky return ERR_PTR(-EINVAL);
1505f7c4ffdaSLeon Romanovsky
1506f7c4ffdaSLeon Romanovsky if (prio->flow_table)
1507f7c4ffdaSLeon Romanovsky return prio;
1508f7c4ffdaSLeon Romanovsky
15090c6ab0caSMark Bloch return _get_prio(dev, ns, prio, priority, max_table_size,
1510f7c4ffdaSLeon Romanovsky MLX5_FS_MAX_TYPES, flags);
1511f7c4ffdaSLeon Romanovsky }
1512f7c4ffdaSLeon Romanovsky
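/*
 * Add a rule whose match value is taken verbatim from user space: the
 * matcher supplies the match criteria and criteria_enable bits, @cmd_in
 * supplies the match value, and the caller supplies the destination array
 * and flow context.
 */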
1513f7c4ffdaSLeon Romanovsky static struct mlx5_ib_flow_handler *
1514f7c4ffdaSLeon Romanovsky _create_raw_flow_rule(struct mlx5_ib_dev *dev,
1515f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_prio *ft_prio,
1516f7c4ffdaSLeon Romanovsky struct mlx5_flow_destination *dst,
1517f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_matcher *fs_matcher,
1518f7c4ffdaSLeon Romanovsky struct mlx5_flow_context *flow_context,
1519f7c4ffdaSLeon Romanovsky struct mlx5_flow_act *flow_act,
1520f7c4ffdaSLeon Romanovsky void *cmd_in, int inlen,
1521f7c4ffdaSLeon Romanovsky int dst_num)
1522f7c4ffdaSLeon Romanovsky {
1523f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_handler *handler;
1524f7c4ffdaSLeon Romanovsky struct mlx5_flow_spec *spec;
1525f7c4ffdaSLeon Romanovsky struct mlx5_flow_table *ft = ft_prio->flow_table;
1526f7c4ffdaSLeon Romanovsky int err = 0;
1527f7c4ffdaSLeon Romanovsky
1528f7c4ffdaSLeon Romanovsky spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1529f7c4ffdaSLeon Romanovsky handler = kzalloc(sizeof(*handler), GFP_KERNEL);
1530f7c4ffdaSLeon Romanovsky if (!handler || !spec) {
1531f7c4ffdaSLeon Romanovsky err = -ENOMEM;
1532f7c4ffdaSLeon Romanovsky goto free;
1533f7c4ffdaSLeon Romanovsky }
1534f7c4ffdaSLeon Romanovsky
1535f7c4ffdaSLeon Romanovsky INIT_LIST_HEAD(&handler->list);
1536f7c4ffdaSLeon Romanovsky
1537f7c4ffdaSLeon Romanovsky memcpy(spec->match_value, cmd_in, inlen);
1538f7c4ffdaSLeon Romanovsky memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
1539f7c4ffdaSLeon Romanovsky fs_matcher->mask_len);
1540f7c4ffdaSLeon Romanovsky spec->match_criteria_enable = fs_matcher->match_criteria_enable;
1541f7c4ffdaSLeon Romanovsky spec->flow_context = *flow_context;
1542f7c4ffdaSLeon Romanovsky
1543f7c4ffdaSLeon Romanovsky handler->rule = mlx5_add_flow_rules(ft, spec,
1544f7c4ffdaSLeon Romanovsky flow_act, dst, dst_num);
1545f7c4ffdaSLeon Romanovsky
1546f7c4ffdaSLeon Romanovsky if (IS_ERR(handler->rule)) {
1547f7c4ffdaSLeon Romanovsky err = PTR_ERR(handler->rule);
1548f7c4ffdaSLeon Romanovsky goto free;
1549f7c4ffdaSLeon Romanovsky }
1550f7c4ffdaSLeon Romanovsky
1551f7c4ffdaSLeon Romanovsky ft_prio->refcount++;
1552f7c4ffdaSLeon Romanovsky handler->prio = ft_prio;
1553f7c4ffdaSLeon Romanovsky handler->dev = dev;
1554f7c4ffdaSLeon Romanovsky ft_prio->flow_table = ft;
1555f7c4ffdaSLeon Romanovsky
1556f7c4ffdaSLeon Romanovsky free:
1557f7c4ffdaSLeon Romanovsky if (err)
1558f7c4ffdaSLeon Romanovsky kfree(handler);
1559f7c4ffdaSLeon Romanovsky kvfree(spec);
1560f7c4ffdaSLeon Romanovsky return err ? ERR_PTR(err) : handler;
1561f7c4ffdaSLeon Romanovsky }
1562f7c4ffdaSLeon Romanovsky
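/*
 * A raw rule is treated as multicast when its outer headers match a
 * multicast destination MAC or a multicast IPv4 destination address, in
 * both the value and the mask.
 */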
1563f7c4ffdaSLeon Romanovsky static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
1564f7c4ffdaSLeon Romanovsky void *match_v)
1565f7c4ffdaSLeon Romanovsky {
1566f7c4ffdaSLeon Romanovsky void *match_c;
1567f7c4ffdaSLeon Romanovsky void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
1568f7c4ffdaSLeon Romanovsky void *dmac, *dmac_mask;
1569f7c4ffdaSLeon Romanovsky void *ipv4, *ipv4_mask;
1570f7c4ffdaSLeon Romanovsky
1571f7c4ffdaSLeon Romanovsky if (!(fs_matcher->match_criteria_enable &
1572f7c4ffdaSLeon Romanovsky (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
1573f7c4ffdaSLeon Romanovsky return false;
1574f7c4ffdaSLeon Romanovsky
1575f7c4ffdaSLeon Romanovsky match_c = fs_matcher->matcher_mask.match_params;
1576f7c4ffdaSLeon Romanovsky match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
1577f7c4ffdaSLeon Romanovsky outer_headers);
1578f7c4ffdaSLeon Romanovsky match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
1579f7c4ffdaSLeon Romanovsky outer_headers);
1580f7c4ffdaSLeon Romanovsky
1581f7c4ffdaSLeon Romanovsky dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
1582f7c4ffdaSLeon Romanovsky dmac_47_16);
1583f7c4ffdaSLeon Romanovsky dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
1584f7c4ffdaSLeon Romanovsky dmac_47_16);
1585f7c4ffdaSLeon Romanovsky
1586f7c4ffdaSLeon Romanovsky if (is_multicast_ether_addr(dmac) &&
1587f7c4ffdaSLeon Romanovsky is_multicast_ether_addr(dmac_mask))
1588f7c4ffdaSLeon Romanovsky return true;
1589f7c4ffdaSLeon Romanovsky
1590f7c4ffdaSLeon Romanovsky ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
1591f7c4ffdaSLeon Romanovsky dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1592f7c4ffdaSLeon Romanovsky
1593f7c4ffdaSLeon Romanovsky ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
1594f7c4ffdaSLeon Romanovsky dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1595f7c4ffdaSLeon Romanovsky
1596f7c4ffdaSLeon Romanovsky if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
1597f7c4ffdaSLeon Romanovsky ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
1598f7c4ffdaSLeon Romanovsky return true;
1599f7c4ffdaSLeon Romanovsky
1600f7c4ffdaSLeon Romanovsky return false;
1601f7c4ffdaSLeon Romanovsky }
1602f7c4ffdaSLeon Romanovsky
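/*
 * Core of the raw/DEVX flow creation path: pick the flow table from the
 * matcher's namespace type and priority, translate the destination (TIR,
 * flow table or wire port) plus an optional counter into
 * mlx5_flow_destination entries, and insert the rule.
 */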
1603f7c4ffdaSLeon Romanovsky static struct mlx5_ib_flow_handler *raw_fs_rule_add(
1604f7c4ffdaSLeon Romanovsky struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
1605f7c4ffdaSLeon Romanovsky struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act,
1606f7c4ffdaSLeon Romanovsky u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type)
1607f7c4ffdaSLeon Romanovsky {
1608f7c4ffdaSLeon Romanovsky struct mlx5_flow_destination *dst;
1609f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_prio *ft_prio;
1610f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_handler *handler;
1611f7c4ffdaSLeon Romanovsky int dst_num = 0;
1612f7c4ffdaSLeon Romanovsky bool mcast;
1613f7c4ffdaSLeon Romanovsky int err;
1614f7c4ffdaSLeon Romanovsky
1615f7c4ffdaSLeon Romanovsky if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
1616f7c4ffdaSLeon Romanovsky return ERR_PTR(-EOPNOTSUPP);
1617f7c4ffdaSLeon Romanovsky
1618f7c4ffdaSLeon Romanovsky if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
1619f7c4ffdaSLeon Romanovsky return ERR_PTR(-ENOMEM);
1620f7c4ffdaSLeon Romanovsky
1621f7c4ffdaSLeon Romanovsky dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
1622f7c4ffdaSLeon Romanovsky if (!dst)
1623f7c4ffdaSLeon Romanovsky return ERR_PTR(-ENOMEM);
1624f7c4ffdaSLeon Romanovsky
1625f7c4ffdaSLeon Romanovsky mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
1626f7c4ffdaSLeon Romanovsky mutex_lock(&dev->flow_db->lock);
1627f7c4ffdaSLeon Romanovsky
1628e74d2e4dSMark Bloch ft_prio = _get_flow_table(dev, fs_matcher->priority,
1629e74d2e4dSMark Bloch fs_matcher->ns_type, mcast);
1630f7c4ffdaSLeon Romanovsky if (IS_ERR(ft_prio)) {
1631f7c4ffdaSLeon Romanovsky err = PTR_ERR(ft_prio);
1632f7c4ffdaSLeon Romanovsky goto unlock;
1633f7c4ffdaSLeon Romanovsky }
1634f7c4ffdaSLeon Romanovsky
1635156f3789SLeon Romanovsky switch (dest_type) {
1636156f3789SLeon Romanovsky case MLX5_FLOW_DESTINATION_TYPE_TIR:
1637f7c4ffdaSLeon Romanovsky dst[dst_num].type = dest_type;
1638f7c4ffdaSLeon Romanovsky dst[dst_num++].tir_num = dest_id;
1639f7c4ffdaSLeon Romanovsky flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1640156f3789SLeon Romanovsky break;
1641156f3789SLeon Romanovsky case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
1642f7c4ffdaSLeon Romanovsky dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
1643f7c4ffdaSLeon Romanovsky dst[dst_num++].ft_num = dest_id;
1644f7c4ffdaSLeon Romanovsky flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1645156f3789SLeon Romanovsky break;
1646156f3789SLeon Romanovsky case MLX5_FLOW_DESTINATION_TYPE_PORT:
1647f7c4ffdaSLeon Romanovsky dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
1648f7c4ffdaSLeon Romanovsky flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1649156f3789SLeon Romanovsky break;
1650156f3789SLeon Romanovsky default:
1651156f3789SLeon Romanovsky break;
1652f7c4ffdaSLeon Romanovsky }
1653f7c4ffdaSLeon Romanovsky
1654f7c4ffdaSLeon Romanovsky if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1655f7c4ffdaSLeon Romanovsky dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1656f7c4ffdaSLeon Romanovsky dst[dst_num].counter_id = counter_id;
1657f7c4ffdaSLeon Romanovsky dst_num++;
1658f7c4ffdaSLeon Romanovsky }
1659f7c4ffdaSLeon Romanovsky
1660c7370080SMaor Gottlieb handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
1661c7370080SMaor Gottlieb fs_matcher, flow_context, flow_act,
1662f7c4ffdaSLeon Romanovsky cmd_in, inlen, dst_num);
1663f7c4ffdaSLeon Romanovsky
1664f7c4ffdaSLeon Romanovsky if (IS_ERR(handler)) {
1665f7c4ffdaSLeon Romanovsky err = PTR_ERR(handler);
1666f7c4ffdaSLeon Romanovsky goto destroy_ft;
1667f7c4ffdaSLeon Romanovsky }
1668f7c4ffdaSLeon Romanovsky
1669f7c4ffdaSLeon Romanovsky mutex_unlock(&dev->flow_db->lock);
1670f7c4ffdaSLeon Romanovsky atomic_inc(&fs_matcher->usecnt);
1671f7c4ffdaSLeon Romanovsky handler->flow_matcher = fs_matcher;
1672f7c4ffdaSLeon Romanovsky
1673f7c4ffdaSLeon Romanovsky kfree(dst);
1674f7c4ffdaSLeon Romanovsky
1675f7c4ffdaSLeon Romanovsky return handler;
1676f7c4ffdaSLeon Romanovsky
1677f7c4ffdaSLeon Romanovsky destroy_ft:
1678f7c4ffdaSLeon Romanovsky put_flow_table(dev, ft_prio, false);
1679f7c4ffdaSLeon Romanovsky unlock:
1680f7c4ffdaSLeon Romanovsky mutex_unlock(&dev->flow_db->lock);
1681f7c4ffdaSLeon Romanovsky kfree(dst);
1682f7c4ffdaSLeon Romanovsky
1683f7c4ffdaSLeon Romanovsky return ERR_PTR(err);
1684f7c4ffdaSLeon Romanovsky }
1685f7c4ffdaSLeon Romanovsky
1686f7c4ffdaSLeon Romanovsky static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
1687f7c4ffdaSLeon Romanovsky {
1688f7c4ffdaSLeon Romanovsky switch (maction->flow_action_raw.sub_type) {
1689f7c4ffdaSLeon Romanovsky case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
1690f7c4ffdaSLeon Romanovsky mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
1691f7c4ffdaSLeon Romanovsky maction->flow_action_raw.modify_hdr);
1692f7c4ffdaSLeon Romanovsky break;
1693f7c4ffdaSLeon Romanovsky case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
1694f7c4ffdaSLeon Romanovsky mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
1695f7c4ffdaSLeon Romanovsky maction->flow_action_raw.pkt_reformat);
1696f7c4ffdaSLeon Romanovsky break;
1697f7c4ffdaSLeon Romanovsky case MLX5_IB_FLOW_ACTION_DECAP:
1698f7c4ffdaSLeon Romanovsky break;
1699f7c4ffdaSLeon Romanovsky default:
1700f7c4ffdaSLeon Romanovsky break;
1701f7c4ffdaSLeon Romanovsky }
1702f7c4ffdaSLeon Romanovsky }
1703f7c4ffdaSLeon Romanovsky
1704f7c4ffdaSLeon Romanovsky static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
1705f7c4ffdaSLeon Romanovsky {
1706f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_action *maction = to_mflow_act(action);
1707f7c4ffdaSLeon Romanovsky
1708f7c4ffdaSLeon Romanovsky switch (action->type) {
1709f7c4ffdaSLeon Romanovsky case IB_FLOW_ACTION_UNSPECIFIED:
1710f7c4ffdaSLeon Romanovsky destroy_flow_action_raw(maction);
1711f7c4ffdaSLeon Romanovsky break;
1712f7c4ffdaSLeon Romanovsky default:
1713f7c4ffdaSLeon Romanovsky WARN_ON(true);
1714f7c4ffdaSLeon Romanovsky break;
1715f7c4ffdaSLeon Romanovsky }
1716f7c4ffdaSLeon Romanovsky
1717f7c4ffdaSLeon Romanovsky kfree(maction);
1718f7c4ffdaSLeon Romanovsky return 0;
1719f7c4ffdaSLeon Romanovsky }
1720f7c4ffdaSLeon Romanovsky
1721f7c4ffdaSLeon Romanovsky static int
1722f7c4ffdaSLeon Romanovsky mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
1723f7c4ffdaSLeon Romanovsky enum mlx5_flow_namespace_type *namespace)
1724f7c4ffdaSLeon Romanovsky {
1725f7c4ffdaSLeon Romanovsky switch (table_type) {
1726f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
1727f7c4ffdaSLeon Romanovsky *namespace = MLX5_FLOW_NAMESPACE_BYPASS;
1728f7c4ffdaSLeon Romanovsky break;
1729f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
1730f7c4ffdaSLeon Romanovsky *namespace = MLX5_FLOW_NAMESPACE_EGRESS;
1731f7c4ffdaSLeon Romanovsky break;
1732f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
173322c3f2f5SMaor Gottlieb *namespace = MLX5_FLOW_NAMESPACE_FDB_BYPASS;
1734f7c4ffdaSLeon Romanovsky break;
1735f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
1736f7c4ffdaSLeon Romanovsky *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
1737f7c4ffdaSLeon Romanovsky break;
1738f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX:
1739f7c4ffdaSLeon Romanovsky *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX;
1740f7c4ffdaSLeon Romanovsky break;
1741f7c4ffdaSLeon Romanovsky default:
1742f7c4ffdaSLeon Romanovsky return -EINVAL;
1743f7c4ffdaSLeon Romanovsky }
1744f7c4ffdaSLeon Romanovsky
1745f7c4ffdaSLeon Romanovsky return 0;
1746f7c4ffdaSLeon Romanovsky }
1747f7c4ffdaSLeon Romanovsky
1748f7c4ffdaSLeon Romanovsky static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
1749f7c4ffdaSLeon Romanovsky [MLX5_IB_FLOW_TYPE_NORMAL] = {
1750f7c4ffdaSLeon Romanovsky .type = UVERBS_ATTR_TYPE_PTR_IN,
1751f7c4ffdaSLeon Romanovsky .u.ptr = {
1752f7c4ffdaSLeon Romanovsky .len = sizeof(u16), /* data is priority */
1753f7c4ffdaSLeon Romanovsky .min_len = sizeof(u16),
1754f7c4ffdaSLeon Romanovsky }
1755f7c4ffdaSLeon Romanovsky },
1756f7c4ffdaSLeon Romanovsky [MLX5_IB_FLOW_TYPE_SNIFFER] = {
1757f7c4ffdaSLeon Romanovsky .type = UVERBS_ATTR_TYPE_PTR_IN,
1758f7c4ffdaSLeon Romanovsky UVERBS_ATTR_NO_DATA(),
1759f7c4ffdaSLeon Romanovsky },
1760f7c4ffdaSLeon Romanovsky [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
1761f7c4ffdaSLeon Romanovsky .type = UVERBS_ATTR_TYPE_PTR_IN,
1762f7c4ffdaSLeon Romanovsky UVERBS_ATTR_NO_DATA(),
1763f7c4ffdaSLeon Romanovsky },
1764f7c4ffdaSLeon Romanovsky [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
1765f7c4ffdaSLeon Romanovsky .type = UVERBS_ATTR_TYPE_PTR_IN,
1766f7c4ffdaSLeon Romanovsky UVERBS_ATTR_NO_DATA(),
1767f7c4ffdaSLeon Romanovsky },
1768f7c4ffdaSLeon Romanovsky };
1769f7c4ffdaSLeon Romanovsky
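/*
 * A DEVX object may be used as a flow destination only if it is a TIR or a
 * flow table; the destination id is recovered from the object's destruction
 * command box (dinbox).
 */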
1770f7c4ffdaSLeon Romanovsky static bool is_flow_dest(void *obj, int *dest_id, int *dest_type)
1771f7c4ffdaSLeon Romanovsky {
1772f7c4ffdaSLeon Romanovsky struct devx_obj *devx_obj = obj;
1773f7c4ffdaSLeon Romanovsky u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
1774f7c4ffdaSLeon Romanovsky
1775f7c4ffdaSLeon Romanovsky switch (opcode) {
1776f7c4ffdaSLeon Romanovsky case MLX5_CMD_OP_DESTROY_TIR:
1777f7c4ffdaSLeon Romanovsky *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1778f7c4ffdaSLeon Romanovsky *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
1779f7c4ffdaSLeon Romanovsky obj_id);
1780f7c4ffdaSLeon Romanovsky return true;
1781f7c4ffdaSLeon Romanovsky
1782f7c4ffdaSLeon Romanovsky case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
1783f7c4ffdaSLeon Romanovsky *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1784f7c4ffdaSLeon Romanovsky *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
1785f7c4ffdaSLeon Romanovsky table_id);
1786f7c4ffdaSLeon Romanovsky return true;
1787f7c4ffdaSLeon Romanovsky default:
1788f7c4ffdaSLeon Romanovsky return false;
1789f7c4ffdaSLeon Romanovsky }
1790f7c4ffdaSLeon Romanovsky }
1791f7c4ffdaSLeon Romanovsky
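/*
 * Decode the destination attributes of the create-flow method.  For the
 * bypass namespace a DEVX destination, a QP destination and the
 * default-miss/drop flags are mutually exclusive; FDB bypass accepts only a
 * DEVX flow table or drop, and RDMA_RX requires exactly one of a DEVX
 * object or a QP.
 */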
1792f7c4ffdaSLeon Romanovsky static int get_dests(struct uverbs_attr_bundle *attrs,
1793f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id,
1794f7c4ffdaSLeon Romanovsky int *dest_type, struct ib_qp **qp, u32 *flags)
1795f7c4ffdaSLeon Romanovsky {
1796f7c4ffdaSLeon Romanovsky bool dest_devx, dest_qp;
1797f7c4ffdaSLeon Romanovsky void *devx_obj;
1798f7c4ffdaSLeon Romanovsky int err;
1799f7c4ffdaSLeon Romanovsky
1800f7c4ffdaSLeon Romanovsky dest_devx = uverbs_attr_is_valid(attrs,
1801f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
1802f7c4ffdaSLeon Romanovsky dest_qp = uverbs_attr_is_valid(attrs,
1803f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
1804f7c4ffdaSLeon Romanovsky
1805f7c4ffdaSLeon Romanovsky *flags = 0;
1806f7c4ffdaSLeon Romanovsky err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
1807f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS |
1808f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP);
1809f7c4ffdaSLeon Romanovsky if (err)
1810f7c4ffdaSLeon Romanovsky return err;
1811f7c4ffdaSLeon Romanovsky
1812f7c4ffdaSLeon Romanovsky /* The two flags are mutually exclusive */
1813f7c4ffdaSLeon Romanovsky if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS &&
1814f7c4ffdaSLeon Romanovsky *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
1815f7c4ffdaSLeon Romanovsky return -EINVAL;
1816f7c4ffdaSLeon Romanovsky
1817f7c4ffdaSLeon Romanovsky if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
1818f7c4ffdaSLeon Romanovsky if (dest_devx && (dest_qp || *flags))
1819f7c4ffdaSLeon Romanovsky return -EINVAL;
1820f7c4ffdaSLeon Romanovsky else if (dest_qp && *flags)
1821f7c4ffdaSLeon Romanovsky return -EINVAL;
1822f7c4ffdaSLeon Romanovsky }
1823f7c4ffdaSLeon Romanovsky
1824f7c4ffdaSLeon Romanovsky /* For FDB, allow only a DEVX object or drop as the destination */
182522c3f2f5SMaor Gottlieb if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS &&
182622c3f2f5SMaor Gottlieb !(dest_devx || (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
1827f7c4ffdaSLeon Romanovsky return -EINVAL;
1828f7c4ffdaSLeon Romanovsky
1829f7c4ffdaSLeon Romanovsky /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
1830f7c4ffdaSLeon Romanovsky if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
1831f7c4ffdaSLeon Romanovsky ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
1832f7c4ffdaSLeon Romanovsky return -EINVAL;
1833f7c4ffdaSLeon Romanovsky
1834f7c4ffdaSLeon Romanovsky *qp = NULL;
1835f7c4ffdaSLeon Romanovsky if (dest_devx) {
1836f7c4ffdaSLeon Romanovsky devx_obj =
1837f7c4ffdaSLeon Romanovsky uverbs_attr_get_obj(attrs,
1838f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
1839f7c4ffdaSLeon Romanovsky
1840f7c4ffdaSLeon Romanovsky /* Verify that the given DEVX object is a flow
1841f7c4ffdaSLeon Romanovsky * steering destination.
1842f7c4ffdaSLeon Romanovsky */
1843f7c4ffdaSLeon Romanovsky if (!is_flow_dest(devx_obj, dest_id, dest_type))
1844f7c4ffdaSLeon Romanovsky return -EINVAL;
1845f7c4ffdaSLeon Romanovsky /* Allow only flow table as dest when inserting to FDB or RDMA_RX */
184622c3f2f5SMaor Gottlieb if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS ||
1847f7c4ffdaSLeon Romanovsky fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
1848f7c4ffdaSLeon Romanovsky *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1849f7c4ffdaSLeon Romanovsky return -EINVAL;
1850f7c4ffdaSLeon Romanovsky } else if (dest_qp) {
1851f7c4ffdaSLeon Romanovsky struct mlx5_ib_qp *mqp;
1852f7c4ffdaSLeon Romanovsky
1853f7c4ffdaSLeon Romanovsky *qp = uverbs_attr_get_obj(attrs,
1854f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
1855f7c4ffdaSLeon Romanovsky if (IS_ERR(*qp))
1856f7c4ffdaSLeon Romanovsky return PTR_ERR(*qp);
1857f7c4ffdaSLeon Romanovsky
1858f7c4ffdaSLeon Romanovsky if ((*qp)->qp_type != IB_QPT_RAW_PACKET)
1859f7c4ffdaSLeon Romanovsky return -EINVAL;
1860f7c4ffdaSLeon Romanovsky
1861f7c4ffdaSLeon Romanovsky mqp = to_mqp(*qp);
1862f7c4ffdaSLeon Romanovsky if (mqp->is_rss)
1863f7c4ffdaSLeon Romanovsky *dest_id = mqp->rss_qp.tirn;
1864f7c4ffdaSLeon Romanovsky else
1865f7c4ffdaSLeon Romanovsky *dest_id = mqp->raw_packet_qp.rq.tirn;
1866f7c4ffdaSLeon Romanovsky *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1867c7370080SMaor Gottlieb } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
1868c7370080SMaor Gottlieb fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
1869c7370080SMaor Gottlieb !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
1870f7c4ffdaSLeon Romanovsky *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
1871f7c4ffdaSLeon Romanovsky }
1872f7c4ffdaSLeon Romanovsky
1873f7c4ffdaSLeon Romanovsky if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
187423fcc7deSMichael Guralnik (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
187523fcc7deSMichael Guralnik fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX))
1876f7c4ffdaSLeon Romanovsky return -EINVAL;
1877f7c4ffdaSLeon Romanovsky
1878f7c4ffdaSLeon Romanovsky return 0;
1879f7c4ffdaSLeon Romanovsky }
1880f7c4ffdaSLeon Romanovsky
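/*
 * Check that the DEVX object is a flow counter and turn the user supplied
 * offset into an absolute counter id within the object's counter bulk.
 */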
1881f7c4ffdaSLeon Romanovsky static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
1882f7c4ffdaSLeon Romanovsky {
1883f7c4ffdaSLeon Romanovsky struct devx_obj *devx_obj = obj;
1884f7c4ffdaSLeon Romanovsky u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
1885f7c4ffdaSLeon Romanovsky
1886f7c4ffdaSLeon Romanovsky if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
1887f7c4ffdaSLeon Romanovsky
1888f7c4ffdaSLeon Romanovsky if (offset && offset >= devx_obj->flow_counter_bulk_size)
1889f7c4ffdaSLeon Romanovsky return false;
1890f7c4ffdaSLeon Romanovsky
1891f7c4ffdaSLeon Romanovsky *counter_id = MLX5_GET(dealloc_flow_counter_in,
1892f7c4ffdaSLeon Romanovsky devx_obj->dinbox,
1893f7c4ffdaSLeon Romanovsky flow_counter_id);
1894f7c4ffdaSLeon Romanovsky *counter_id += offset;
1895f7c4ffdaSLeon Romanovsky return true;
1896f7c4ffdaSLeon Romanovsky }
1897f7c4ffdaSLeon Romanovsky
1898f7c4ffdaSLeon Romanovsky return false;
1899f7c4ffdaSLeon Romanovsky }
1900f7c4ffdaSLeon Romanovsky
1901f7c4ffdaSLeon Romanovsky #define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
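/*
 * Handler for MLX5_IB_METHOD_CREATE_FLOW: requires CAP_NET_RAW, resolves
 * the destination, optional counter and flow-action attributes, then
 * creates the raw steering rule described by the matcher and match value
 * and binds it to the flow uobject.
 */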
1902f7c4ffdaSLeon Romanovsky static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
1903f7c4ffdaSLeon Romanovsky struct uverbs_attr_bundle *attrs)
1904f7c4ffdaSLeon Romanovsky {
1905f7c4ffdaSLeon Romanovsky struct mlx5_flow_context flow_context = {.flow_tag =
1906f7c4ffdaSLeon Romanovsky MLX5_FS_DEFAULT_FLOW_TAG};
1907f7c4ffdaSLeon Romanovsky u32 *offset_attr, offset = 0, counter_id = 0;
19080829d2daSDaria Velikovsky int dest_id, dest_type = -1, inlen, len, ret, i;
1909f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_handler *flow_handler;
1910f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_matcher *fs_matcher;
1911f7c4ffdaSLeon Romanovsky struct ib_uobject **arr_flow_actions;
1912f7c4ffdaSLeon Romanovsky struct ib_uflow_resources *uflow_res;
1913f7c4ffdaSLeon Romanovsky struct mlx5_flow_act flow_act = {};
1914f7c4ffdaSLeon Romanovsky struct ib_qp *qp = NULL;
1915f7c4ffdaSLeon Romanovsky void *devx_obj, *cmd_in;
1916f7c4ffdaSLeon Romanovsky struct ib_uobject *uobj;
1917f7c4ffdaSLeon Romanovsky struct mlx5_ib_dev *dev;
1918f7c4ffdaSLeon Romanovsky u32 flags;
1919f7c4ffdaSLeon Romanovsky
1920f7c4ffdaSLeon Romanovsky if (!capable(CAP_NET_RAW))
1921f7c4ffdaSLeon Romanovsky return -EPERM;
1922f7c4ffdaSLeon Romanovsky
1923f7c4ffdaSLeon Romanovsky fs_matcher = uverbs_attr_get_obj(attrs,
1924f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
1925f7c4ffdaSLeon Romanovsky uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
1926f7c4ffdaSLeon Romanovsky dev = mlx5_udata_to_mdev(&attrs->driver_udata);
1927f7c4ffdaSLeon Romanovsky
1928f7c4ffdaSLeon Romanovsky if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags))
1929f7c4ffdaSLeon Romanovsky return -EINVAL;
1930f7c4ffdaSLeon Romanovsky
1931f7c4ffdaSLeon Romanovsky if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS)
1932f7c4ffdaSLeon Romanovsky flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
1933f7c4ffdaSLeon Romanovsky
1934f7c4ffdaSLeon Romanovsky if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
1935f7c4ffdaSLeon Romanovsky flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1936f7c4ffdaSLeon Romanovsky
1937f7c4ffdaSLeon Romanovsky len = uverbs_attr_get_uobjs_arr(attrs,
1938f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
1939f7c4ffdaSLeon Romanovsky if (len) {
1940f7c4ffdaSLeon Romanovsky devx_obj = arr_flow_actions[0]->object;
1941f7c4ffdaSLeon Romanovsky
1942f7c4ffdaSLeon Romanovsky if (uverbs_attr_is_valid(attrs,
1943f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {
1944f7c4ffdaSLeon Romanovsky
1945f7c4ffdaSLeon Romanovsky int num_offsets = uverbs_attr_ptr_get_array_size(
1946f7c4ffdaSLeon Romanovsky attrs,
1947f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
1948f7c4ffdaSLeon Romanovsky sizeof(u32));
1949f7c4ffdaSLeon Romanovsky
1950f7c4ffdaSLeon Romanovsky if (num_offsets != 1)
1951f7c4ffdaSLeon Romanovsky return -EINVAL;
1952f7c4ffdaSLeon Romanovsky
1953f7c4ffdaSLeon Romanovsky offset_attr = uverbs_attr_get_alloced_ptr(
1954f7c4ffdaSLeon Romanovsky attrs,
1955f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
1956f7c4ffdaSLeon Romanovsky offset = *offset_attr;
1957f7c4ffdaSLeon Romanovsky }
1958f7c4ffdaSLeon Romanovsky
1959f7c4ffdaSLeon Romanovsky if (!is_flow_counter(devx_obj, offset, &counter_id))
1960f7c4ffdaSLeon Romanovsky return -EINVAL;
1961f7c4ffdaSLeon Romanovsky
1962f7c4ffdaSLeon Romanovsky flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1963f7c4ffdaSLeon Romanovsky }
1964f7c4ffdaSLeon Romanovsky
1965f7c4ffdaSLeon Romanovsky cmd_in = uverbs_attr_get_alloced_ptr(
1966f7c4ffdaSLeon Romanovsky attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
1967f7c4ffdaSLeon Romanovsky inlen = uverbs_attr_get_len(attrs,
1968f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
1969f7c4ffdaSLeon Romanovsky
1970f7c4ffdaSLeon Romanovsky uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
1971f7c4ffdaSLeon Romanovsky if (!uflow_res)
1972f7c4ffdaSLeon Romanovsky return -ENOMEM;
1973f7c4ffdaSLeon Romanovsky
1974f7c4ffdaSLeon Romanovsky len = uverbs_attr_get_uobjs_arr(attrs,
1975f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
1976f7c4ffdaSLeon Romanovsky for (i = 0; i < len; i++) {
1977f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_action *maction =
1978f7c4ffdaSLeon Romanovsky to_mflow_act(arr_flow_actions[i]->object);
1979f7c4ffdaSLeon Romanovsky
1980f7c4ffdaSLeon Romanovsky ret = parse_flow_flow_action(maction, false, &flow_act);
1981f7c4ffdaSLeon Romanovsky if (ret)
1982f7c4ffdaSLeon Romanovsky goto err_out;
1983f7c4ffdaSLeon Romanovsky flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
1984f7c4ffdaSLeon Romanovsky arr_flow_actions[i]->object);
1985f7c4ffdaSLeon Romanovsky }
1986f7c4ffdaSLeon Romanovsky
1987f7c4ffdaSLeon Romanovsky ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
1988f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_TAG);
1989f7c4ffdaSLeon Romanovsky if (!ret) {
1990f7c4ffdaSLeon Romanovsky if (flow_context.flow_tag >= BIT(24)) {
1991f7c4ffdaSLeon Romanovsky ret = -EINVAL;
1992f7c4ffdaSLeon Romanovsky goto err_out;
1993f7c4ffdaSLeon Romanovsky }
1994f7c4ffdaSLeon Romanovsky flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
1995f7c4ffdaSLeon Romanovsky }
1996f7c4ffdaSLeon Romanovsky
1997f7c4ffdaSLeon Romanovsky flow_handler =
1998f7c4ffdaSLeon Romanovsky raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act,
1999f7c4ffdaSLeon Romanovsky counter_id, cmd_in, inlen, dest_id, dest_type);
2000f7c4ffdaSLeon Romanovsky if (IS_ERR(flow_handler)) {
2001f7c4ffdaSLeon Romanovsky ret = PTR_ERR(flow_handler);
2002f7c4ffdaSLeon Romanovsky goto err_out;
2003f7c4ffdaSLeon Romanovsky }
2004f7c4ffdaSLeon Romanovsky
2005f7c4ffdaSLeon Romanovsky ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);
2006f7c4ffdaSLeon Romanovsky
2007f7c4ffdaSLeon Romanovsky return 0;
2008f7c4ffdaSLeon Romanovsky err_out:
2009f7c4ffdaSLeon Romanovsky ib_uverbs_flow_resources_free(uflow_res);
2010f7c4ffdaSLeon Romanovsky return ret;
2011f7c4ffdaSLeon Romanovsky }
2012f7c4ffdaSLeon Romanovsky
2013f7c4ffdaSLeon Romanovsky static int flow_matcher_cleanup(struct ib_uobject *uobject,
2014f7c4ffdaSLeon Romanovsky enum rdma_remove_reason why,
2015f7c4ffdaSLeon Romanovsky struct uverbs_attr_bundle *attrs)
2016f7c4ffdaSLeon Romanovsky {
2017f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_matcher *obj = uobject->object;
2018f7c4ffdaSLeon Romanovsky
2019efa968eeSLeon Romanovsky if (atomic_read(&obj->usecnt))
2020efa968eeSLeon Romanovsky return -EBUSY;
2021f7c4ffdaSLeon Romanovsky
2022f7c4ffdaSLeon Romanovsky kfree(obj);
2023f7c4ffdaSLeon Romanovsky return 0;
2024f7c4ffdaSLeon Romanovsky }
2025f7c4ffdaSLeon Romanovsky
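/*
 * Lazily create the unmanaged, two-entry anchor flow table for this
 * priority.  It is created with MLX5_SHARED_RESOURCE_UID at level 1 and
 * kept on ft_prio->anchor until explicitly destroyed.
 */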
2026e1f4a52aSMark Bloch static int steering_anchor_create_ft(struct mlx5_ib_dev *dev,
2027e1f4a52aSMark Bloch struct mlx5_ib_flow_prio *ft_prio,
2028e1f4a52aSMark Bloch enum mlx5_flow_namespace_type ns_type)
2029e1f4a52aSMark Bloch {
2030e1f4a52aSMark Bloch struct mlx5_flow_table_attr ft_attr = {};
2031e1f4a52aSMark Bloch struct mlx5_flow_namespace *ns;
2032e1f4a52aSMark Bloch struct mlx5_flow_table *ft;
2033e1f4a52aSMark Bloch
2034e1f4a52aSMark Bloch if (ft_prio->anchor.ft)
2035e1f4a52aSMark Bloch return 0;
2036e1f4a52aSMark Bloch
2037e1f4a52aSMark Bloch ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
2038e1f4a52aSMark Bloch if (!ns)
2039e1f4a52aSMark Bloch return -EOPNOTSUPP;
2040e1f4a52aSMark Bloch
2041e1f4a52aSMark Bloch ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
2042e1f4a52aSMark Bloch ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
2043e1f4a52aSMark Bloch ft_attr.prio = 0;
2044e1f4a52aSMark Bloch ft_attr.max_fte = 2;
2045e1f4a52aSMark Bloch ft_attr.level = 1;
2046e1f4a52aSMark Bloch
2047e1f4a52aSMark Bloch ft = mlx5_create_flow_table(ns, &ft_attr);
2048e1f4a52aSMark Bloch if (IS_ERR(ft))
2049e1f4a52aSMark Bloch return PTR_ERR(ft);
2050e1f4a52aSMark Bloch
2051e1f4a52aSMark Bloch ft_prio->anchor.ft = ft;
2052e1f4a52aSMark Bloch
2053e1f4a52aSMark Bloch return 0;
2054e1f4a52aSMark Bloch }
2055e1f4a52aSMark Bloch
2056e1f4a52aSMark Bloch static void steering_anchor_destroy_ft(struct mlx5_ib_flow_prio *ft_prio)
2057e1f4a52aSMark Bloch {
2058e1f4a52aSMark Bloch if (ft_prio->anchor.ft) {
2059e1f4a52aSMark Bloch mlx5_destroy_flow_table(ft_prio->anchor.ft);
2060e1f4a52aSMark Bloch ft_prio->anchor.ft = NULL;
2061e1f4a52aSMark Bloch }
2062e1f4a52aSMark Bloch }
2063e1f4a52aSMark Bloch
2064e1f4a52aSMark Bloch static int
2065e1f4a52aSMark Bloch steering_anchor_create_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
2066e1f4a52aSMark Bloch {
2067e1f4a52aSMark Bloch int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2068e1f4a52aSMark Bloch struct mlx5_flow_group *fg;
2069e1f4a52aSMark Bloch void *flow_group_in;
2070e1f4a52aSMark Bloch int err = 0;
2071e1f4a52aSMark Bloch
2072e1f4a52aSMark Bloch if (ft_prio->anchor.fg_drop)
2073e1f4a52aSMark Bloch return 0;
2074e1f4a52aSMark Bloch
2075e1f4a52aSMark Bloch flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2076e1f4a52aSMark Bloch if (!flow_group_in)
2077e1f4a52aSMark Bloch return -ENOMEM;
2078e1f4a52aSMark Bloch
2079e1f4a52aSMark Bloch MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
2080e1f4a52aSMark Bloch MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
2081e1f4a52aSMark Bloch
2082e1f4a52aSMark Bloch fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
2083e1f4a52aSMark Bloch if (IS_ERR(fg)) {
2084e1f4a52aSMark Bloch err = PTR_ERR(fg);
2085e1f4a52aSMark Bloch goto out;
2086e1f4a52aSMark Bloch }
2087e1f4a52aSMark Bloch
2088e1f4a52aSMark Bloch ft_prio->anchor.fg_drop = fg;
2089e1f4a52aSMark Bloch
2090e1f4a52aSMark Bloch out:
2091e1f4a52aSMark Bloch kvfree(flow_group_in);
2092e1f4a52aSMark Bloch
2093e1f4a52aSMark Bloch return err;
2094e1f4a52aSMark Bloch }
2095e1f4a52aSMark Bloch
2096e1f4a52aSMark Bloch static void
2097e1f4a52aSMark Bloch steering_anchor_destroy_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
2098e1f4a52aSMark Bloch {
2099e1f4a52aSMark Bloch if (ft_prio->anchor.fg_drop) {
2100e1f4a52aSMark Bloch mlx5_destroy_flow_group(ft_prio->anchor.fg_drop);
2101e1f4a52aSMark Bloch ft_prio->anchor.fg_drop = NULL;
2102e1f4a52aSMark Bloch }
2103e1f4a52aSMark Bloch }
2104e1f4a52aSMark Bloch
2105e1f4a52aSMark Bloch static int
2106e1f4a52aSMark Bloch steering_anchor_create_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
2107e1f4a52aSMark Bloch {
2108e1f4a52aSMark Bloch int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2109e1f4a52aSMark Bloch struct mlx5_flow_group *fg;
2110e1f4a52aSMark Bloch void *flow_group_in;
2111e1f4a52aSMark Bloch int err = 0;
2112e1f4a52aSMark Bloch
2113e1f4a52aSMark Bloch if (ft_prio->anchor.fg_goto_table)
2114e1f4a52aSMark Bloch return 0;
2115e1f4a52aSMark Bloch
2116e1f4a52aSMark Bloch flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2117e1f4a52aSMark Bloch if (!flow_group_in)
2118e1f4a52aSMark Bloch return -ENOMEM;
2119e1f4a52aSMark Bloch
2120e1f4a52aSMark Bloch fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
2121e1f4a52aSMark Bloch if (IS_ERR(fg)) {
2122e1f4a52aSMark Bloch err = PTR_ERR(fg);
2123e1f4a52aSMark Bloch goto out;
2124e1f4a52aSMark Bloch }
2125e1f4a52aSMark Bloch ft_prio->anchor.fg_goto_table = fg;
2126e1f4a52aSMark Bloch
2127e1f4a52aSMark Bloch out:
2128e1f4a52aSMark Bloch kvfree(flow_group_in);
2129e1f4a52aSMark Bloch
2130e1f4a52aSMark Bloch return err;
2131e1f4a52aSMark Bloch }
2132e1f4a52aSMark Bloch
2133e1f4a52aSMark Bloch static void
2134e1f4a52aSMark Bloch steering_anchor_destroy_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
2135e1f4a52aSMark Bloch {
2136e1f4a52aSMark Bloch if (ft_prio->anchor.fg_goto_table) {
2137e1f4a52aSMark Bloch mlx5_destroy_flow_group(ft_prio->anchor.fg_goto_table);
2138e1f4a52aSMark Bloch ft_prio->anchor.fg_goto_table = NULL;
2139e1f4a52aSMark Bloch }
2140e1f4a52aSMark Bloch }
2141e1f4a52aSMark Bloch
2142e1f4a52aSMark Bloch static int
2143e1f4a52aSMark Bloch steering_anchor_create_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
2144e1f4a52aSMark Bloch {
2145e1f4a52aSMark Bloch struct mlx5_flow_act flow_act = {};
2146e1f4a52aSMark Bloch struct mlx5_flow_handle *handle;
2147e1f4a52aSMark Bloch
2148e1f4a52aSMark Bloch if (ft_prio->anchor.rule_drop)
2149e1f4a52aSMark Bloch return 0;
2150e1f4a52aSMark Bloch
2151e1f4a52aSMark Bloch flow_act.fg = ft_prio->anchor.fg_drop;
2152e1f4a52aSMark Bloch flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
2153e1f4a52aSMark Bloch
2154e1f4a52aSMark Bloch handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
2155e1f4a52aSMark Bloch NULL, 0);
2156e1f4a52aSMark Bloch if (IS_ERR(handle))
2157e1f4a52aSMark Bloch return PTR_ERR(handle);
2158e1f4a52aSMark Bloch
2159e1f4a52aSMark Bloch ft_prio->anchor.rule_drop = handle;
2160e1f4a52aSMark Bloch
2161e1f4a52aSMark Bloch return 0;
2162e1f4a52aSMark Bloch }
2163e1f4a52aSMark Bloch
2164e1f4a52aSMark Bloch static void steering_anchor_destroy_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
2165e1f4a52aSMark Bloch {
2166e1f4a52aSMark Bloch if (ft_prio->anchor.rule_drop) {
2167e1f4a52aSMark Bloch mlx5_del_flow_rules(ft_prio->anchor.rule_drop);
2168e1f4a52aSMark Bloch ft_prio->anchor.rule_drop = NULL;
2169e1f4a52aSMark Bloch }
2170e1f4a52aSMark Bloch }
2171e1f4a52aSMark Bloch
2172e1f4a52aSMark Bloch static int
2173e1f4a52aSMark Bloch steering_anchor_create_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
2174e1f4a52aSMark Bloch {
2175e1f4a52aSMark Bloch struct mlx5_flow_destination dest = {};
2176e1f4a52aSMark Bloch struct mlx5_flow_act flow_act = {};
2177e1f4a52aSMark Bloch struct mlx5_flow_handle *handle;
2178e1f4a52aSMark Bloch
2179e1f4a52aSMark Bloch if (ft_prio->anchor.rule_goto_table)
2180e1f4a52aSMark Bloch return 0;
2181e1f4a52aSMark Bloch
2182e1f4a52aSMark Bloch flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2183e1f4a52aSMark Bloch flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
2184e1f4a52aSMark Bloch flow_act.fg = ft_prio->anchor.fg_goto_table;
2185e1f4a52aSMark Bloch
2186e1f4a52aSMark Bloch dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2187e1f4a52aSMark Bloch dest.ft = ft_prio->flow_table;
2188e1f4a52aSMark Bloch
2189e1f4a52aSMark Bloch handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
2190e1f4a52aSMark Bloch &dest, 1);
2191e1f4a52aSMark Bloch if (IS_ERR(handle))
2192e1f4a52aSMark Bloch return PTR_ERR(handle);
2193e1f4a52aSMark Bloch
2194e1f4a52aSMark Bloch ft_prio->anchor.rule_goto_table = handle;
2195e1f4a52aSMark Bloch
2196e1f4a52aSMark Bloch return 0;
2197e1f4a52aSMark Bloch }
2198e1f4a52aSMark Bloch
2199e1f4a52aSMark Bloch static void
2200e1f4a52aSMark Bloch steering_anchor_destroy_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
2201e1f4a52aSMark Bloch {
2202e1f4a52aSMark Bloch if (ft_prio->anchor.rule_goto_table) {
2203e1f4a52aSMark Bloch mlx5_del_flow_rules(ft_prio->anchor.rule_goto_table);
2204e1f4a52aSMark Bloch ft_prio->anchor.rule_goto_table = NULL;
2205e1f4a52aSMark Bloch }
2206e1f4a52aSMark Bloch }
2207e1f4a52aSMark Bloch
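/* Build all resources behind a steering anchor: the anchor flow table,
 * a group and DROP rule at FTE index 1, and a group and forward rule at
 * FTE index 0 that jumps (ignoring flow level) to the priority's flow
 * table. Partially created resources are torn down in reverse on error.
 */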
2208e1f4a52aSMark Bloch static int steering_anchor_create_res(struct mlx5_ib_dev *dev,
2209e1f4a52aSMark Bloch struct mlx5_ib_flow_prio *ft_prio,
2210e1f4a52aSMark Bloch enum mlx5_flow_namespace_type ns_type)
2211e1f4a52aSMark Bloch {
2212e1f4a52aSMark Bloch int err;
2213e1f4a52aSMark Bloch
2214e1f4a52aSMark Bloch err = steering_anchor_create_ft(dev, ft_prio, ns_type);
2215e1f4a52aSMark Bloch if (err)
2216e1f4a52aSMark Bloch return err;
2217e1f4a52aSMark Bloch
2218e1f4a52aSMark Bloch err = steering_anchor_create_fg_drop(ft_prio);
2219e1f4a52aSMark Bloch if (err)
2220e1f4a52aSMark Bloch goto destroy_ft;
2221e1f4a52aSMark Bloch
2222e1f4a52aSMark Bloch err = steering_anchor_create_fg_goto_table(ft_prio);
2223e1f4a52aSMark Bloch if (err)
2224e1f4a52aSMark Bloch goto destroy_fg_drop;
2225e1f4a52aSMark Bloch
2226e1f4a52aSMark Bloch err = steering_anchor_create_rule_drop(ft_prio);
2227e1f4a52aSMark Bloch if (err)
2228e1f4a52aSMark Bloch goto destroy_fg_goto_table;
2229e1f4a52aSMark Bloch
2230e1f4a52aSMark Bloch err = steering_anchor_create_rule_goto_table(ft_prio);
2231e1f4a52aSMark Bloch if (err)
2232e1f4a52aSMark Bloch goto destroy_rule_drop;
2233e1f4a52aSMark Bloch
2234e1f4a52aSMark Bloch return 0;
2235e1f4a52aSMark Bloch
2236e1f4a52aSMark Bloch destroy_rule_drop:
2237e1f4a52aSMark Bloch steering_anchor_destroy_rule_drop(ft_prio);
2238e1f4a52aSMark Bloch destroy_fg_goto_table:
2239e1f4a52aSMark Bloch steering_anchor_destroy_fg_goto_table(ft_prio);
2240e1f4a52aSMark Bloch destroy_fg_drop:
2241e1f4a52aSMark Bloch steering_anchor_destroy_fg_drop(ft_prio);
2242e1f4a52aSMark Bloch destroy_ft:
2243e1f4a52aSMark Bloch steering_anchor_destroy_ft(ft_prio);
2244e1f4a52aSMark Bloch
2245e1f4a52aSMark Bloch return err;
2246e1f4a52aSMark Bloch }
2247e1f4a52aSMark Bloch
2248e1f4a52aSMark Bloch static void mlx5_steering_anchor_destroy_res(struct mlx5_ib_flow_prio *ft_prio)
2249e1f4a52aSMark Bloch {
2250e1f4a52aSMark Bloch steering_anchor_destroy_rule_goto_table(ft_prio);
2251e1f4a52aSMark Bloch steering_anchor_destroy_rule_drop(ft_prio);
2252e1f4a52aSMark Bloch steering_anchor_destroy_fg_goto_table(ft_prio);
2253e1f4a52aSMark Bloch steering_anchor_destroy_fg_drop(ft_prio);
2254e1f4a52aSMark Bloch steering_anchor_destroy_ft(ft_prio);
2255e1f4a52aSMark Bloch }
2256e1f4a52aSMark Bloch
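/* When the last anchor that references this priority goes away, only the
 * goto rule is removed, so traffic still pointing at the anchor table is
 * dropped by the remaining drop rule. The table, groups and drop rule are
 * freed later by mlx5_ib_fs_cleanup_anchor().
 */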
22570c6ab0caSMark Bloch static int steering_anchor_cleanup(struct ib_uobject *uobject,
22580c6ab0caSMark Bloch enum rdma_remove_reason why,
22590c6ab0caSMark Bloch struct uverbs_attr_bundle *attrs)
22600c6ab0caSMark Bloch {
22610c6ab0caSMark Bloch struct mlx5_ib_steering_anchor *obj = uobject->object;
22620c6ab0caSMark Bloch
22630c6ab0caSMark Bloch if (atomic_read(&obj->usecnt))
22640c6ab0caSMark Bloch return -EBUSY;
22650c6ab0caSMark Bloch
22660c6ab0caSMark Bloch mutex_lock(&obj->dev->flow_db->lock);
2267e1f4a52aSMark Bloch if (!--obj->ft_prio->anchor.rule_goto_table_ref)
2268e1f4a52aSMark Bloch steering_anchor_destroy_rule_goto_table(obj->ft_prio);
2269e1f4a52aSMark Bloch
22700c6ab0caSMark Bloch put_flow_table(obj->dev, obj->ft_prio, true);
22710c6ab0caSMark Bloch mutex_unlock(&obj->dev->flow_db->lock);
22720c6ab0caSMark Bloch
22730c6ab0caSMark Bloch kfree(obj);
22740c6ab0caSMark Bloch return 0;
22750c6ab0caSMark Bloch }
22760c6ab0caSMark Bloch
2277e1f4a52aSMark Bloch static void fs_cleanup_anchor(struct mlx5_ib_flow_prio *prio,
2278e1f4a52aSMark Bloch int count)
2279e1f4a52aSMark Bloch {
2280e1f4a52aSMark Bloch while (count--)
2281e1f4a52aSMark Bloch mlx5_steering_anchor_destroy_res(&prio[count]);
2282e1f4a52aSMark Bloch }
2283e1f4a52aSMark Bloch
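/* Release any anchor resources left behind in all flow priority arrays;
 * called when the device's flow steering support is torn down.
 */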
2284e1f4a52aSMark Bloch void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev)
2285e1f4a52aSMark Bloch {
2286e1f4a52aSMark Bloch fs_cleanup_anchor(dev->flow_db->prios, MLX5_IB_NUM_FLOW_FT);
2287e1f4a52aSMark Bloch fs_cleanup_anchor(dev->flow_db->egress_prios, MLX5_IB_NUM_FLOW_FT);
2288e1f4a52aSMark Bloch fs_cleanup_anchor(dev->flow_db->sniffer, MLX5_IB_NUM_SNIFFER_FTS);
2289e1f4a52aSMark Bloch fs_cleanup_anchor(dev->flow_db->egress, MLX5_IB_NUM_EGRESS_FTS);
2290e1f4a52aSMark Bloch fs_cleanup_anchor(dev->flow_db->fdb, MLX5_IB_NUM_FDB_FTS);
2291e1f4a52aSMark Bloch fs_cleanup_anchor(dev->flow_db->rdma_rx, MLX5_IB_NUM_FLOW_FT);
2292e1f4a52aSMark Bloch fs_cleanup_anchor(dev->flow_db->rdma_tx, MLX5_IB_NUM_FLOW_FT);
2293e1f4a52aSMark Bloch }
2294e1f4a52aSMark Bloch
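/* Resolve the flow namespace for a matcher: an explicit FT_TYPE attribute
 * takes precedence, the legacy EGRESS flow flag selects the NIC TX
 * namespace, and the default is the NIC RX bypass namespace.
 */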
2295f7c4ffdaSLeon Romanovsky static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
2296f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_matcher *obj)
2297f7c4ffdaSLeon Romanovsky {
2298f7c4ffdaSLeon Romanovsky enum mlx5_ib_uapi_flow_table_type ft_type =
2299f7c4ffdaSLeon Romanovsky MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX;
2300f7c4ffdaSLeon Romanovsky u32 flags;
2301f7c4ffdaSLeon Romanovsky int err;
2302f7c4ffdaSLeon Romanovsky
2303f7c4ffdaSLeon Romanovsky /* New users should use MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE and older users
2304f7c4ffdaSLeon Romanovsky * should switch to it; the legacy FLOW_FLAGS attribute is kept only to
2305f7c4ffdaSLeon Romanovsky * avoid breaking existing userspace. */
2306f7c4ffdaSLeon Romanovsky if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) &&
2307f7c4ffdaSLeon Romanovsky uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS))
2308f7c4ffdaSLeon Romanovsky return -EINVAL;
2309f7c4ffdaSLeon Romanovsky
2310f7c4ffdaSLeon Romanovsky if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) {
2311f7c4ffdaSLeon Romanovsky err = uverbs_get_const(&ft_type, attrs,
2312f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE);
2313f7c4ffdaSLeon Romanovsky if (err)
2314f7c4ffdaSLeon Romanovsky return err;
2315f7c4ffdaSLeon Romanovsky
2316f7c4ffdaSLeon Romanovsky err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type);
2317f7c4ffdaSLeon Romanovsky if (err)
2318f7c4ffdaSLeon Romanovsky return err;
2319f7c4ffdaSLeon Romanovsky
2320f7c4ffdaSLeon Romanovsky return 0;
2321f7c4ffdaSLeon Romanovsky }
2322f7c4ffdaSLeon Romanovsky
2323f7c4ffdaSLeon Romanovsky if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) {
2324f7c4ffdaSLeon Romanovsky err = uverbs_get_flags32(&flags, attrs,
2325f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
2326f7c4ffdaSLeon Romanovsky IB_FLOW_ATTR_FLAGS_EGRESS);
2327f7c4ffdaSLeon Romanovsky if (err)
2328f7c4ffdaSLeon Romanovsky return err;
2329f7c4ffdaSLeon Romanovsky
2330c9776457SMaor Gottlieb if (flags)
2331c9776457SMaor Gottlieb return mlx5_ib_ft_type_to_namespace(
2332f7c4ffdaSLeon Romanovsky MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
2333f7c4ffdaSLeon Romanovsky &obj->ns_type);
2334f7c4ffdaSLeon Romanovsky }
2335f7c4ffdaSLeon Romanovsky
2336f7c4ffdaSLeon Romanovsky obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
2337f7c4ffdaSLeon Romanovsky
2338f7c4ffdaSLeon Romanovsky return 0;
2339f7c4ffdaSLeon Romanovsky }
2340f7c4ffdaSLeon Romanovsky
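/* Create a flow matcher: copy the match mask, flow type/priority and
 * match criteria from userspace, resolve the namespace, and reject FDB
 * bypass matchers unless the eswitch is in offloads mode.
 */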
2341f7c4ffdaSLeon Romanovsky static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
2342f7c4ffdaSLeon Romanovsky struct uverbs_attr_bundle *attrs)
2343f7c4ffdaSLeon Romanovsky {
2344f7c4ffdaSLeon Romanovsky struct ib_uobject *uobj = uverbs_attr_get_uobject(
2345f7c4ffdaSLeon Romanovsky attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
2346f7c4ffdaSLeon Romanovsky struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
2347f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_matcher *obj;
2348f7c4ffdaSLeon Romanovsky int err;
2349f7c4ffdaSLeon Romanovsky
2350f7c4ffdaSLeon Romanovsky obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
2351f7c4ffdaSLeon Romanovsky if (!obj)
2352f7c4ffdaSLeon Romanovsky return -ENOMEM;
2353f7c4ffdaSLeon Romanovsky
2354f7c4ffdaSLeon Romanovsky obj->mask_len = uverbs_attr_get_len(
2355f7c4ffdaSLeon Romanovsky attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
2356f7c4ffdaSLeon Romanovsky err = uverbs_copy_from(&obj->matcher_mask,
2357f7c4ffdaSLeon Romanovsky attrs,
2358f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
2359f7c4ffdaSLeon Romanovsky if (err)
2360f7c4ffdaSLeon Romanovsky goto end;
2361f7c4ffdaSLeon Romanovsky
2362f7c4ffdaSLeon Romanovsky obj->flow_type = uverbs_attr_get_enum_id(
2363f7c4ffdaSLeon Romanovsky attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
2364f7c4ffdaSLeon Romanovsky
2365f7c4ffdaSLeon Romanovsky if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
2366f7c4ffdaSLeon Romanovsky err = uverbs_copy_from(&obj->priority,
2367f7c4ffdaSLeon Romanovsky attrs,
2368f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
2369f7c4ffdaSLeon Romanovsky if (err)
2370f7c4ffdaSLeon Romanovsky goto end;
2371f7c4ffdaSLeon Romanovsky }
2372f7c4ffdaSLeon Romanovsky
2373f7c4ffdaSLeon Romanovsky err = uverbs_copy_from(&obj->match_criteria_enable,
2374f7c4ffdaSLeon Romanovsky attrs,
2375f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
2376f7c4ffdaSLeon Romanovsky if (err)
2377f7c4ffdaSLeon Romanovsky goto end;
2378f7c4ffdaSLeon Romanovsky
2379f7c4ffdaSLeon Romanovsky err = mlx5_ib_matcher_ns(attrs, obj);
2380f7c4ffdaSLeon Romanovsky if (err)
2381f7c4ffdaSLeon Romanovsky goto end;
2382f7c4ffdaSLeon Romanovsky
238322c3f2f5SMaor Gottlieb if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS &&
2384edc0b0bcSMark Bloch mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
2385edc0b0bcSMark Bloch err = -EINVAL;
2386edc0b0bcSMark Bloch goto end;
2387edc0b0bcSMark Bloch }
2388edc0b0bcSMark Bloch
2389f7c4ffdaSLeon Romanovsky uobj->object = obj;
2390f7c4ffdaSLeon Romanovsky obj->mdev = dev->mdev;
2391f7c4ffdaSLeon Romanovsky atomic_set(&obj->usecnt, 0);
2392f7c4ffdaSLeon Romanovsky return 0;
2393f7c4ffdaSLeon Romanovsky
2394f7c4ffdaSLeon Romanovsky end:
2395f7c4ffdaSLeon Romanovsky kfree(obj);
2396f7c4ffdaSLeon Romanovsky return err;
2397f7c4ffdaSLeon Romanovsky }
2398f7c4ffdaSLeon Romanovsky
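/* Create a steering anchor: requires CAP_NET_RAW. The per-priority anchor
 * resources are created on first use and shared afterwards (tracked by
 * rule_goto_table_ref); the anchor flow table ID is returned to userspace.
 */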
23990c6ab0caSMark Bloch static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
24000c6ab0caSMark Bloch struct uverbs_attr_bundle *attrs)
24010c6ab0caSMark Bloch {
24020c6ab0caSMark Bloch struct ib_uobject *uobj = uverbs_attr_get_uobject(
24030c6ab0caSMark Bloch attrs, MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE);
24040c6ab0caSMark Bloch struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
24050c6ab0caSMark Bloch enum mlx5_ib_uapi_flow_table_type ib_uapi_ft_type;
24060c6ab0caSMark Bloch enum mlx5_flow_namespace_type ns_type;
24070c6ab0caSMark Bloch struct mlx5_ib_steering_anchor *obj;
24080c6ab0caSMark Bloch struct mlx5_ib_flow_prio *ft_prio;
24090c6ab0caSMark Bloch u16 priority;
24100c6ab0caSMark Bloch u32 ft_id;
24110c6ab0caSMark Bloch int err;
24120c6ab0caSMark Bloch
24130c6ab0caSMark Bloch if (!capable(CAP_NET_RAW))
24140c6ab0caSMark Bloch return -EPERM;
24150c6ab0caSMark Bloch
24160c6ab0caSMark Bloch err = uverbs_get_const(&ib_uapi_ft_type, attrs,
24170c6ab0caSMark Bloch MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE);
24180c6ab0caSMark Bloch if (err)
24190c6ab0caSMark Bloch return err;
24200c6ab0caSMark Bloch
24210c6ab0caSMark Bloch err = mlx5_ib_ft_type_to_namespace(ib_uapi_ft_type, &ns_type);
24220c6ab0caSMark Bloch if (err)
24230c6ab0caSMark Bloch return err;
24240c6ab0caSMark Bloch
24250c6ab0caSMark Bloch err = uverbs_copy_from(&priority, attrs,
24260c6ab0caSMark Bloch MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY);
24270c6ab0caSMark Bloch if (err)
24280c6ab0caSMark Bloch return err;
24290c6ab0caSMark Bloch
24300c6ab0caSMark Bloch obj = kzalloc(sizeof(*obj), GFP_KERNEL);
24310c6ab0caSMark Bloch if (!obj)
24320c6ab0caSMark Bloch return -ENOMEM;
24330c6ab0caSMark Bloch
24340c6ab0caSMark Bloch mutex_lock(&dev->flow_db->lock);
2435e1f4a52aSMark Bloch
24360c6ab0caSMark Bloch ft_prio = _get_flow_table(dev, priority, ns_type, 0);
24370c6ab0caSMark Bloch if (IS_ERR(ft_prio)) {
24380c6ab0caSMark Bloch err = PTR_ERR(ft_prio);
24390c6ab0caSMark Bloch goto free_obj;
24400c6ab0caSMark Bloch }
24410c6ab0caSMark Bloch
24420c6ab0caSMark Bloch ft_prio->refcount++;
2443e1f4a52aSMark Bloch
2444e1f4a52aSMark Bloch if (!ft_prio->anchor.rule_goto_table_ref) {
2445e1f4a52aSMark Bloch err = steering_anchor_create_res(dev, ft_prio, ns_type);
2446e1f4a52aSMark Bloch if (err)
2447e1f4a52aSMark Bloch goto put_flow_table;
2448e1f4a52aSMark Bloch }
2449e1f4a52aSMark Bloch
2450e1f4a52aSMark Bloch ft_prio->anchor.rule_goto_table_ref++;
2451e1f4a52aSMark Bloch
2452e1f4a52aSMark Bloch ft_id = mlx5_flow_table_id(ft_prio->anchor.ft);
24530c6ab0caSMark Bloch
24540c6ab0caSMark Bloch err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
24550c6ab0caSMark Bloch &ft_id, sizeof(ft_id));
24560c6ab0caSMark Bloch if (err)
2457e1f4a52aSMark Bloch goto destroy_res;
2458e1f4a52aSMark Bloch
2459e1f4a52aSMark Bloch mutex_unlock(&dev->flow_db->lock);
24600c6ab0caSMark Bloch
24610c6ab0caSMark Bloch uobj->object = obj;
24620c6ab0caSMark Bloch obj->dev = dev;
24630c6ab0caSMark Bloch obj->ft_prio = ft_prio;
24640c6ab0caSMark Bloch atomic_set(&obj->usecnt, 0);
24650c6ab0caSMark Bloch
24660c6ab0caSMark Bloch return 0;
24670c6ab0caSMark Bloch
2468e1f4a52aSMark Bloch destroy_res:
2469e1f4a52aSMark Bloch --ft_prio->anchor.rule_goto_table_ref;
2470e1f4a52aSMark Bloch mlx5_steering_anchor_destroy_res(ft_prio);
24710c6ab0caSMark Bloch put_flow_table:
24720c6ab0caSMark Bloch put_flow_table(dev, ft_prio, true);
24730c6ab0caSMark Bloch free_obj:
2474*2fad8f06SHamdan Igbaria mutex_unlock(&dev->flow_db->lock);
24750c6ab0caSMark Bloch kfree(obj);
24760c6ab0caSMark Bloch
24770c6ab0caSMark Bloch return err;
24780c6ab0caSMark Bloch }
24790c6ab0caSMark Bloch
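/* Allocate a modify-header context in the namespace matching the requested
 * table type and wrap it in an mlx5_ib_flow_action.
 */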
2480f7c4ffdaSLeon Romanovsky static struct ib_flow_action *
2481f7c4ffdaSLeon Romanovsky mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
2482f7c4ffdaSLeon Romanovsky enum mlx5_ib_uapi_flow_table_type ft_type,
2483f7c4ffdaSLeon Romanovsky u8 num_actions, void *in)
2484f7c4ffdaSLeon Romanovsky {
2485f7c4ffdaSLeon Romanovsky enum mlx5_flow_namespace_type namespace;
2486f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_action *maction;
2487f7c4ffdaSLeon Romanovsky int ret;
2488f7c4ffdaSLeon Romanovsky
2489f7c4ffdaSLeon Romanovsky ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
2490f7c4ffdaSLeon Romanovsky if (ret)
2491f7c4ffdaSLeon Romanovsky return ERR_PTR(-EINVAL);
2492f7c4ffdaSLeon Romanovsky
2493f7c4ffdaSLeon Romanovsky maction = kzalloc(sizeof(*maction), GFP_KERNEL);
2494f7c4ffdaSLeon Romanovsky if (!maction)
2495f7c4ffdaSLeon Romanovsky return ERR_PTR(-ENOMEM);
2496f7c4ffdaSLeon Romanovsky
2497f7c4ffdaSLeon Romanovsky maction->flow_action_raw.modify_hdr =
2498f7c4ffdaSLeon Romanovsky mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);
2499f7c4ffdaSLeon Romanovsky
2500f7c4ffdaSLeon Romanovsky if (IS_ERR(maction->flow_action_raw.modify_hdr)) {
2501f7c4ffdaSLeon Romanovsky ret = PTR_ERR(maction->flow_action_raw.modify_hdr);
2502f7c4ffdaSLeon Romanovsky kfree(maction);
2503f7c4ffdaSLeon Romanovsky return ERR_PTR(ret);
2504f7c4ffdaSLeon Romanovsky }
2505f7c4ffdaSLeon Romanovsky maction->flow_action_raw.sub_type =
2506f7c4ffdaSLeon Romanovsky MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
2507f7c4ffdaSLeon Romanovsky maction->flow_action_raw.dev = dev;
2508f7c4ffdaSLeon Romanovsky
2509f7c4ffdaSLeon Romanovsky return &maction->ib_action;
2510f7c4ffdaSLeon Romanovsky }
2511f7c4ffdaSLeon Romanovsky
2512f7c4ffdaSLeon Romanovsky static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
2513f7c4ffdaSLeon Romanovsky {
2514f7c4ffdaSLeon Romanovsky return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
2515f7c4ffdaSLeon Romanovsky max_modify_header_actions) ||
2516f7c4ffdaSLeon Romanovsky MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
2517f7c4ffdaSLeon Romanovsky max_modify_header_actions) ||
2518f7c4ffdaSLeon Romanovsky MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
2519f7c4ffdaSLeon Romanovsky max_modify_header_actions);
2520f7c4ffdaSLeon Romanovsky }
2521f7c4ffdaSLeon Romanovsky
2522f7c4ffdaSLeon Romanovsky static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
2523f7c4ffdaSLeon Romanovsky struct uverbs_attr_bundle *attrs)
2524f7c4ffdaSLeon Romanovsky {
2525f7c4ffdaSLeon Romanovsky struct ib_uobject *uobj = uverbs_attr_get_uobject(
2526f7c4ffdaSLeon Romanovsky attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
2527f7c4ffdaSLeon Romanovsky struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
2528f7c4ffdaSLeon Romanovsky enum mlx5_ib_uapi_flow_table_type ft_type;
2529f7c4ffdaSLeon Romanovsky struct ib_flow_action *action;
2530f7c4ffdaSLeon Romanovsky int num_actions;
2531f7c4ffdaSLeon Romanovsky void *in;
2532f7c4ffdaSLeon Romanovsky int ret;
2533f7c4ffdaSLeon Romanovsky
2534f7c4ffdaSLeon Romanovsky if (!mlx5_ib_modify_header_supported(mdev))
2535f7c4ffdaSLeon Romanovsky return -EOPNOTSUPP;
2536f7c4ffdaSLeon Romanovsky
2537f7c4ffdaSLeon Romanovsky in = uverbs_attr_get_alloced_ptr(attrs,
2538f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
2539f7c4ffdaSLeon Romanovsky
2540f7c4ffdaSLeon Romanovsky num_actions = uverbs_attr_ptr_get_array_size(
2541f7c4ffdaSLeon Romanovsky attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
2542f7c4ffdaSLeon Romanovsky MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto));
2543f7c4ffdaSLeon Romanovsky if (num_actions < 0)
2544f7c4ffdaSLeon Romanovsky return num_actions;
2545f7c4ffdaSLeon Romanovsky
2546f7c4ffdaSLeon Romanovsky ret = uverbs_get_const(&ft_type, attrs,
2547f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
2548f7c4ffdaSLeon Romanovsky if (ret)
2549f7c4ffdaSLeon Romanovsky return ret;
2550f7c4ffdaSLeon Romanovsky action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
2551f7c4ffdaSLeon Romanovsky if (IS_ERR(action))
2552f7c4ffdaSLeon Romanovsky return PTR_ERR(action);
2553f7c4ffdaSLeon Romanovsky
2554f7c4ffdaSLeon Romanovsky uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
2555f7c4ffdaSLeon Romanovsky IB_FLOW_ACTION_UNSPECIFIED);
2556f7c4ffdaSLeon Romanovsky
2557f7c4ffdaSLeon Romanovsky return 0;
2558f7c4ffdaSLeon Romanovsky }
2559f7c4ffdaSLeon Romanovsky
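/* Check device support for the requested reformat on the given table type:
 * encap variants are only valid on NIC TX, decap variants on NIC RX.
 */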
2560f7c4ffdaSLeon Romanovsky static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
2561f7c4ffdaSLeon Romanovsky u8 packet_reformat_type,
2562f7c4ffdaSLeon Romanovsky u8 ft_type)
2563f7c4ffdaSLeon Romanovsky {
2564f7c4ffdaSLeon Romanovsky switch (packet_reformat_type) {
2565f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
2566f7c4ffdaSLeon Romanovsky if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
2567f7c4ffdaSLeon Romanovsky return MLX5_CAP_FLOWTABLE(ibdev->mdev,
2568f7c4ffdaSLeon Romanovsky encap_general_header);
2569f7c4ffdaSLeon Romanovsky break;
2570f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
2571f7c4ffdaSLeon Romanovsky if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
2572f7c4ffdaSLeon Romanovsky return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
2573f7c4ffdaSLeon Romanovsky reformat_l2_to_l3_tunnel);
2574f7c4ffdaSLeon Romanovsky break;
2575f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
2576f7c4ffdaSLeon Romanovsky if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
2577f7c4ffdaSLeon Romanovsky return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
2578f7c4ffdaSLeon Romanovsky reformat_l3_tunnel_to_l2);
2579f7c4ffdaSLeon Romanovsky break;
2580f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
2581f7c4ffdaSLeon Romanovsky if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
2582f7c4ffdaSLeon Romanovsky return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
2583f7c4ffdaSLeon Romanovsky break;
2584f7c4ffdaSLeon Romanovsky default:
2585f7c4ffdaSLeon Romanovsky break;
2586f7c4ffdaSLeon Romanovsky }
2587f7c4ffdaSLeon Romanovsky
2588f7c4ffdaSLeon Romanovsky return false;
2589f7c4ffdaSLeon Romanovsky }
2590f7c4ffdaSLeon Romanovsky
2591f7c4ffdaSLeon Romanovsky static int mlx5_ib_dv_to_prm_packet_reforamt_type(u8 dv_prt, u8 *prm_prt)
2592f7c4ffdaSLeon Romanovsky {
2593f7c4ffdaSLeon Romanovsky switch (dv_prt) {
2594f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
2595f7c4ffdaSLeon Romanovsky *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
2596f7c4ffdaSLeon Romanovsky break;
2597f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
2598f7c4ffdaSLeon Romanovsky *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
2599f7c4ffdaSLeon Romanovsky break;
2600f7c4ffdaSLeon Romanovsky case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
2601f7c4ffdaSLeon Romanovsky *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
2602f7c4ffdaSLeon Romanovsky break;
2603f7c4ffdaSLeon Romanovsky default:
2604f7c4ffdaSLeon Romanovsky return -EINVAL;
2605f7c4ffdaSLeon Romanovsky }
2606f7c4ffdaSLeon Romanovsky
2607f7c4ffdaSLeon Romanovsky return 0;
2608f7c4ffdaSLeon Romanovsky }
2609f7c4ffdaSLeon Romanovsky
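/* Translate the uverbs reformat type to its PRM counterpart and allocate
 * the packet reformat context in the matching flow namespace.
 */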
2610f7c4ffdaSLeon Romanovsky static int mlx5_ib_flow_action_create_packet_reformat_ctx(
2611f7c4ffdaSLeon Romanovsky struct mlx5_ib_dev *dev,
2612f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_action *maction,
2613f7c4ffdaSLeon Romanovsky u8 ft_type, u8 dv_prt,
2614f7c4ffdaSLeon Romanovsky void *in, size_t len)
2615f7c4ffdaSLeon Romanovsky {
26163f3f05abSYevgeny Kliteynik struct mlx5_pkt_reformat_params reformat_params;
2617f7c4ffdaSLeon Romanovsky enum mlx5_flow_namespace_type namespace;
2618f7c4ffdaSLeon Romanovsky u8 prm_prt;
2619f7c4ffdaSLeon Romanovsky int ret;
2620f7c4ffdaSLeon Romanovsky
2621f7c4ffdaSLeon Romanovsky ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
2622f7c4ffdaSLeon Romanovsky if (ret)
2623f7c4ffdaSLeon Romanovsky return ret;
2624f7c4ffdaSLeon Romanovsky
2625f7c4ffdaSLeon Romanovsky ret = mlx5_ib_dv_to_prm_packet_reforamt_type(dv_prt, &prm_prt);
2626f7c4ffdaSLeon Romanovsky if (ret)
2627f7c4ffdaSLeon Romanovsky return ret;
2628f7c4ffdaSLeon Romanovsky
26293f3f05abSYevgeny Kliteynik memset(&reformat_params, 0, sizeof(reformat_params));
26303f3f05abSYevgeny Kliteynik reformat_params.type = prm_prt;
26313f3f05abSYevgeny Kliteynik reformat_params.size = len;
26323f3f05abSYevgeny Kliteynik reformat_params.data = in;
2633f7c4ffdaSLeon Romanovsky maction->flow_action_raw.pkt_reformat =
26343f3f05abSYevgeny Kliteynik mlx5_packet_reformat_alloc(dev->mdev, &reformat_params,
26353f3f05abSYevgeny Kliteynik namespace);
2636f7c4ffdaSLeon Romanovsky if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
2637f7c4ffdaSLeon Romanovsky ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
2638f7c4ffdaSLeon Romanovsky return ret;
2639f7c4ffdaSLeon Romanovsky }
2640f7c4ffdaSLeon Romanovsky
2641f7c4ffdaSLeon Romanovsky maction->flow_action_raw.sub_type =
2642f7c4ffdaSLeon Romanovsky MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
2643f7c4ffdaSLeon Romanovsky maction->flow_action_raw.dev = dev;
2644f7c4ffdaSLeon Romanovsky
2645f7c4ffdaSLeon Romanovsky return 0;
2646f7c4ffdaSLeon Romanovsky }
2647f7c4ffdaSLeon Romanovsky
2648f7c4ffdaSLeon Romanovsky static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
2649f7c4ffdaSLeon Romanovsky struct uverbs_attr_bundle *attrs)
2650f7c4ffdaSLeon Romanovsky {
2651f7c4ffdaSLeon Romanovsky struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
2652f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
2653f7c4ffdaSLeon Romanovsky struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
2654f7c4ffdaSLeon Romanovsky enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
2655f7c4ffdaSLeon Romanovsky enum mlx5_ib_uapi_flow_table_type ft_type;
2656f7c4ffdaSLeon Romanovsky struct mlx5_ib_flow_action *maction;
2657f7c4ffdaSLeon Romanovsky int ret;
2658f7c4ffdaSLeon Romanovsky
2659f7c4ffdaSLeon Romanovsky ret = uverbs_get_const(&ft_type, attrs,
2660f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
2661f7c4ffdaSLeon Romanovsky if (ret)
2662f7c4ffdaSLeon Romanovsky return ret;
2663f7c4ffdaSLeon Romanovsky
2664f7c4ffdaSLeon Romanovsky ret = uverbs_get_const(&dv_prt, attrs,
2665f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
2666f7c4ffdaSLeon Romanovsky if (ret)
2667f7c4ffdaSLeon Romanovsky return ret;
2668f7c4ffdaSLeon Romanovsky
2669f7c4ffdaSLeon Romanovsky if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
2670f7c4ffdaSLeon Romanovsky return -EOPNOTSUPP;
2671f7c4ffdaSLeon Romanovsky
2672f7c4ffdaSLeon Romanovsky maction = kzalloc(sizeof(*maction), GFP_KERNEL);
2673f7c4ffdaSLeon Romanovsky if (!maction)
2674f7c4ffdaSLeon Romanovsky return -ENOMEM;
2675f7c4ffdaSLeon Romanovsky
2676f7c4ffdaSLeon Romanovsky if (dv_prt ==
2677f7c4ffdaSLeon Romanovsky MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
2678f7c4ffdaSLeon Romanovsky maction->flow_action_raw.sub_type =
2679f7c4ffdaSLeon Romanovsky MLX5_IB_FLOW_ACTION_DECAP;
2680f7c4ffdaSLeon Romanovsky maction->flow_action_raw.dev = mdev;
2681f7c4ffdaSLeon Romanovsky } else {
2682f7c4ffdaSLeon Romanovsky void *in;
2683f7c4ffdaSLeon Romanovsky int len;
2684f7c4ffdaSLeon Romanovsky
2685f7c4ffdaSLeon Romanovsky in = uverbs_attr_get_alloced_ptr(attrs,
2686f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
2687f7c4ffdaSLeon Romanovsky if (IS_ERR(in)) {
2688f7c4ffdaSLeon Romanovsky ret = PTR_ERR(in);
2689f7c4ffdaSLeon Romanovsky goto free_maction;
2690f7c4ffdaSLeon Romanovsky }
2691f7c4ffdaSLeon Romanovsky
2692f7c4ffdaSLeon Romanovsky len = uverbs_attr_get_len(attrs,
2693f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
2694f7c4ffdaSLeon Romanovsky
2695f7c4ffdaSLeon Romanovsky ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
2696f7c4ffdaSLeon Romanovsky maction, ft_type, dv_prt, in, len);
2697f7c4ffdaSLeon Romanovsky if (ret)
2698f7c4ffdaSLeon Romanovsky goto free_maction;
2699f7c4ffdaSLeon Romanovsky }
2700f7c4ffdaSLeon Romanovsky
2701f7c4ffdaSLeon Romanovsky uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev,
2702f7c4ffdaSLeon Romanovsky IB_FLOW_ACTION_UNSPECIFIED);
2703f7c4ffdaSLeon Romanovsky return 0;
2704f7c4ffdaSLeon Romanovsky
2705f7c4ffdaSLeon Romanovsky free_maction:
2706f7c4ffdaSLeon Romanovsky kfree(maction);
2707f7c4ffdaSLeon Romanovsky return ret;
2708f7c4ffdaSLeon Romanovsky }
2709f7c4ffdaSLeon Romanovsky
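/* uverbs method and attribute declarations exposing the flow, flow action,
 * flow matcher and steering anchor objects defined above over the ioctl()
 * interface.
 */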
2710f7c4ffdaSLeon Romanovsky DECLARE_UVERBS_NAMED_METHOD(
2711f7c4ffdaSLeon Romanovsky MLX5_IB_METHOD_CREATE_FLOW,
2712f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
2713f7c4ffdaSLeon Romanovsky UVERBS_OBJECT_FLOW,
2714f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_NEW,
2715f7c4ffdaSLeon Romanovsky UA_MANDATORY),
2716f7c4ffdaSLeon Romanovsky UVERBS_ATTR_PTR_IN(
2717f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
2718f7c4ffdaSLeon Romanovsky UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
2719f7c4ffdaSLeon Romanovsky UA_MANDATORY,
2720f7c4ffdaSLeon Romanovsky UA_ALLOC_AND_COPY),
2721f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
2722f7c4ffdaSLeon Romanovsky MLX5_IB_OBJECT_FLOW_MATCHER,
2723f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_READ,
2724f7c4ffdaSLeon Romanovsky UA_MANDATORY),
2725f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
2726f7c4ffdaSLeon Romanovsky UVERBS_OBJECT_QP,
2727f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_READ),
2728f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
2729f7c4ffdaSLeon Romanovsky MLX5_IB_OBJECT_DEVX_OBJ,
2730f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_READ),
2731f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
2732f7c4ffdaSLeon Romanovsky UVERBS_OBJECT_FLOW_ACTION,
2733f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_READ, 1,
2734f7c4ffdaSLeon Romanovsky MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
2735f7c4ffdaSLeon Romanovsky UA_OPTIONAL),
2736f7c4ffdaSLeon Romanovsky UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
2737f7c4ffdaSLeon Romanovsky UVERBS_ATTR_TYPE(u32),
2738f7c4ffdaSLeon Romanovsky UA_OPTIONAL),
2739f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
2740f7c4ffdaSLeon Romanovsky MLX5_IB_OBJECT_DEVX_OBJ,
2741f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_READ, 1, 1,
2742f7c4ffdaSLeon Romanovsky UA_OPTIONAL),
2743f7c4ffdaSLeon Romanovsky UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
2744f7c4ffdaSLeon Romanovsky UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
2745f7c4ffdaSLeon Romanovsky UA_OPTIONAL,
2746f7c4ffdaSLeon Romanovsky UA_ALLOC_AND_COPY),
2747f7c4ffdaSLeon Romanovsky UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
2748f7c4ffdaSLeon Romanovsky enum mlx5_ib_create_flow_flags,
2749f7c4ffdaSLeon Romanovsky UA_OPTIONAL));
2750f7c4ffdaSLeon Romanovsky
2751f7c4ffdaSLeon Romanovsky DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2752f7c4ffdaSLeon Romanovsky MLX5_IB_METHOD_DESTROY_FLOW,
2753f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
2754f7c4ffdaSLeon Romanovsky UVERBS_OBJECT_FLOW,
2755f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_DESTROY,
2756f7c4ffdaSLeon Romanovsky UA_MANDATORY));
2757f7c4ffdaSLeon Romanovsky
2758f7c4ffdaSLeon Romanovsky ADD_UVERBS_METHODS(mlx5_ib_fs,
2759f7c4ffdaSLeon Romanovsky UVERBS_OBJECT_FLOW,
2760f7c4ffdaSLeon Romanovsky &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
2761f7c4ffdaSLeon Romanovsky &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
2762f7c4ffdaSLeon Romanovsky
2763f7c4ffdaSLeon Romanovsky DECLARE_UVERBS_NAMED_METHOD(
2764f7c4ffdaSLeon Romanovsky MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
2765f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
2766f7c4ffdaSLeon Romanovsky UVERBS_OBJECT_FLOW_ACTION,
2767f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_NEW,
2768f7c4ffdaSLeon Romanovsky UA_MANDATORY),
2769f7c4ffdaSLeon Romanovsky UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
2770f7c4ffdaSLeon Romanovsky UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
2771f7c4ffdaSLeon Romanovsky set_add_copy_action_in_auto)),
2772f7c4ffdaSLeon Romanovsky UA_MANDATORY,
2773f7c4ffdaSLeon Romanovsky UA_ALLOC_AND_COPY),
2774f7c4ffdaSLeon Romanovsky UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
2775f7c4ffdaSLeon Romanovsky enum mlx5_ib_uapi_flow_table_type,
2776f7c4ffdaSLeon Romanovsky UA_MANDATORY));
2777f7c4ffdaSLeon Romanovsky
2778f7c4ffdaSLeon Romanovsky DECLARE_UVERBS_NAMED_METHOD(
2779f7c4ffdaSLeon Romanovsky MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
2780f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
2781f7c4ffdaSLeon Romanovsky UVERBS_OBJECT_FLOW_ACTION,
2782f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_NEW,
2783f7c4ffdaSLeon Romanovsky UA_MANDATORY),
2784f7c4ffdaSLeon Romanovsky UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
2785f7c4ffdaSLeon Romanovsky UVERBS_ATTR_MIN_SIZE(1),
2786f7c4ffdaSLeon Romanovsky UA_ALLOC_AND_COPY,
2787f7c4ffdaSLeon Romanovsky UA_OPTIONAL),
2788f7c4ffdaSLeon Romanovsky UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
2789f7c4ffdaSLeon Romanovsky enum mlx5_ib_uapi_flow_action_packet_reformat_type,
2790f7c4ffdaSLeon Romanovsky UA_MANDATORY),
2791f7c4ffdaSLeon Romanovsky UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
2792f7c4ffdaSLeon Romanovsky enum mlx5_ib_uapi_flow_table_type,
2793f7c4ffdaSLeon Romanovsky UA_MANDATORY));
2794f7c4ffdaSLeon Romanovsky
2795f7c4ffdaSLeon Romanovsky ADD_UVERBS_METHODS(
2796f7c4ffdaSLeon Romanovsky mlx5_ib_flow_actions,
2797f7c4ffdaSLeon Romanovsky UVERBS_OBJECT_FLOW_ACTION,
2798f7c4ffdaSLeon Romanovsky &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
2799f7c4ffdaSLeon Romanovsky &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));
2800f7c4ffdaSLeon Romanovsky
2801f7c4ffdaSLeon Romanovsky DECLARE_UVERBS_NAMED_METHOD(
2802f7c4ffdaSLeon Romanovsky MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
2803f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
2804f7c4ffdaSLeon Romanovsky MLX5_IB_OBJECT_FLOW_MATCHER,
2805f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_NEW,
2806f7c4ffdaSLeon Romanovsky UA_MANDATORY),
2807f7c4ffdaSLeon Romanovsky UVERBS_ATTR_PTR_IN(
2808f7c4ffdaSLeon Romanovsky MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
2809f7c4ffdaSLeon Romanovsky UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
2810f7c4ffdaSLeon Romanovsky UA_MANDATORY),
2811f7c4ffdaSLeon Romanovsky UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
2812f7c4ffdaSLeon Romanovsky mlx5_ib_flow_type,
2813f7c4ffdaSLeon Romanovsky UA_MANDATORY),
2814f7c4ffdaSLeon Romanovsky UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
2815f7c4ffdaSLeon Romanovsky UVERBS_ATTR_TYPE(u8),
2816f7c4ffdaSLeon Romanovsky UA_MANDATORY),
2817f7c4ffdaSLeon Romanovsky UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
2818f7c4ffdaSLeon Romanovsky enum ib_flow_flags,
2819f7c4ffdaSLeon Romanovsky UA_OPTIONAL),
2820f7c4ffdaSLeon Romanovsky UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
2821f7c4ffdaSLeon Romanovsky enum mlx5_ib_uapi_flow_table_type,
2822f7c4ffdaSLeon Romanovsky UA_OPTIONAL));
2823f7c4ffdaSLeon Romanovsky
2824f7c4ffdaSLeon Romanovsky DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2825f7c4ffdaSLeon Romanovsky MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
2826f7c4ffdaSLeon Romanovsky UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
2827f7c4ffdaSLeon Romanovsky MLX5_IB_OBJECT_FLOW_MATCHER,
2828f7c4ffdaSLeon Romanovsky UVERBS_ACCESS_DESTROY,
2829f7c4ffdaSLeon Romanovsky UA_MANDATORY));
2830f7c4ffdaSLeon Romanovsky
2831f7c4ffdaSLeon Romanovsky DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
2832f7c4ffdaSLeon Romanovsky UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
2833f7c4ffdaSLeon Romanovsky &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
2834f7c4ffdaSLeon Romanovsky &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
2835f7c4ffdaSLeon Romanovsky
28360c6ab0caSMark Bloch DECLARE_UVERBS_NAMED_METHOD(
28370c6ab0caSMark Bloch MLX5_IB_METHOD_STEERING_ANCHOR_CREATE,
28380c6ab0caSMark Bloch UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE,
28390c6ab0caSMark Bloch MLX5_IB_OBJECT_STEERING_ANCHOR,
28400c6ab0caSMark Bloch UVERBS_ACCESS_NEW,
28410c6ab0caSMark Bloch UA_MANDATORY),
28420c6ab0caSMark Bloch UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
28430c6ab0caSMark Bloch enum mlx5_ib_uapi_flow_table_type,
28440c6ab0caSMark Bloch UA_MANDATORY),
28450c6ab0caSMark Bloch UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
28460c6ab0caSMark Bloch UVERBS_ATTR_TYPE(u16),
28470c6ab0caSMark Bloch UA_MANDATORY),
28480c6ab0caSMark Bloch UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
28490c6ab0caSMark Bloch UVERBS_ATTR_TYPE(u32),
28500c6ab0caSMark Bloch UA_MANDATORY));
28510c6ab0caSMark Bloch
28520c6ab0caSMark Bloch DECLARE_UVERBS_NAMED_METHOD_DESTROY(
28530c6ab0caSMark Bloch MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
28540c6ab0caSMark Bloch UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE,
28550c6ab0caSMark Bloch MLX5_IB_OBJECT_STEERING_ANCHOR,
28560c6ab0caSMark Bloch UVERBS_ACCESS_DESTROY,
28570c6ab0caSMark Bloch UA_MANDATORY));
28580c6ab0caSMark Bloch
28590c6ab0caSMark Bloch DECLARE_UVERBS_NAMED_OBJECT(
28600c6ab0caSMark Bloch MLX5_IB_OBJECT_STEERING_ANCHOR,
28610c6ab0caSMark Bloch UVERBS_TYPE_ALLOC_IDR(steering_anchor_cleanup),
28620c6ab0caSMark Bloch &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE),
28630c6ab0caSMark Bloch &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
28640c6ab0caSMark Bloch
2865f7c4ffdaSLeon Romanovsky const struct uapi_definition mlx5_ib_flow_defs[] = {
2866f7c4ffdaSLeon Romanovsky UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2867f7c4ffdaSLeon Romanovsky MLX5_IB_OBJECT_FLOW_MATCHER),
2868f7c4ffdaSLeon Romanovsky UAPI_DEF_CHAIN_OBJ_TREE(
2869f7c4ffdaSLeon Romanovsky UVERBS_OBJECT_FLOW,
2870f7c4ffdaSLeon Romanovsky &mlx5_ib_fs),
2871f7c4ffdaSLeon Romanovsky UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
2872f7c4ffdaSLeon Romanovsky &mlx5_ib_flow_actions),
28730c6ab0caSMark Bloch UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
28740c6ab0caSMark Bloch MLX5_IB_OBJECT_STEERING_ANCHOR,
28750c6ab0caSMark Bloch UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
2876f7c4ffdaSLeon Romanovsky {},
2877f7c4ffdaSLeon Romanovsky };
2878f7c4ffdaSLeon Romanovsky
2879f7c4ffdaSLeon Romanovsky static const struct ib_device_ops flow_ops = {
2880f7c4ffdaSLeon Romanovsky .create_flow = mlx5_ib_create_flow,
2881f7c4ffdaSLeon Romanovsky .destroy_flow = mlx5_ib_destroy_flow,
2882f7c4ffdaSLeon Romanovsky .destroy_flow_action = mlx5_ib_destroy_flow_action,
2883f7c4ffdaSLeon Romanovsky };
2884f7c4ffdaSLeon Romanovsky
2885f7c4ffdaSLeon Romanovsky int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
2886f7c4ffdaSLeon Romanovsky {
2887f7c4ffdaSLeon Romanovsky dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
2888f7c4ffdaSLeon Romanovsky
2889f7c4ffdaSLeon Romanovsky if (!dev->flow_db)
2890f7c4ffdaSLeon Romanovsky return -ENOMEM;
2891f7c4ffdaSLeon Romanovsky
2892f7c4ffdaSLeon Romanovsky mutex_init(&dev->flow_db->lock);
2893f7c4ffdaSLeon Romanovsky
2894f7c4ffdaSLeon Romanovsky ib_set_device_ops(&dev->ib_dev, &flow_ops);
2895f7c4ffdaSLeon Romanovsky return 0;
2896f7c4ffdaSLeon Romanovsky }
2897