// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

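/* Query the e-switch vport context to retrieve the SW steering
 * RX and TX ICM base addresses of the given vport.
 */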
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
				       bool other_vport,
				       u16 vport_number,
				       u64 *icm_address_rx,
				       u64 *icm_address_tx)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	int err;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);

	err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
	if (err)
		return err;

	*icm_address_rx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_rx);
	*icm_address_tx =
		MLX5_GET64(query_esw_vport_context_out, out,
			   esw_vport_context.sw_steering_vport_icm_address_tx);
	return 0;
}

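/* Query the HCA general capabilities of a vport/function to retrieve
 * its GVMI (vhca_id).
 */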
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
			  u16 vport_number, u16 *gvmi)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	int out_size;
	void *out;
	int err;

	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	out = kzalloc(out_size, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
	MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
		 HCA_CAP_OPMOD_GET_CUR);

	err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
	if (err) {
		kfree(out);
		return err;
	}

	*gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);

	kfree(out);
	return 0;
}

int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
			      struct mlx5dr_esw_caps *caps)
{
	caps->drop_icm_address_rx =
		MLX5_CAP64_ESW_FLOWTABLE(mdev,
					 sw_steering_fdb_action_drop_icm_address_rx);
	caps->drop_icm_address_tx =
		MLX5_CAP64_ESW_FLOWTABLE(mdev,
					 sw_steering_fdb_action_drop_icm_address_tx);
	caps->uplink_icm_address_rx =
		MLX5_CAP64_ESW_FLOWTABLE(mdev,
					 sw_steering_uplink_icm_address_rx);
	caps->uplink_icm_address_tx =
		MLX5_CAP64_ESW_FLOWTABLE(mdev,
					 sw_steering_uplink_icm_address_tx);
	caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
	if (!caps->sw_owner_v2)
		caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);

	return 0;
}

static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
					  u16 vport, bool *roce_en)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	int err;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*roce_en = MLX5_GET(query_nic_vport_context_out, out,
			    nic_vport_context.roce_en);
	return 0;
}

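/* Gather all device capabilities that SW steering relies on: general HCA
 * caps, RoCE caps, flex parser IDs, NIC RX/TX flow table caps and the
 * SW ICM memory layout.
 */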
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
			    struct mlx5dr_cmd_caps *caps)
{
	bool roce_en;
	int err;

	caps->prio_tag_required	= MLX5_CAP_GEN(mdev, prio_tag_required);
	caps->eswitch_manager	= MLX5_CAP_GEN(mdev, eswitch_manager);
	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
	caps->sw_format_ver	= MLX5_CAP_GEN(mdev, steering_format_version);

	if (MLX5_CAP_GEN(mdev, roce)) {
		err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
		if (err)
			return err;

		caps->roce_caps.roce_en = roce_en;
		caps->roce_caps.fl_rc_qp_when_roce_disabled =
			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
		caps->roce_caps.fl_rc_qp_when_roce_enabled =
			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
	}

	caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);

	caps->support_modify_argument =
		MLX5_CAP_GEN_64(mdev, general_obj_types) &
		MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT;

	if (caps->support_modify_argument) {
		caps->log_header_modify_argument_granularity =
			MLX5_CAP_GEN(mdev, log_header_modify_argument_granularity);
		caps->log_header_modify_argument_max_alloc =
			MLX5_CAP_GEN(mdev, log_header_modify_argument_max_alloc);
	}

	/* geneve_tlv_option_0_exist is the indication of
	 * STE support for lookup type flex_parser_ok
	 */
	caps->flex_parser_ok_bits_supp =
		MLX5_CAP_FLOWTABLE(mdev,
				   flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
	}

	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
		caps->flex_parser_id_icmpv6_dw0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
		caps->flex_parser_id_icmpv6_dw1 =
			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
	}

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
		caps->flex_parser_id_geneve_tlv_option_0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
		caps->flex_parser_id_mpls_over_gre =
			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
		caps->flex_parser_id_mpls_over_udp =
			MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
		caps->flex_parser_id_gtpu_dw_0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
		caps->flex_parser_id_gtpu_teid =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
		caps->flex_parser_id_gtpu_dw_2 =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);

	if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
		caps->flex_parser_id_gtpu_first_ext_dw_0 =
			MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);

	caps->nic_rx_drop_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
	caps->nic_tx_drop_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
	caps->nic_tx_allow_address =
		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);

	caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
	caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);

	if (!caps->rx_sw_owner_v2)
		caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
	if (!caps->tx_sw_owner_v2)
		caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);

	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);

	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
	caps->hdr_modify_icm_addr =
		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);

	caps->log_modify_pattern_icm_size =
		MLX5_CAP_DEV_MEM(mdev, log_header_modify_pattern_sw_icm_size);

	caps->hdr_modify_pattern_icm_addr =
		MLX5_CAP64_DEV_MEM(mdev, header_modify_pattern_sw_icm_start_address);

	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);

	caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);

	return 0;
}

int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
				enum fs_flow_table_type type,
				u32 table_id,
				struct mlx5dr_cmd_query_flow_table_details *output)
{
	u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
	int err;

	MLX5_SET(query_flow_table_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_TABLE);

	MLX5_SET(query_flow_table_in, in, table_type, type);
	MLX5_SET(query_flow_table_in, in, table_id, table_id);

	err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
	if (err)
		return err;

	output->status = MLX5_GET(query_flow_table_out, out, status);
	output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);

	output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_1);
	output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
						 flow_table_context.sw_owner_icm_root_0);

	return 0;
}

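/* Query a flow sampler object and return its SW steering RX and TX
 * ICM addresses.
 */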
int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
				  u32 sampler_id,
				  u64 *rx_icm_addr,
				  u64 *tx_icm_addr)
{
	u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	void *attr;
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	attr = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);

	*rx_icm_addr = MLX5_GET64(sampler_obj, attr,
				  sw_steering_icm_address_rx);
	*tx_icm_addr = MLX5_GET64(sampler_obj, attr,
				  sw_steering_icm_address_tx);

	return 0;
}

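/* Issue a SYNC_STEERING command so the device observes all prior SW
 * steering writes to ICM. Skipped when the device is in internal error
 * state or during fast teardown.
 */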
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
{
	u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};

	/* Skip SYNC in case the device is in internal error state.
	 * Besides a device error, this also happens when we're
	 * in fast teardown.
	 */
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);

	return mlx5_cmd_exec_in(mdev, sync_steering, in);
}

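/* Write a single FTE that applies the given modify-header action and
 * forwards the packet to the given vport.
 */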
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context;
	unsigned int inlen;
	void *in_dests;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_type, table_type);
	MLX5_SET(set_fte_in, in, table_id, table_id);

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
	MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id);
	MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
	MLX5_SET(flow_context, in_flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	MLX5_SET(dest_format_struct, in_dests, destination_type,
		 MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
	MLX5_SET(dest_format_struct, in_dests, destination_id, vport);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	kvfree(in);

	return err;
}

int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
				    u32 table_type,
				    u32 table_id)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, table_type);
	MLX5_SET(delete_fte_in, in, table_id, table_id);

	return mlx5_cmd_exec_in(mdev, delete_fte, in);
}

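/* Allocate a modify-header context holding num_of_actions 8-byte actions. */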
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
				   u32 table_type,
				   u8 num_of_actions,
				   u64 *actions,
				   u32 *modify_header_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	void *p_actions;
	u32 inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
		num_of_actions * sizeof(u64);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);
	p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(p_actions, actions, num_of_actions * sizeof(u64));

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		goto out;

	*modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
				     modify_header_id);
out:
	kvfree(in);
	return err;
}

int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
				     u32 modify_header_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_header_id);

	return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
}

int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
				       u32 table_type,
				       u32 table_id,
				       u32 *group_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	u32 *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, table_type);
	MLX5_SET(create_flow_group_in, in, table_id, table_id);

	err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
	if (err)
		goto out;

	*group_id = MLX5_GET(create_flow_group_out, out, group_id);

out:
	kvfree(in);
	return err;
}

int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
				  u32 table_type,
				  u32 table_id,
				  u32 group_id)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
	MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);

	return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
}

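/* Create a flow table. For SW-owned tables the ICM root addresses are
 * taken from the attributes; for a FW-owned FDB table the FW-allocated
 * RX ICM address is returned in fdb_rx_icm_addr.
 */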
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
				 struct mlx5dr_cmd_create_flow_table_attr *attr,
				 u64 *fdb_rx_icm_addr,
				 u32 *table_id)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	void *ft_mdev;
	int err;

	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
	MLX5_SET(create_flow_table_in, in, uid, attr->uid);

	ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
	MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
	MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
	MLX5_SET(flow_table_context, ft_mdev, level, attr->level);

	if (attr->sw_owner) {
		/* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
		 * icm_addr_1 used for FDB TX
		 */
		if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
			MLX5_SET64(flow_table_context, ft_mdev,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
		} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
			MLX5_SET64(flow_table_context, ft_mdev,
				   sw_owner_icm_root_0, attr->icm_addr_tx);
		} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
			MLX5_SET64(flow_table_context, ft_mdev,
				   sw_owner_icm_root_0, attr->icm_addr_rx);
			MLX5_SET64(flow_table_context, ft_mdev,
				   sw_owner_icm_root_1, attr->icm_addr_tx);
		}
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 attr->decap_en);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 attr->reformat_en);

	err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
	if (err)
		return err;

	*table_id = MLX5_GET(create_flow_table_out, out, table_id);
	if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
	    fdb_rx_icm_addr)
		*fdb_rx_icm_addr =
		(u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
		(u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
		(u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;

	return 0;
}

int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
				  u32 table_id,
				  u32 table_type)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
	MLX5_SET(destroy_flow_table_in, in, table_id, table_id);

	return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}

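/* Allocate a packet reformat (encap/decap) context of the given type and
 * copy reformat_data into the command buffer.
 */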
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
				   enum mlx5_reformat_ctx_type rt,
				   u8 reformat_param_0,
				   u8 reformat_param_1,
				   size_t reformat_size,
				   void *reformat_data,
				   u32 *reformat_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	size_t inlen, cmd_data_sz, cmd_total_sz;
	void *prctx;
	void *pdata;
	void *in;
	int err;

	cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
	cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
					packet_reformat_context.reformat_data);
	inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);

	prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
	pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);

	MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, reformat_param_0);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1, reformat_param_1);
	MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
	if (reformat_data && reformat_size)
		memcpy(pdata, reformat_data, reformat_size);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		goto err_free_in;

	*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);

err_free_in:
	/* Free the command buffer on both success and failure; the early
	 * return on error previously leaked it.
	 */
	kvfree(in);
	return err;
}

void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
				     u32 reformat_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 reformat_id);

	mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}

static void dr_cmd_set_definer_format(void *ptr, u16 format_id,
				      u8 *dw_selectors,
				      u8 *byte_selectors)
{
	if (format_id != MLX5_IFC_DEFINER_FORMAT_ID_SELECT)
		return;

	MLX5_SET(match_definer, ptr, format_select_dw0, dw_selectors[0]);
	MLX5_SET(match_definer, ptr, format_select_dw1, dw_selectors[1]);
	MLX5_SET(match_definer, ptr, format_select_dw2, dw_selectors[2]);
	MLX5_SET(match_definer, ptr, format_select_dw3, dw_selectors[3]);
	MLX5_SET(match_definer, ptr, format_select_dw4, dw_selectors[4]);
	MLX5_SET(match_definer, ptr, format_select_dw5, dw_selectors[5]);
	MLX5_SET(match_definer, ptr, format_select_dw6, dw_selectors[6]);
	MLX5_SET(match_definer, ptr, format_select_dw7, dw_selectors[7]);
	MLX5_SET(match_definer, ptr, format_select_dw8, dw_selectors[8]);

	MLX5_SET(match_definer, ptr, format_select_byte0, byte_selectors[0]);
	MLX5_SET(match_definer, ptr, format_select_byte1, byte_selectors[1]);
	MLX5_SET(match_definer, ptr, format_select_byte2, byte_selectors[2]);
	MLX5_SET(match_definer, ptr, format_select_byte3, byte_selectors[3]);
	MLX5_SET(match_definer, ptr, format_select_byte4, byte_selectors[4]);
	MLX5_SET(match_definer, ptr, format_select_byte5, byte_selectors[5]);
	MLX5_SET(match_definer, ptr, format_select_byte6, byte_selectors[6]);
	MLX5_SET(match_definer, ptr, format_select_byte7, byte_selectors[7]);
}

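/* Create a match definer object. For the SELECT format the DW and byte
 * selectors are programmed as well.
 */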
int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
			      u16 format_id,
			      u8 *dw_selectors,
			      u8 *byte_selectors,
			      u8 *match_mask,
			      u32 *definer_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
	void *ptr;
	int err;

	ptr = MLX5_ADDR_OF(create_match_definer_in, in,
			   general_obj_in_cmd_hdr);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);

	ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
	MLX5_SET(match_definer, ptr, format_id, format_id);

	dr_cmd_set_definer_format(ptr, format_id,
				  dw_selectors, byte_selectors);

	ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
	memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return 0;
}

void
mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev, u32 definer_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr)
{
	u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
	int err;

	MLX5_SET(query_roce_address_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);

	MLX5_SET(query_roce_address_in, in, roce_address_index, index);
	MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);

	err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
	if (err)
		return err;

	memcpy(&attr->gid,
	       MLX5_ADDR_OF(query_roce_address_out,
			    out, roce_address.source_l3_address),
	       sizeof(attr->gid));
	memcpy(attr->mac,
	       MLX5_ADDR_OF(query_roce_address_out, out,
			    roce_address.source_mac_47_32),
	       sizeof(attr->mac));

	if (MLX5_GET(query_roce_address_out, out,
		     roce_address.roce_version) == MLX5_ROCE_VERSION_2)
		attr->roce_ver = MLX5_ROCE_VERSION_2;
	else
		attr->roce_ver = MLX5_ROCE_VERSION_1;

	return 0;
}

int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
					u16 log_obj_range, u32 pd,
					u32 *obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	void *attr;
	int ret;

	attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
	MLX5_SET(general_obj_in_cmd_hdr, attr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, attr, obj_type,
		 MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
	MLX5_SET(general_obj_in_cmd_hdr, attr,
		 op_param.create.log_obj_range, log_obj_range);

	attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
	MLX5_SET(modify_header_arg, attr, access_pd, pd);

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return 0;
}

void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
					  u32 obj_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

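/* An FTE needs the extended destination format when it has more than one
 * forward destination and at least one of them requires a per-destination
 * packet reformat; also validate that FW supports enough encap destinations.
 */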
static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
					struct mlx5dr_cmd_fte_info *fte,
					bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	int num_encap = 0;
	int i;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;
	for (i = 0; i < fte->dests_size; i++) {
		if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
		    fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE)
			continue;
		if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		     fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		    fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}

	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}

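/* Build and execute a SET_FLOW_TABLE_ENTRY command from the FTE
 * description: flow context, match value, forward destinations and
 * flow counters.
 */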
int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
	void *in_flow_context, *vlan;
	bool extended_dest = false;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;
	int i;

	if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
			enum mlx5_ifc_flow_destination_type ifc_type;
			unsigned int id;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_NONE:
				continue;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = fte->dest_arr[i].ft_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = fte->dest_arr[i].ft_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;

				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
					id = fte->dest_arr[i].vport.num;
					MLX5_SET(dest_format_struct, in_dests,
						 destination_eswitch_owner_vhca_id_valid,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_VHCA_ID));
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
				} else {
					id = 0;
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
					MLX5_SET(dest_format_struct, in_dests,
						 destination_eswitch_owner_vhca_id_valid, 1);
				}
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 fte->dest_arr[i].vport.vhca_id);
				if (extended_dest && (fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(fte->dest_arr[i].vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 fte->dest_arr[i].vport.reformat_id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = fte->dest_arr[i].sampler_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
				break;
			default:
				id = fte->dest_arr[i].tir_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 ifc_type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		for (i = 0; i < fte->dests_size; i++) {
			if (fte->dest_arr[i].type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 fte->dest_arr[i].counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}