// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */

#include <linux/mlx5/vport.h>
#include <rdma/ib_verbs.h>
#include <net/addrconf.h>

#include "lib/mlx5.h"
#include "eswitch.h"
#include "fs_core.h"
#include "rdma.h"

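/* Tear down the RoCE allow rule and its flow group and flow table, in
 * reverse order of their creation in mlx5_rdma_enable_roce_steering().
 */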
static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev)
{
	struct mlx5_core_roce *roce = &dev->priv.roce;

	mlx5_del_flow_rules(roce->allow_rule);
	mlx5_destroy_flow_group(roce->fg);
	mlx5_destroy_flow_table(roce->ft);
}

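/* Create a single-entry flow table in the RDMA RX kernel namespace whose
 * only rule allows traffic coming from the eswitch manager vport, and
 * stash the created objects in dev->priv.roce for later teardown.
 */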
static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_roce *roce = &dev->priv.roce;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	void *match_criteria;
	u32 *flow_group_in;
	void *misc;
	int err;

	if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	      MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)))
		return -EOPNOTSUPP;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		kvfree(flow_group_in);
		return -ENOMEM;
	}

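	/* Rules are installed in the kernel namespace of the RDMA RX
	 * steering domain; without it the allow rule cannot be created.
	 */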
	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL);
	if (!ns) {
		mlx5_core_err(dev, "Failed to get RDMA RX namespace\n");
		err = -EOPNOTSUPP;
		goto free;
	}

	ft_attr.max_fte = 1;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(dev, "Failed to create RDMA RX flow table\n");
		err = PTR_ERR(ft);
		goto free;
	}

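	/* The flow group matches on the misc source_port field so the
	 * single entry below can key on the originating vport.
	 */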
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);

	fg = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(fg)) {
		err = PTR_ERR(fg);
		mlx5_core_err(dev, "Failed to create RDMA RX flow group err(%d)\n", err);
		goto destroy_flow_table;
	}

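	/* Allow entry: match packets whose source vport is the eswitch
	 * manager vport and let them through.
	 */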
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port,
		 dev->priv.eswitch->manager_vport);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		mlx5_core_err(dev, "Failed to add RoCE allow rule, err=%d\n",
			      err);
		goto destroy_flow_group;
	}

	kvfree(spec);
	kvfree(flow_group_in);
	roce->ft = ft;
	roce->fg = fg;
	roce->allow_rule = flow_rule;

	return 0;

destroy_flow_group:
	mlx5_destroy_flow_group(fg);
destroy_flow_table:
	mlx5_destroy_flow_table(ft);
free:
	kvfree(spec);
	kvfree(flow_group_in);
	return err;
}

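/* Clear GID index 0, removing the default RoCE address programmed by
 * mlx5_rdma_add_roce_addr().
 */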
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
	mlx5_core_roce_gid_set(dev, 0, 0, 0,
			       NULL, NULL, false, 0, 1);
}

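/* Build the default link-local GID: the fe80::/64 prefix plus an EUI-64
 * interface identifier derived from the port MAC address.
 */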
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
{
	u8 hw_id[ETH_ALEN];

	mlx5_query_mac_address(dev, hw_id);
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_addr_eui48(&gid->raw[8], hw_id);
}

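/* Program the default GID at index 0 as a RoCE v1 address. */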
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
	union ib_gid gid;
	u8 mac[ETH_ALEN];

	mlx5_rdma_make_default_gid(dev, &gid);
	/* Use the port MAC rather than an uninitialized buffer for the
	 * source L2 address of the GID entry.
	 */
	mlx5_query_mac_address(dev, mac);
	return mlx5_core_roce_gid_set(dev, 0,
				      MLX5_ROCE_VERSION_1,
				      0, gid.raw, mac,
				      false, 0, 1);
}

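/* Undo mlx5_rdma_enable_roce(): remove steering, the default GID and the
 * vport RoCE state. roce->ft also serves as the "RoCE was enabled" flag.
 */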
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
{
	struct mlx5_core_roce *roce = &dev->priv.roce;

	if (!roce->ft)
		return;

	mlx5_rdma_disable_roce_steering(dev);
	mlx5_rdma_del_roce_addr(dev);
	mlx5_nic_vport_disable_roce(dev);
}

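/* Best-effort RoCE bring-up: enable RoCE on the NIC vport, program the
 * default GID and install the allow steering rule. Failures are logged
 * and rolled back rather than propagated to the caller.
 */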
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_nic_vport_enable_roce(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
		return;
	}

	err = mlx5_rdma_add_roce_addr(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to add RoCE address: %d\n", err);
		goto disable_roce;
	}

	err = mlx5_rdma_enable_roce_steering(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to enable RoCE steering: %d\n", err);
		goto del_roce_addr;
	}

	return;

del_roce_addr:
	mlx5_rdma_del_roce_addr(dev);
disable_roce:
	mlx5_nic_vport_disable_roce(dev);
}