1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
3 #include <linux/mlx5/driver.h>
4 #include "vhca_event.h"
5 #include "priv.h"
6 #include "sf.h"
7 #include "mlx5_ifc_vhca_event.h"
8 #include "vhca_event.h"
9 #include "ecpf.h"
10 
/* One entry of the SF hardware function id table; indexed by software id. */
struct mlx5_sf_hw {
	u32 usr_sfnum;		/* User-assigned SF number; unique across allocated entries. */
	u8 allocated: 1;	/* Entry (and its hardware function id) is in use. */
	u8 pending_delete: 1;	/* Freed by user while SF active; recycled by vhca event handler. */
};
16 
/* Per-device table tracking allocation state of SF hardware function ids. */
struct mlx5_sf_hw_table {
	struct mlx5_core_dev *dev;
	struct mlx5_sf_hw *sfs;		/* Array of max_local_functions entries. */
	int max_local_functions;	/* Capacity of sfs[]; 0 when SFs unsupported. */
	u8 ecpu: 1;			/* Embedded-cpu flag passed to vhca commands. */
	struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
	struct notifier_block vhca_nb;	/* Registered vhca state event callback. */
};
25 
26 u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id)
27 {
28 	return sw_id + mlx5_sf_start_function_id(dev);
29 }
30 
31 static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id)
32 {
33 	return hw_id - mlx5_sf_start_function_id(dev);
34 }
35 
36 int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
37 {
38 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
39 	int sw_id = -ENOSPC;
40 	u16 hw_fn_id;
41 	int err;
42 	int i;
43 
44 	if (!table->max_local_functions)
45 		return -EOPNOTSUPP;
46 
47 	mutex_lock(&table->table_lock);
48 	/* Check if sf with same sfnum already exists or not. */
49 	for (i = 0; i < table->max_local_functions; i++) {
50 		if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) {
51 			err = -EEXIST;
52 			goto exist_err;
53 		}
54 	}
55 
56 	/* Find the free entry and allocate the entry from the array */
57 	for (i = 0; i < table->max_local_functions; i++) {
58 		if (!table->sfs[i].allocated) {
59 			table->sfs[i].usr_sfnum = usr_sfnum;
60 			table->sfs[i].allocated = true;
61 			sw_id = i;
62 			break;
63 		}
64 	}
65 	if (sw_id == -ENOSPC) {
66 		err = -ENOSPC;
67 		goto err;
68 	}
69 
70 	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
71 	err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id);
72 	if (err)
73 		goto err;
74 
75 	err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum);
76 	if (err)
77 		goto vhca_err;
78 
79 	mutex_unlock(&table->table_lock);
80 	return sw_id;
81 
82 vhca_err:
83 	mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
84 err:
85 	table->sfs[i].allocated = false;
86 exist_err:
87 	mutex_unlock(&table->table_lock);
88 	return err;
89 }
90 
91 static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id)
92 {
93 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
94 	u16 hw_fn_id;
95 
96 	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id);
97 	mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
98 	table->sfs[id].allocated = false;
99 	table->sfs[id].pending_delete = false;
100 }
101 
/* Immediately free the hardware id of software entry @id, serialized
 * against the vhca event handler by table_lock.
 */
void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	mutex_lock(&table->table_lock);
	_mlx5_sf_hw_id_free(dev, id);
	mutex_unlock(&table->table_lock);
}
110 
111 void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
112 {
113 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
114 	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
115 	u16 hw_fn_id;
116 	u8 state;
117 	int err;
118 
119 	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
120 	mutex_lock(&table->table_lock);
121 	err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out));
122 	if (err)
123 		goto err;
124 	state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
125 	if (state == MLX5_VHCA_STATE_ALLOCATED) {
126 		mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
127 		table->sfs[id].allocated = false;
128 	} else {
129 		table->sfs[id].pending_delete = true;
130 	}
131 err:
132 	mutex_unlock(&table->table_lock);
133 }
134 
135 static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table)
136 {
137 	int i;
138 
139 	for (i = 0; i < table->max_local_functions; i++) {
140 		if (table->sfs[i].allocated)
141 			_mlx5_sf_hw_id_free(table->dev, i);
142 	}
143 }
144 
145 int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
146 {
147 	struct mlx5_sf_hw_table *table;
148 	struct mlx5_sf_hw *sfs;
149 	int max_functions;
150 
151 	if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev))
152 		return 0;
153 
154 	max_functions = mlx5_sf_max_functions(dev);
155 	table = kzalloc(sizeof(*table), GFP_KERNEL);
156 	if (!table)
157 		return -ENOMEM;
158 
159 	sfs = kcalloc(max_functions, sizeof(*sfs), GFP_KERNEL);
160 	if (!sfs)
161 		goto table_err;
162 
163 	mutex_init(&table->table_lock);
164 	table->dev = dev;
165 	table->sfs = sfs;
166 	table->max_local_functions = max_functions;
167 	table->ecpu = mlx5_read_embedded_cpu(dev);
168 	dev->priv.sf_hw_table = table;
169 	mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
170 	return 0;
171 
172 table_err:
173 	kfree(table);
174 	return -ENOMEM;
175 }
176 
177 void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
178 {
179 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
180 
181 	if (!table)
182 		return;
183 
184 	mutex_destroy(&table->table_lock);
185 	kfree(table->sfs);
186 	kfree(table);
187 }
188 
/* vhca state change notifier: recycle the hardware id of an SF whose
 * deletion was deferred, once its vhca returns to the ALLOCATED state.
 * Always returns 0 so the notifier chain continues.
 */
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	struct mlx5_sf_hw *sf_hw;
	u16 sw_id;

	if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
		return 0;

	/* NOTE(review): event->function_id is indexed into sfs[] without a
	 * range check — presumably vhca events are only delivered for this
	 * device's SF function ids; confirm against the event source.
	 */
	sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id);
	sf_hw = &table->sfs[sw_id];

	mutex_lock(&table->table_lock);
	/* SF driver notified through firmware that SF is finally detached.
	 * Hence recycle the sf hardware id for reuse.
	 */
	if (sf_hw->allocated && sf_hw->pending_delete)
		_mlx5_sf_hw_id_free(table->dev, sw_id);
	mutex_unlock(&table->table_lock);
	return 0;
}
211 
/* Register the vhca state event handler for the SF table.
 * Returns 0 when no table exists (SFs unsupported), otherwise the
 * registration result.
 */
int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	if (!table)
		return 0;

	table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
	return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
}
222 
/* Unregister the vhca event handler and release all remaining hardware
 * ids. Unregistration must precede the dealloc so no handler runs
 * concurrently with the final sweep.
 */
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	if (!table)
		return;

	mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
	/* Dealloc SFs whose firmware event has been missed. */
	mlx5_sf_hw_dealloc_all(table);
}
234