// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

/* Inter-Driver Communication */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
 * @pf: pointer to PF struct
 *
 * This function has to be called with a device_lock on the
 * pf->adev.dev to avoid race conditions.
 */
static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	adev = pf->adev;
	if (!adev || !adev->dev.driver)
		return NULL;

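	/* The RDMA auxiliary driver embeds its struct auxiliary_driver in
	 * struct iidc_auxiliary_drv, so recover the wrapper from the driver
	 * currently bound to the auxiliary device.
	 */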
	return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
			    adrv.driver);
}

/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	if (WARN_ON_ONCE(!in_task()))
		return;

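	/* adev_mutex stabilizes pf->adev; the device_lock below keeps the
	 * auxiliary driver from unbinding while its event handler runs.
	 */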
	mutex_lock(&pf->adev_mutex);
	if (!pf->adev)
		goto finish;

	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
finish:
	mutex_unlock(&pf->adev_mutex);
}

/**
 * ice_find_vsi - Find the VSI from VSI ID
 * @pf: The PF pointer to search in
 * @vsi_num: The VSI ID to search for
 */
static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
			return pf->vsi[i];
	return NULL;
}

/**
 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be allocated
 */
int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	struct ice_vsi *vsi;
	struct device *dev;
	u32 qset_teid;
	u16 qs_handle;
	int status;
	int i;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);

	if (!ice_is_rdma_ena(pf))
		return -EINVAL;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "RDMA QSet invalid VSI\n");
		return -EINVAL;
	}

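	/* Request a single RDMA qset on the requested TC; all other TCs
	 * keep a count of zero when the scheduler config is updated.
	 */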
	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;

	max_rdmaqs[qset->tc]++;
	qs_handle = qset->qs_handle;

	status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				  max_rdmaqs);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset config\n");
		return status;
	}

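	/* Add the qset leaf node to the scheduler tree and report the
	 * resulting TEID back to the caller via qset->teid.
	 */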
	status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
				       &qs_handle, 1, &qset_teid);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset enable\n");
		return status;
	}
	vsi->qset_handle[qset->tc] = qset->qs_handle;
	qset->teid = qset_teid;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_add_rdma_qset);

/**
 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be freed
 */
int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	struct ice_vsi *vsi;
	u32 teid;
	u16 q_id;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	vsi = ice_find_vsi(pf, qset->vport_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
		return -EINVAL;
	}

	q_id = qset->qs_handle;
	teid = qset->teid;

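	/* Clear the cached qset handle before removing the leaf node that
	 * was added by ice_add_rdma_qset().
	 */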
	vsi->qset_handle[qset->tc] = 0;

	return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);

/**
 * ice_rdma_request_reset - accept request from RDMA to perform a reset
 * @pf: struct for PF
 * @reset_type: type of reset
 */
int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
{
	enum ice_reset_req reset;

	if (WARN_ON(!pf))
		return -EINVAL;

	switch (reset_type) {
	case IIDC_PFR:
		reset = ICE_RESET_PFR;
		break;
	case IIDC_CORER:
		reset = ICE_RESET_CORER;
		break;
	case IIDC_GLOBR:
		reset = ICE_RESET_GLOBR;
		break;
	default:
		dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
		return -EINVAL;
	}

	return ice_schedule_reset(pf, reset);
}
EXPORT_SYMBOL_GPL(ice_rdma_request_reset);

/**
 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
 * @pf: pointer to struct for PF
 * @vsi_id: VSI HW idx to update filter on
 * @enable: bool whether to enable or disable filters
 */
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
{
	struct ice_vsi *vsi;
	int status;

	if (WARN_ON(!pf))
		return -EINVAL;

	vsi = ice_find_vsi(pf, vsi_id);
	if (!vsi)
		return -EINVAL;

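	/* Program the RDMA (PE) filter in HW, then mirror the result in the
	 * cached VSI queue-option flags on success.
	 */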
	status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
			enable ? "en" : "dis");
	} else {
		if (enable)
			vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		else
			vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	}

	return status;
}
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);

/**
 * ice_get_qos_params - parse QoS params for RDMA consumption
 * @pf: pointer to PF struct
 * @qos: set of QoS values
 */
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
{
	struct ice_dcbx_cfg *dcbx_cfg;
	unsigned int i;
	u32 up2tc;

	dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);

	qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
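	/* PRTDCB_TUP2TC packs one 3-bit TC index per user priority */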
	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
		qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];

	qos->pfc_mode = dcbx_cfg->pfc_mode;
	if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
		for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
			qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);

/**
 * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
 * @pf: board private structure to initialize
 */
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
	if (ice_is_rdma_ena(pf)) {
		int index;

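		/* Carve num_rdma_msix vectors out of the PF's interrupt
		 * tracker and remember where the RDMA block begins.
		 */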
		index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
				    ICE_RES_RDMA_VEC_ID);
		if (index < 0)
			return index;
		pf->num_avail_sw_msix -= pf->num_rdma_msix;
		pf->rdma_base_vector = (u16)index;
	}
	return 0;
}

/**
 * ice_adev_release - function to be mapped to AUX dev's release op
 * @dev: pointer to device to free
 */
static void ice_adev_release(struct device *dev)
{
	struct iidc_auxiliary_dev *iadev;

	iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
	kfree(iadev);
}

/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_auxiliary_dev *iadev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_rdma_ena(pf))
		return 0;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
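	/* The name, combined with the module name prefix, determines which
	 * RDMA auxiliary driver binds to this device.
	 */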
	adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ret;
	}

	mutex_lock(&pf->adev_mutex);
	pf->adev = adev;
	mutex_unlock(&pf->adev_mutex);

	return 0;
}

/**
 * ice_unplug_aux_dev - unregister and free AUX device
 * @pf: pointer to pf struct
 */
void ice_unplug_aux_dev(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	mutex_lock(&pf->adev_mutex);
	adev = pf->adev;
	pf->adev = NULL;
	mutex_unlock(&pf->adev_mutex);

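	/* pf->adev was detached under adev_mutex above; tear the device
	 * down outside of the lock.
	 */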
	if (adev) {
		auxiliary_device_delete(adev);
		auxiliary_device_uninit(adev);
	}
}

/**
 * ice_init_rdma - initializes PF for RDMA use
 * @pf: ptr to ice_pf
 */
int ice_init_rdma(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	int ret;

	/* Reserve vector resources */
	ret = ice_reserve_rdma_qvector(pf);
	if (ret < 0) {
		dev_err(dev, "failed to reserve vectors for RDMA\n");
		return ret;
	}
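	/* Default the PF to RoCEv2 before creating the auxiliary device */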
	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
	return ice_plug_aux_dev(pf);
}