// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

/* Inter-Driver Communication */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

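/* xarray handing out unique IDs for auxiliary devices; the ALLOC1
 * variant starts allocation at index 1 rather than 0.
 */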
static DEFINE_XARRAY_ALLOC1(ice_aux_id);

/**
 * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
 * @pf: pointer to PF struct
 *
 * This function has to be called with a device_lock on the
 * pf->adev.dev to avoid race conditions.
 */
static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	adev = pf->adev;
	if (!adev || !adev->dev.driver)
		return NULL;

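	/* The aux driver embeds struct auxiliary_driver as 'adrv', so walk
	 * back from the bound device_driver to the enclosing wrapper.
	 */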
	return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
			    adrv.driver);
}

/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	if (WARN_ON_ONCE(!in_task()))
		return;

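	/* pf->adev is published and cleared under adev_mutex; hold it for
	 * the whole notification so the device cannot go away mid-call.
	 */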
	mutex_lock(&pf->adev_mutex);
	if (!pf->adev)
		goto finish;

	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
finish:
	mutex_unlock(&pf->adev_mutex);
}

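/*
 * A minimal consumer-side sketch, assuming the iidc_auxiliary_drv layout
 * used above; the irdma_* names and the handler body are hypothetical:
 *
 *	static void irdma_event_handler(struct ice_pf *pf,
 *					struct iidc_event *event)
 *	{
 *		// react to the bits set in event->type
 *	}
 *
 *	static struct iidc_auxiliary_drv irdma_adrv = {
 *		.event_handler = irdma_event_handler,
 *		// .adrv.probe / .adrv.remove wired up as usual
 *	};
 */
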
/**
 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be allocated
 */
int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	struct ice_vsi *vsi;
	struct device *dev;
	u32 qset_teid;
	u16 qs_handle;
	int status;
	int i;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);

	if (!ice_is_rdma_ena(pf))
		return -EINVAL;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "RDMA QSet invalid VSI\n");
		return -EINVAL;
	}

	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;

	max_rdmaqs[qset->tc]++;
	qs_handle = qset->qs_handle;

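	/* Two-step flow: size the VSI's RDMA queue config per TC, then
	 * enable the Qset in the scheduler tree to obtain its TEID.
	 */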
	status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				  max_rdmaqs);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset config\n");
		return status;
	}

	status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
				       &qs_handle, 1, &qset_teid);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset enable\n");
		return status;
	}
	vsi->qset_handle[qset->tc] = qset->qs_handle;
	qset->teid = qset_teid;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_add_rdma_qset);

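/*
 * A hedged usage sketch from the RDMA driver side; the field values are
 * illustrative assumptions, not taken from a real caller:
 *
 *	struct iidc_rdma_qset_params qset = { .qs_handle = 0, .tc = 0 };
 *
 *	err = ice_add_rdma_qset(pf, &qset);
 *	if (!err)
 *		use_teid(qset.teid);	// TEID is filled in on success
 */
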
/**
 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be freed
 */
int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	struct ice_vsi *vsi;
	u32 teid;
	u16 q_id;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	vsi = ice_find_vsi(pf, qset->vport_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
		return -EINVAL;
	}

	q_id = qset->qs_handle;
	teid = qset->teid;

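	/* Drop the cached handle before tearing down the scheduler node */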
	vsi->qset_handle[qset->tc] = 0;

	return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);

/**
 * ice_rdma_request_reset - accept request from RDMA to perform a reset
 * @pf: struct for PF
 * @reset_type: type of reset
 */
int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
{
	enum ice_reset_req reset;

	if (WARN_ON(!pf))
		return -EINVAL;

	switch (reset_type) {
	case IIDC_PFR:
		reset = ICE_RESET_PFR;
		break;
	case IIDC_CORER:
		reset = ICE_RESET_CORER;
		break;
	case IIDC_GLOBR:
		reset = ICE_RESET_GLOBR;
		break;
	default:
		dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
		return -EINVAL;
	}

	return ice_schedule_reset(pf, reset);
}
EXPORT_SYMBOL_GPL(ice_rdma_request_reset);

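/*
 * Illustrative only: the auxiliary driver maps its IIDC reset type onto
 * an ice reset via this hook, e.g. ice_rdma_request_reset(pf, IIDC_PFR);
 */
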
/**
 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
 * @pf: pointer to struct for PF
 * @vsi_id: VSI HW idx to update filter on
 * @enable: bool whether to enable or disable filters
 */
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
{
	struct ice_vsi *vsi;
	int status;

	if (WARN_ON(!pf))
		return -EINVAL;

	vsi = ice_find_vsi(pf, vsi_id);
	if (!vsi)
		return -EINVAL;

	status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
			enable ? "en" : "dis");
	} else {
		if (enable)
			vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		else
			vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	}

	return status;
}
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);

/**
 * ice_get_qos_params - parse QoS params for RDMA consumption
 * @pf: pointer to PF struct
 * @qos: set of QoS values
 */
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
{
	struct ice_dcbx_cfg *dcbx_cfg;
	unsigned int i;
	u32 up2tc;

	dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);

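	/* PRTDCB_TUP2TC packs one 3-bit TC index per user priority */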
	qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
		qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];

	qos->pfc_mode = dcbx_cfg->pfc_mode;
	if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
		for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
			qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);

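/*
 * A hedged consumer sketch; the loop bound and fields mirror the fill-in
 * above, the rest is illustrative:
 *
 *	struct iidc_qos_params qos = {};
 *	unsigned int i;
 *
 *	ice_get_qos_params(pf, &qos);
 *	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
 *		pr_info("UP %u -> TC %u\n", i, qos.up2tc[i]);
 */
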
/**
 * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
 * @pf: board private structure to initialize
 */
static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
{
	if (ice_is_rdma_ena(pf)) {
		int i;

		pf->msix_entries = kcalloc(pf->num_rdma_msix,
					   sizeof(*pf->msix_entries),
					   GFP_KERNEL);
		if (!pf->msix_entries)
			return -ENOMEM;

		/* RDMA is the only user of pf->msix_entries array */
		pf->rdma_base_vector = 0;

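		/* Map one MSI-X interrupt per RDMA vector; if allocation
		 * fails the loop stops and the entries mapped so far are
		 * kept.
		 */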
		for (i = 0; i < pf->num_rdma_msix; i++) {
			struct msix_entry *entry = &pf->msix_entries[i];
			struct msi_map map;

			map = ice_alloc_irq(pf, false);
			if (map.index < 0)
				break;

			entry->entry = map.index;
			entry->vector = map.virq;
		}
	}
	return 0;
}

/**
 * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
 * @pf: board private structure to initialize
 */
static void ice_free_rdma_qvector(struct ice_pf *pf)
{
	int i;

	if (!pf->msix_entries)
		return;

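	/* Rebuild a msi_map from each stored entry so ice_free_irq() can
	 * release both the interrupt index and the Linux IRQ number.
	 */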
	for (i = 0; i < pf->num_rdma_msix; i++) {
		struct msi_map map;

		map.index = pf->msix_entries[i].entry;
		map.virq = pf->msix_entries[i].vector;
		ice_free_irq(pf, map);
	}

	kfree(pf->msix_entries);
	pf->msix_entries = NULL;
}

/**
 * ice_adev_release - function to be mapped to AUX dev's release op
 * @dev: pointer to device to free
 */
static void ice_adev_release(struct device *dev)
{
	struct iidc_auxiliary_dev *iadev;

	iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
	kfree(iadev);
}

/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_auxiliary_dev *iadev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_rdma_ena(pf))
		return 0;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
	adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ret;
	}

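	/* Publish the device under adev_mutex so that readers such as
	 * ice_send_event_to_aux() never observe a half-initialized adev.
	 */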
	mutex_lock(&pf->adev_mutex);
	pf->adev = adev;
	mutex_unlock(&pf->adev_mutex);

	return 0;
}

/**
 * ice_unplug_aux_dev - unregister and free AUX device
 * @pf: pointer to pf struct
 */
void ice_unplug_aux_dev(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

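	/* Detach the device from the PF under the mutex, but perform the
	 * (potentially sleeping) teardown outside of it.
	 */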
	mutex_lock(&pf->adev_mutex);
	adev = pf->adev;
	pf->adev = NULL;
	mutex_unlock(&pf->adev_mutex);

	if (adev) {
		auxiliary_device_delete(adev);
		auxiliary_device_uninit(adev);
	}
}

/**
 * ice_init_rdma - initializes PF for RDMA use
 * @pf: ptr to ice_pf
 */
int ice_init_rdma(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	int ret;

	if (!ice_is_rdma_ena(pf)) {
		dev_warn(dev, "RDMA is not supported on this device\n");
		return 0;
	}

	ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
		       GFP_KERNEL);
	if (ret) {
		dev_err(dev, "Failed to allocate device ID for AUX driver\n");
		return -ENOMEM;
	}

	/* Reserve vector resources */
	ret = ice_alloc_rdma_qvectors(pf);
	if (ret < 0) {
		dev_err(dev, "failed to reserve vectors for RDMA\n");
		goto err_reserve_rdma_qvector;
	}
	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		goto err_plug_aux_dev;
	return 0;

err_plug_aux_dev:
	ice_free_rdma_qvector(pf);
err_reserve_rdma_qvector:
	pf->adev = NULL;
	xa_erase(&ice_aux_id, pf->aux_idx);
	return ret;
}

/**
 * ice_deinit_rdma - deinitialize RDMA on PF
 * @pf: ptr to ice_pf
 */
void ice_deinit_rdma(struct ice_pf *pf)
{
	if (!ice_is_rdma_ena(pf))
		return;

	ice_unplug_aux_dev(pf);
	ice_free_rdma_qvector(pf);
	xa_erase(&ice_aux_id, pf->aux_idx);
}