Lines Matching refs:pf (references to the struct ice_pf *pf argument; each entry shows the source line number, the matched line, and the enclosing function)

18 static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)  in ice_get_auxiliary_drv()  argument
22 adev = pf->adev; in ice_get_auxiliary_drv()
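
The two matches above come from the helper that resolves the currently bound auxiliary driver from pf->adev. A minimal sketch of how those lines fit together, assuming the standard auxiliary-bus container_of() pattern for the lines the match list omits:

static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
{
        struct auxiliary_device *adev;

        adev = pf->adev;
        /* No auxiliary device plugged in, or no driver bound to it yet. */
        if (!adev || !adev->dev.driver)
                return NULL;

        /* Recover the iidc_auxiliary_drv wrapper from the generic driver. */
        return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
                            adrv.driver);
}
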
35 void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event) in ice_send_event_to_aux() argument
42 mutex_lock(&pf->adev_mutex); in ice_send_event_to_aux()
43 if (!pf->adev) in ice_send_event_to_aux()
46 device_lock(&pf->adev->dev); in ice_send_event_to_aux()
47 iadrv = ice_get_auxiliary_drv(pf); in ice_send_event_to_aux()
49 iadrv->event_handler(pf, event); in ice_send_event_to_aux()
50 device_unlock(&pf->adev->dev); in ice_send_event_to_aux()
52 mutex_unlock(&pf->adev_mutex); in ice_send_event_to_aux()
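
These matches trace the event-dispatch path: pf->adev is checked under pf->adev_mutex, the device lock is taken so the auxiliary driver cannot unbind mid-callback, and the driver's event_handler is invoked. A sketch of the surrounding flow, with the unmatched lines (the early-out label and the handler NULL check) filled in as assumptions:

void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
        struct iidc_auxiliary_drv *iadrv;

        mutex_lock(&pf->adev_mutex);
        if (!pf->adev)
                goto finish;

        /* Hold the device lock so the aux driver stays bound while its
         * event handler runs.
         */
        device_lock(&pf->adev->dev);
        iadrv = ice_get_auxiliary_drv(pf);
        if (iadrv && iadrv->event_handler)
                iadrv->event_handler(pf, event);
        device_unlock(&pf->adev->dev);

finish:
        mutex_unlock(&pf->adev_mutex);
}
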
60 int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) in ice_add_rdma_qset() argument
70 if (WARN_ON(!pf || !qset)) in ice_add_rdma_qset()
73 dev = ice_pf_to_dev(pf); in ice_add_rdma_qset()
75 if (!ice_is_rdma_ena(pf)) in ice_add_rdma_qset()
78 vsi = ice_get_main_vsi(pf); in ice_add_rdma_qset()
115 int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) in ice_del_rdma_qset() argument
121 if (WARN_ON(!pf || !qset)) in ice_del_rdma_qset()
124 vsi = ice_find_vsi(pf, qset->vport_id); in ice_del_rdma_qset()
126 dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n"); in ice_del_rdma_qset()
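
Both RDMA qset helpers open with the same guard-and-lookup pattern: validate the pointers, confirm RDMA is enabled (on the add path), and resolve the VSI before touching the TX scheduler. A sketch of the delete path built around the matched lines; the qs_handle/teid bookkeeping and the final scheduler call are assumptions based on struct iidc_rdma_qset_params:

int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
        struct ice_vsi *vsi;
        u32 teid;
        u16 q_id;

        if (WARN_ON(!pf || !qset))
                return -EINVAL;

        vsi = ice_find_vsi(pf, qset->vport_id);
        if (!vsi) {
                dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
                return -EINVAL;
        }

        /* Assumed teardown: forget the per-TC qset handle and remove the
         * qset node from the TX scheduler tree by its TEID.
         */
        q_id = qset->qs_handle;
        teid = qset->teid;
        vsi->qset_handle[qset->tc] = 0;

        return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
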
144 int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type) in ice_rdma_request_reset() argument
148 if (WARN_ON(!pf)) in ice_rdma_request_reset()
162 dev_err(ice_pf_to_dev(pf), "incorrect reset request\n"); in ice_rdma_request_reset()
166 return ice_schedule_reset(pf, reset); in ice_rdma_request_reset()
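
The reset helper's matches show only the guard, the error message, and the final ice_schedule_reset() call; the body in between maps the IIDC reset type onto the driver's own reset request. A sketch of that mapping, with the specific enum constants taken as assumptions:

int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
{
        enum ice_reset_req reset;

        if (WARN_ON(!pf))
                return -EINVAL;

        /* Translate the aux driver's request into an ice reset type. */
        switch (reset_type) {
        case IIDC_PFR:
                reset = ICE_RESET_PFR;
                break;
        case IIDC_CORER:
                reset = ICE_RESET_CORER;
                break;
        case IIDC_GLOBR:
                reset = ICE_RESET_GLOBR;
                break;
        default:
                dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
                return -EINVAL;
        }

        return ice_schedule_reset(pf, reset);
}
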
176 int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable) in ice_rdma_update_vsi_filter() argument
181 if (WARN_ON(!pf)) in ice_rdma_update_vsi_filter()
184 vsi = ice_find_vsi(pf, vsi_id); in ice_rdma_update_vsi_filter()
188 status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable); in ice_rdma_update_vsi_filter()
190 dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n", in ice_rdma_update_vsi_filter()
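
The filter update follows the same shape: validate pf, resolve the VSI by ID, then program the RDMA filter through the HW layer and log on failure. A sketch assuming the unmatched lines are only the VSI NULL check and the status return:

int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
{
        struct ice_vsi *vsi;
        int status;

        if (WARN_ON(!pf))
                return -EINVAL;

        vsi = ice_find_vsi(pf, vsi_id);
        if (!vsi)
                return -EINVAL;

        /* Enable or disable the RDMA (protocol engine) filter for this VSI. */
        status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
        if (status)
                dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
                        enable ? "en" : "dis");

        return status;
}
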
208 void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) in ice_get_qos_params() argument
214 dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; in ice_get_qos_params()
215 up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); in ice_get_qos_params()
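
The QoS helper reads the local DCBX configuration and the PRTDCB_TUP2TC register, which packs one traffic-class index per user priority. A sketch of how those two matched reads are typically consumed; the iidc_qos_params field names and the 3-bits-per-priority layout are assumptions:

void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
{
        struct ice_dcbx_cfg *dcbx_cfg;
        unsigned int i;
        u32 up2tc;

        dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
        up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);

        qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);

        /* Unpack the UP-to-TC map: 3 bits per user priority. */
        for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
                qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
}
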
235 static int ice_alloc_rdma_qvectors(struct ice_pf *pf) in ice_alloc_rdma_qvectors() argument
237 if (ice_is_rdma_ena(pf)) { in ice_alloc_rdma_qvectors()
240 pf->msix_entries = kcalloc(pf->num_rdma_msix, in ice_alloc_rdma_qvectors()
241 sizeof(*pf->msix_entries), in ice_alloc_rdma_qvectors()
243 if (!pf->msix_entries) in ice_alloc_rdma_qvectors()
247 pf->rdma_base_vector = 0; in ice_alloc_rdma_qvectors()
249 for (i = 0; i < pf->num_rdma_msix; i++) { in ice_alloc_rdma_qvectors()
250 struct msix_entry *entry = &pf->msix_entries[i]; in ice_alloc_rdma_qvectors()
253 map = ice_alloc_irq(pf, false); in ice_alloc_rdma_qvectors()
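
Vector setup: when RDMA is enabled, a msix_entries table sized for pf->num_rdma_msix is allocated and each slot is filled from a dynamically allocated interrupt (ice_alloc_irq() returns a struct msi_map pairing the MSI-X index with the Linux virq). A sketch of the loop around the matched lines; the handling of a failed allocation is an assumption:

static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
{
        if (ice_is_rdma_ena(pf)) {
                int i;

                pf->msix_entries = kcalloc(pf->num_rdma_msix,
                                           sizeof(*pf->msix_entries),
                                           GFP_KERNEL);
                if (!pf->msix_entries)
                        return -ENOMEM;

                /* RDMA vectors are addressed through this table, so the
                 * base vector presented to the aux driver is simply 0.
                 */
                pf->rdma_base_vector = 0;

                for (i = 0; i < pf->num_rdma_msix; i++) {
                        struct msix_entry *entry = &pf->msix_entries[i];
                        struct msi_map map;

                        map = ice_alloc_irq(pf, false);
                        if (map.index < 0)
                                break;

                        entry->entry = map.index;
                        entry->vector = map.virq;
                }
        }
        return 0;
}
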
268 static void ice_free_rdma_qvector(struct ice_pf *pf) in ice_free_rdma_qvector() argument
272 if (!pf->msix_entries) in ice_free_rdma_qvector()
275 for (i = 0; i < pf->num_rdma_msix; i++) { in ice_free_rdma_qvector()
278 map.index = pf->msix_entries[i].entry; in ice_free_rdma_qvector()
279 map.virq = pf->msix_entries[i].vector; in ice_free_rdma_qvector()
280 ice_free_irq(pf, map); in ice_free_rdma_qvector()
283 kfree(pf->msix_entries); in ice_free_rdma_qvector()
284 pf->msix_entries = NULL; in ice_free_rdma_qvector()
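
Teardown mirrors the allocation: each saved (entry, vector) pair is handed back to ice_free_irq() as a struct msi_map, then the table itself is freed and cleared so a later unplug cannot double-free. A sketch assembled from the matched lines:

static void ice_free_rdma_qvector(struct ice_pf *pf)
{
        int i;

        if (!pf->msix_entries)
                return;

        for (i = 0; i < pf->num_rdma_msix; i++) {
                struct msi_map map;

                map.index = pf->msix_entries[i].entry;
                map.virq = pf->msix_entries[i].vector;
                ice_free_irq(pf, map);
        }

        kfree(pf->msix_entries);
        pf->msix_entries = NULL;
}
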
303 int ice_plug_aux_dev(struct ice_pf *pf) in ice_plug_aux_dev() argument
312 if (!ice_is_rdma_ena(pf)) in ice_plug_aux_dev()
320 iadev->pf = pf; in ice_plug_aux_dev()
322 adev->id = pf->aux_idx; in ice_plug_aux_dev()
324 adev->dev.parent = &pf->pdev->dev; in ice_plug_aux_dev()
325 adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp"; in ice_plug_aux_dev()
339 mutex_lock(&pf->adev_mutex); in ice_plug_aux_dev()
340 pf->adev = adev; in ice_plug_aux_dev()
341 mutex_unlock(&pf->adev_mutex); in ice_plug_aux_dev()
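
Plugging the auxiliary device: an iidc_auxiliary_dev wrapper is allocated, pointed back at the PF, given the PF's xarray-allocated id and the PCI device as parent, and named "roce" or "iwarp" from pf->rdma_mode before being registered on the auxiliary bus. Only then is pf->adev published under adev_mutex, so event dispatch never sees a half-initialized device. A sketch with the allocation, release callback, and error unwinding filled in as assumptions:

int ice_plug_aux_dev(struct ice_pf *pf)
{
        struct iidc_auxiliary_dev *iadev;
        struct auxiliary_device *adev;
        int ret;

        if (!ice_is_rdma_ena(pf))
                return 0;

        iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
        if (!iadev)
                return -ENOMEM;

        adev = &iadev->adev;
        iadev->pf = pf;

        adev->id = pf->aux_idx;
        adev->dev.release = ice_adev_release;  /* assumed: frees iadev */
        adev->dev.parent = &pf->pdev->dev;
        adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";

        ret = auxiliary_device_init(adev);
        if (ret) {
                kfree(iadev);
                return ret;
        }

        ret = auxiliary_device_add(adev);
        if (ret) {
                auxiliary_device_uninit(adev);
                return ret;
        }

        mutex_lock(&pf->adev_mutex);
        pf->adev = adev;
        mutex_unlock(&pf->adev_mutex);

        return 0;
}
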
349 void ice_unplug_aux_dev(struct ice_pf *pf) in ice_unplug_aux_dev() argument
353 mutex_lock(&pf->adev_mutex); in ice_unplug_aux_dev()
354 adev = pf->adev; in ice_unplug_aux_dev()
355 pf->adev = NULL; in ice_unplug_aux_dev()
356 mutex_unlock(&pf->adev_mutex); in ice_unplug_aux_dev()
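
Unplugging inverts that order: pf->adev is cleared under the mutex first, so ice_send_event_to_aux() stops seeing the device before it is removed from the bus. The auxiliary_device_delete()/uninit() pair after the unlock is an assumption based on the standard auxiliary-bus teardown:

void ice_unplug_aux_dev(struct ice_pf *pf)
{
        struct auxiliary_device *adev;

        mutex_lock(&pf->adev_mutex);
        adev = pf->adev;
        pf->adev = NULL;
        mutex_unlock(&pf->adev_mutex);

        if (adev) {
                auxiliary_device_delete(adev);
                auxiliary_device_uninit(adev);
        }
}
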
368 int ice_init_rdma(struct ice_pf *pf) in ice_init_rdma() argument
370 struct device *dev = &pf->pdev->dev; in ice_init_rdma()
373 if (!ice_is_rdma_ena(pf)) { in ice_init_rdma()
378 ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX), in ice_init_rdma()
386 ret = ice_alloc_rdma_qvectors(pf); in ice_init_rdma()
391 pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; in ice_init_rdma()
392 ret = ice_plug_aux_dev(pf); in ice_init_rdma()
398 ice_free_rdma_qvector(pf); in ice_init_rdma()
400 pf->adev = NULL; in ice_init_rdma()
401 xa_erase(&ice_aux_id, pf->aux_idx); in ice_init_rdma()
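
ice_init_rdma() strings the earlier pieces together: bail out quietly when RDMA is unsupported, reserve an auxiliary-device id from the ice_aux_id xarray, reserve the MSI-X vectors, mark RoCEv2 mode, and plug the auxiliary device; the matched lines at the end are its unwind path. A sketch with the intermediate error checks and messages filled in as assumptions:

int ice_init_rdma(struct ice_pf *pf)
{
        struct device *dev = &pf->pdev->dev;
        int ret;

        if (!ice_is_rdma_ena(pf)) {
                dev_warn(dev, "RDMA is not supported on this device\n");
                return 0;
        }

        ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
                       GFP_KERNEL);
        if (ret)
                return ret;

        ret = ice_alloc_rdma_qvectors(pf);
        if (ret < 0)
                goto err_rdma_qvectors;

        pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
        ret = ice_plug_aux_dev(pf);
        if (ret)
                goto err_plug_aux_dev;
        return 0;

err_plug_aux_dev:
        ice_free_rdma_qvector(pf);
err_rdma_qvectors:
        pf->adev = NULL;
        xa_erase(&ice_aux_id, pf->aux_idx);
        return ret;
}
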
409 void ice_deinit_rdma(struct ice_pf *pf) in ice_deinit_rdma() argument
411 if (!ice_is_rdma_ena(pf)) in ice_deinit_rdma()
414 ice_unplug_aux_dev(pf); in ice_deinit_rdma()
415 ice_free_rdma_qvector(pf); in ice_deinit_rdma()
416 xa_erase(&ice_aux_id, pf->aux_idx); in ice_deinit_rdma()
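
Teardown reverses init exactly: unplug the auxiliary device, release the RDMA vectors, and return the device id to the xarray. A sketch assembled from the matched lines:

void ice_deinit_rdma(struct ice_pf *pf)
{
        if (!ice_is_rdma_ena(pf))
                return;

        ice_unplug_aux_dev(pf);
        ice_free_rdma_qvector(pf);
        xa_erase(&ice_aux_id, pf->aux_idx);
}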