Lines matching refs:mvdev

36 _mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev);
38 int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod) in mlx5vf_cmd_suspend_vhca() argument
40 struct mlx5_vf_migration_file *migf = mvdev->saving_migf; in mlx5vf_cmd_suspend_vhca()
45 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_cmd_suspend_vhca()
46 if (mvdev->mdev_detach) in mlx5vf_cmd_suspend_vhca()
62 MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id); in mlx5vf_cmd_suspend_vhca()
65 err = mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out); in mlx5vf_cmd_suspend_vhca()
72 int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod) in mlx5vf_cmd_resume_vhca() argument
77 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_cmd_resume_vhca()
78 if (mvdev->mdev_detach) in mlx5vf_cmd_resume_vhca()
82 MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id); in mlx5vf_cmd_resume_vhca()
85 return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out); in mlx5vf_cmd_resume_vhca()
88 int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_cmd_query_vhca_migration_state() argument
96 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_cmd_query_vhca_migration_state()
97 if (mvdev->mdev_detach) in mlx5vf_cmd_query_vhca_migration_state()
107 ret = wait_for_completion_interruptible(&mvdev->saving_migf->save_comp); in mlx5vf_cmd_query_vhca_migration_state()
110 if (mvdev->saving_migf->state == in mlx5vf_cmd_query_vhca_migration_state()
118 complete(&mvdev->saving_migf->save_comp); in mlx5vf_cmd_query_vhca_migration_state()
127 MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id); in mlx5vf_cmd_query_vhca_migration_state()
132 ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in, in mlx5vf_cmd_query_vhca_migration_state()
135 complete(&mvdev->saving_migf->save_comp); in mlx5vf_cmd_query_vhca_migration_state()
145 static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev) in set_tracker_error() argument
148 mvdev->tracker.is_err = true; in set_tracker_error()
149 complete(&mvdev->tracker_comp); in set_tracker_error()
155 struct mlx5vf_pci_core_device *mvdev = in mlx5fv_vf_event() local
160 mutex_lock(&mvdev->state_mutex); in mlx5fv_vf_event()
161 mvdev->mdev_detach = false; in mlx5fv_vf_event()
162 mlx5vf_state_mutex_unlock(mvdev); in mlx5fv_vf_event()
165 mlx5vf_cmd_close_migratable(mvdev); in mlx5fv_vf_event()
166 mutex_lock(&mvdev->state_mutex); in mlx5fv_vf_event()
167 mvdev->mdev_detach = true; in mlx5fv_vf_event()
168 mlx5vf_state_mutex_unlock(mvdev); in mlx5fv_vf_event()
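
Refs 155-168 are the SR-IOV blocking-notifier callback that flips mdev_detach. Reconstructed from them, with the event constants taken from the mlx5 PF notifier API:

    static int mlx5fv_vf_event(struct notifier_block *nb,
                               unsigned long event, void *data)
    {
            struct mlx5vf_pci_core_device *mvdev =
                    container_of(nb, struct mlx5vf_pci_core_device, nb);

            switch (event) {
            case MLX5_PF_NOTIFY_ENABLE_VF:
                    mutex_lock(&mvdev->state_mutex);
                    mvdev->mdev_detach = false;
                    mlx5vf_state_mutex_unlock(mvdev);
                    break;
            case MLX5_PF_NOTIFY_DISABLE_VF:
                    /* Tear down migration state before declaring the mdev gone */
                    mlx5vf_cmd_close_migratable(mvdev);
                    mutex_lock(&mvdev->state_mutex);
                    mvdev->mdev_detach = true;
                    mlx5vf_state_mutex_unlock(mvdev);
                    break;
            }

            return 0;
    }

Every path above that tests mdev_detach under state_mutex is thereby ordered against this callback.
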
177 void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev) in mlx5vf_cmd_close_migratable() argument
179 if (!mvdev->migrate_cap) in mlx5vf_cmd_close_migratable()
183 set_tracker_error(mvdev); in mlx5vf_cmd_close_migratable()
184 mutex_lock(&mvdev->state_mutex); in mlx5vf_cmd_close_migratable()
185 mlx5vf_disable_fds(mvdev); in mlx5vf_cmd_close_migratable()
186 _mlx5vf_free_page_tracker_resources(mvdev); in mlx5vf_cmd_close_migratable()
187 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_cmd_close_migratable()
190 void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev) in mlx5vf_cmd_remove_migratable() argument
192 if (!mvdev->migrate_cap) in mlx5vf_cmd_remove_migratable()
195 mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id, in mlx5vf_cmd_remove_migratable()
196 &mvdev->nb); in mlx5vf_cmd_remove_migratable()
197 destroy_workqueue(mvdev->cb_wq); in mlx5vf_cmd_remove_migratable()
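
Refs 179-197 cover the two teardown entry points and reconstruct almost verbatim; any teardown lines that never mention mvdev would not appear in this listing and are omitted here:

    void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev)
    {
            if (!mvdev->migrate_cap)
                    return;

            /* Unblock a reporter sleeping on tracker_comp before locking */
            set_tracker_error(mvdev);
            mutex_lock(&mvdev->state_mutex);
            mlx5vf_disable_fds(mvdev);
            _mlx5vf_free_page_tracker_resources(mvdev);
            mlx5vf_state_mutex_unlock(mvdev);
    }

    void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
    {
            if (!mvdev->migrate_cap)
                    return;

            /* Stop new notifier events, then destroy the workqueue they feed */
            mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id,
                                                    &mvdev->nb);
            destroy_workqueue(mvdev->cb_wq);
    }
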
200 void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_cmd_set_migratable() argument
204 struct pci_dev *pdev = mvdev->core_device.pdev; in mlx5vf_cmd_set_migratable()
210 mvdev->mdev = mlx5_vf_get_core_dev(pdev); in mlx5vf_cmd_set_migratable()
211 if (!mvdev->mdev) in mlx5vf_cmd_set_migratable()
214 if (!MLX5_CAP_GEN(mvdev->mdev, migration)) in mlx5vf_cmd_set_migratable()
217 mvdev->vf_id = pci_iov_vf_id(pdev); in mlx5vf_cmd_set_migratable()
218 if (mvdev->vf_id < 0) in mlx5vf_cmd_set_migratable()
221 ret = mlx5vf_is_migratable(mvdev->mdev, mvdev->vf_id + 1); in mlx5vf_cmd_set_migratable()
225 if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1, in mlx5vf_cmd_set_migratable()
226 &mvdev->vhca_id)) in mlx5vf_cmd_set_migratable()
229 mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0); in mlx5vf_cmd_set_migratable()
230 if (!mvdev->cb_wq) in mlx5vf_cmd_set_migratable()
233 mutex_init(&mvdev->state_mutex); in mlx5vf_cmd_set_migratable()
234 spin_lock_init(&mvdev->reset_lock); in mlx5vf_cmd_set_migratable()
235 mvdev->nb.notifier_call = mlx5fv_vf_event; in mlx5vf_cmd_set_migratable()
236 ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id, in mlx5vf_cmd_set_migratable()
237 &mvdev->nb); in mlx5vf_cmd_set_migratable()
239 destroy_workqueue(mvdev->cb_wq); in mlx5vf_cmd_set_migratable()
243 mvdev->migrate_cap = 1; in mlx5vf_cmd_set_migratable()
244 mvdev->core_device.vdev.migration_flags = in mlx5vf_cmd_set_migratable()
247 mvdev->core_device.vdev.mig_ops = mig_ops; in mlx5vf_cmd_set_migratable()
248 init_completion(&mvdev->tracker_comp); in mlx5vf_cmd_set_migratable()
249 if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization)) in mlx5vf_cmd_set_migratable()
250 mvdev->core_device.vdev.log_ops = log_ops; in mlx5vf_cmd_set_migratable()
252 if (MLX5_CAP_GEN_2(mvdev->mdev, migration_multi_load) && in mlx5vf_cmd_set_migratable()
253 MLX5_CAP_GEN_2(mvdev->mdev, migration_tracking_state)) in mlx5vf_cmd_set_migratable()
254 mvdev->core_device.vdev.migration_flags |= in mlx5vf_cmd_set_migratable()
258 mlx5_vf_put_core_dev(mvdev->mdev); in mlx5vf_cmd_set_migratable()
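
Refs 204-258 outline the probe-time setup: take a core-dev reference, verify the migration capability, resolve vf_id and vhca_id, create the ordered callback workqueue, register the SR-IOV notifier, and only then advertise migrate_cap and the VFIO ops. A sketch assembled from those refs; the is_virtfn guard and the exact VFIO_MIGRATION_* flag values never mention mvdev, so they are filled in from the upstream driver and worth double-checking:

    void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
                                   const struct vfio_migration_ops *mig_ops,
                                   const struct vfio_log_ops *log_ops)
    {
            struct pci_dev *pdev = mvdev->core_device.pdev;
            int ret;

            if (!pdev->is_virtfn)
                    return;

            mvdev->mdev = mlx5_vf_get_core_dev(pdev);
            if (!mvdev->mdev)
                    return;

            if (!MLX5_CAP_GEN(mvdev->mdev, migration))
                    goto end;

            mvdev->vf_id = pci_iov_vf_id(pdev);
            if (mvdev->vf_id < 0)
                    goto end;

            /* Firmware addresses VFs by function id, hence vf_id + 1 */
            ret = mlx5vf_is_migratable(mvdev->mdev, mvdev->vf_id + 1);
            if (ret)
                    goto end;

            if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1,
                                       &mvdev->vhca_id))
                    goto end;

            mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
            if (!mvdev->cb_wq)
                    goto end;

            mutex_init(&mvdev->state_mutex);
            spin_lock_init(&mvdev->reset_lock);
            mvdev->nb.notifier_call = mlx5fv_vf_event;
            ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id,
                                                        &mvdev->nb);
            if (ret) {
                    destroy_workqueue(mvdev->cb_wq);
                    goto end;
            }

            mvdev->migrate_cap = 1;
            mvdev->core_device.vdev.migration_flags =
                    VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
            mvdev->core_device.vdev.mig_ops = mig_ops;
            init_completion(&mvdev->tracker_comp);
            if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization))
                    mvdev->core_device.vdev.log_ops = log_ops;
            if (MLX5_CAP_GEN_2(mvdev->mdev, migration_multi_load) &&
                MLX5_CAP_GEN_2(mvdev->mdev, migration_tracking_state))
                    mvdev->core_device.vdev.migration_flags |=
                            VFIO_MIGRATION_PRE_COPY;

    end:
            /* The reference is dropped even on success; command paths
             * revalidate via mdev_detach under state_mutex instead. */
            mlx5_vf_put_core_dev(mvdev->mdev);
    }
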
347 struct mlx5vf_pci_core_device *mvdev = buf->migf->mvdev; in mlx5vf_dma_data_buffer() local
348 struct mlx5_core_dev *mdev = mvdev->mdev; in mlx5vf_dma_data_buffer()
351 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_dma_data_buffer()
352 if (mvdev->mdev_detach) in mlx5vf_dma_data_buffer()
379 lockdep_assert_held(&migf->mvdev->state_mutex); in mlx5vf_free_data_buffer()
380 WARN_ON(migf->mvdev->mdev_detach); in mlx5vf_free_data_buffer()
383 mlx5_core_destroy_mkey(migf->mvdev->mdev, buf->mkey); in mlx5vf_free_data_buffer()
384 dma_unmap_sgtable(migf->mvdev->mdev->device, &buf->table.sgt, in mlx5vf_free_data_buffer()
442 lockdep_assert_held(&migf->mvdev->state_mutex); in mlx5vf_get_data_buffer()
443 if (migf->mvdev->mdev_detach) in mlx5vf_get_data_buffer()
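
Refs 347-443 show the migration data-buffer paths sharing the same lock/detach guard, plus one ordering rule on teardown: the mkey through which the device references the pages goes away before the sgtable's DMA mapping does. A compressed, hypothetical helper illustrating just that ordering (mlx5vf_buf_unmap is not a real symbol, and DMA_BIDIRECTIONAL stands in for the buffer's actual direction):

    static void mlx5vf_buf_unmap(struct mlx5_vf_migration_file *migf,
                                 struct mlx5_vhca_data_buffer *buf)
    {
            lockdep_assert_held(&migf->mvdev->state_mutex);
            WARN_ON(migf->mvdev->mdev_detach);

            /* Drop the device's mkey reference to the pages first... */
            mlx5_core_destroy_mkey(migf->mvdev->mdev, buf->mkey);
            /* ...then release the DMA mapping (direction assumed here) */
            dma_unmap_sgtable(migf->mvdev->mdev->device, &buf->table.sgt,
                              DMA_BIDIRECTIONAL, 0);
    }
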
573 queue_work(migf->mvdev->cb_wq, &async_data->work); in mlx5vf_save_callback()
576 int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_cmd_save_vhca_state() argument
587 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_cmd_save_vhca_state()
588 if (mvdev->mdev_detach) in mlx5vf_cmd_save_vhca_state()
605 MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id); in mlx5vf_cmd_save_vhca_state()
620 if (MLX5VF_PRE_COPY_SUPP(mvdev)) { in mlx5vf_cmd_save_vhca_state()
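
Ref 573 is the tail of the asynchronous save completion: it runs in IRQ context, so it only records the status and defers error handling and cleanup to the ordered cb_wq allocated in mlx5vf_cmd_set_migratable(). A sketch, assuming the driver's mlx5vf_async_data layout (cb_work, work, status) from cmd.h:

    static void mlx5vf_save_callback(int status, struct mlx5_async_work *context)
    {
            struct mlx5vf_async_data *async_data = container_of(
                    context, struct mlx5vf_async_data, cb_work);
            struct mlx5_vf_migration_file *migf = container_of(
                    async_data, struct mlx5_vf_migration_file, async_data);

            /* IRQ context: stash the result and punt to process context */
            async_data->status = status;
            queue_work(migf->mvdev->cb_wq, &async_data->work);
    }

Because cb_wq is an ordered workqueue, completions are processed one at a time and in submission order.
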
659 int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev, in mlx5vf_cmd_load_vhca_state() argument
667 lockdep_assert_held(&mvdev->state_mutex); in mlx5vf_cmd_load_vhca_state()
668 if (mvdev->mdev_detach) in mlx5vf_cmd_load_vhca_state()
680 MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id); in mlx5vf_cmd_load_vhca_state()
683 return mlx5_cmd_exec_inout(mvdev->mdev, load_vhca_state, in, out); in mlx5vf_cmd_load_vhca_state()
690 lockdep_assert_held(&migf->mvdev->state_mutex); in mlx5vf_cmd_alloc_pd()
691 if (migf->mvdev->mdev_detach) in mlx5vf_cmd_alloc_pd()
694 err = mlx5_core_alloc_pd(migf->mvdev->mdev, &migf->pdn); in mlx5vf_cmd_alloc_pd()
700 lockdep_assert_held(&migf->mvdev->state_mutex); in mlx5vf_cmd_dealloc_pd()
701 if (migf->mvdev->mdev_detach) in mlx5vf_cmd_dealloc_pd()
704 mlx5_core_dealloc_pd(migf->mvdev->mdev, migf->pdn); in mlx5vf_cmd_dealloc_pd()
711 lockdep_assert_held(&migf->mvdev->state_mutex); in mlx5fv_cmd_clean_migf_resources()
712 WARN_ON(migf->mvdev->mdev_detach); in mlx5fv_cmd_clean_migf_resources()
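
Refs 690-704 reconstruct nearly verbatim into the PD helpers; note that even the dealloc path honours the detach guard, since the PD dies with the mdev anyway:

    int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf)
    {
            int err;

            lockdep_assert_held(&migf->mvdev->state_mutex);
            if (migf->mvdev->mdev_detach)
                    return -ENOTCONN;

            err = mlx5_core_alloc_pd(migf->mvdev->mdev, &migf->pdn);
            return err;
    }

    void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf)
    {
            lockdep_assert_held(&migf->mvdev->state_mutex);
            if (migf->mvdev->mdev_detach)
                    return;

            mlx5_core_dealloc_pd(migf->mvdev->mdev, migf->pdn);
    }
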
736 struct mlx5vf_pci_core_device *mvdev, in mlx5vf_create_tracker() argument
741 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; in mlx5vf_create_tracker()
774 MLX5_SET(page_track, obj_context, vhca_id, mvdev->vhca_id); in mlx5vf_create_tracker()
909 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_event_notifier() local
927 set_tracker_error(mvdev); in mlx5vf_event_notifier()
939 struct mlx5vf_pci_core_device *mvdev = in mlx5vf_cq_complete() local
943 complete(&mvdev->tracker_comp); in mlx5vf_cq_complete()
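
Refs 909-943 pair the two wake-up sources for tracker_comp: the event notifier flags a fatal device event through set_tracker_error() (refs 148-149), while the CQ completion handler kicks the same completion so the dirty-page reporter re-polls. The handler, reconstructed on the assumption that the tracker CQ is embedded as tracker.cq.mcq:

    static void mlx5vf_cq_complete(struct mlx5_core_cq *mcq,
                                   struct mlx5_eqe *eqe)
    {
            struct mlx5vf_pci_core_device *mvdev =
                    container_of(mcq, struct mlx5vf_pci_core_device,
                                 tracker.cq.mcq);

            /* Wake mlx5vf_tracker_read_and_clear() to poll the CQ again */
            complete(&mvdev->tracker_comp);
    }
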
1321 _mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev) in _mlx5vf_free_page_tracker_resources() argument
1323 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; in _mlx5vf_free_page_tracker_resources()
1324 struct mlx5_core_dev *mdev = mvdev->mdev; in _mlx5vf_free_page_tracker_resources()
1326 lockdep_assert_held(&mvdev->state_mutex); in _mlx5vf_free_page_tracker_resources()
1328 if (!mvdev->log_active) in _mlx5vf_free_page_tracker_resources()
1331 WARN_ON(mvdev->mdev_detach); in _mlx5vf_free_page_tracker_resources()
1341 mvdev->log_active = false; in _mlx5vf_free_page_tracker_resources()
1346 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_stop_page_tracker() local
1349 mutex_lock(&mvdev->state_mutex); in mlx5vf_stop_page_tracker()
1350 if (!mvdev->log_active) in mlx5vf_stop_page_tracker()
1353 _mlx5vf_free_page_tracker_resources(mvdev); in mlx5vf_stop_page_tracker()
1354 mvdev->log_active = false; in mlx5vf_stop_page_tracker()
1356 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_stop_page_tracker()
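
Refs 1321-1356 give the stop side: _mlx5vf_free_page_tracker_resources() demands the state lock and an attached mdev, and the log_ops stop callback wraps it. The wrapper, reconstructed from the refs:

    static int mlx5vf_stop_page_tracker(struct vfio_device *vdev)
    {
            struct mlx5vf_pci_core_device *mvdev = container_of(
                    vdev, struct mlx5vf_pci_core_device, core_device.vdev);

            mutex_lock(&mvdev->state_mutex);
            if (!mvdev->log_active)
                    goto end;

            _mlx5vf_free_page_tracker_resources(mvdev);
            mvdev->log_active = false;
    end:
            mlx5vf_state_mutex_unlock(mvdev);
            return 0;
    }
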
1364 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_start_page_tracker() local
1366 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; in mlx5vf_start_page_tracker()
1376 mutex_lock(&mvdev->state_mutex); in mlx5vf_start_page_tracker()
1377 if (mvdev->mdev_detach) { in mlx5vf_start_page_tracker()
1382 if (mvdev->log_active) { in mlx5vf_start_page_tracker()
1387 mdev = mvdev->mdev; in mlx5vf_start_page_tracker()
1443 err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes); in mlx5vf_start_page_tracker()
1450 mvdev->log_active = true; in mlx5vf_start_page_tracker()
1451 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_start_page_tracker()
1467 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_start_page_tracker()
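
Refs 1364-1451 sketch the start path: the same detach/already-active guards, then tracker creation, with every exit (success or failure) funneling through mlx5vf_state_mutex_unlock() so a reset deferred while the mutex was held still runs. A shape-only sketch; the CQ/QP setup between the guards and mlx5vf_create_tracker(), and the *page_size reporting, are elided:

    static int mlx5vf_start_page_tracker(struct vfio_device *vdev,
                                         struct rb_root_cached *ranges,
                                         u32 nnodes, u64 *page_size)
    {
            struct mlx5vf_pci_core_device *mvdev = container_of(
                    vdev, struct mlx5vf_pci_core_device, core_device.vdev);
            struct mlx5_core_dev *mdev;
            int err;

            mutex_lock(&mvdev->state_mutex);
            if (mvdev->mdev_detach) {
                    err = -ENOTCONN;
                    goto end;
            }
            if (mvdev->log_active) {
                    err = -EINVAL;
                    goto end;
            }
            mdev = mvdev->mdev;

            /* CQ/QP setup against mdev elided; it also fills *page_size */
            err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes);
            if (err)
                    goto end;

            mvdev->log_active = true;
    end:
            mlx5vf_state_mutex_unlock(mvdev);
            return err;
    }
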
1570 struct mlx5vf_pci_core_device *mvdev = container_of( in mlx5vf_tracker_read_and_clear() local
1572 struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; in mlx5vf_tracker_read_and_clear()
1577 mutex_lock(&mvdev->state_mutex); in mlx5vf_tracker_read_and_clear()
1578 if (!mvdev->log_active) { in mlx5vf_tracker_read_and_clear()
1583 if (mvdev->mdev_detach) { in mlx5vf_tracker_read_and_clear()
1588 mdev = mvdev->mdev; in mlx5vf_tracker_read_and_clear()
1605 wait_for_completion(&mvdev->tracker_comp); in mlx5vf_tracker_read_and_clear()
1622 mlx5vf_state_mutex_unlock(mvdev); in mlx5vf_tracker_read_and_clear()
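
Refs 1570-1622 close the loop on the reporter: the usual guards, then a report issued to the device, with the caller sleeping on tracker_comp (completed from mlx5vf_cq_complete() or set_tracker_error() above) until the requested range has landed in the bitmap. A shape-only sketch; the real CQ poll loop, the report command, and the error checks are elided, and the is_err handling here is an assumption:

    static int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev,
                                             unsigned long iova,
                                             unsigned long length,
                                             struct iova_bitmap *dirty)
    {
            struct mlx5vf_pci_core_device *mvdev = container_of(
                    vdev, struct mlx5vf_pci_core_device, core_device.vdev);
            int err;

            mutex_lock(&mvdev->state_mutex);
            if (!mvdev->log_active) {
                    err = -EINVAL;
                    goto end;
            }
            if (mvdev->mdev_detach) {
                    err = -ENOTCONN;
                    goto end;
            }

            /* Issue the report against mvdev->mdev, then wait for CQEs
             * covering [iova, iova + length) to be written into *dirty */
            wait_for_completion(&mvdev->tracker_comp);
            err = mvdev->tracker.is_err ? -EIO : 0;
    end:
            mlx5vf_state_mutex_unlock(mvdev);
            return err;
    }
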