/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef MLX5_VFIO_CMD_H
#define MLX5_VFIO_CMD_H

#include <linux/kernel.h>
#include <linux/vfio_pci_core.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>

/* Completion context for the asynchronous SAVE_VHCA_STATE command */
struct mlx5vf_async_data {
	struct mlx5_async_work cb_work;
	struct work_struct work;
	int status;
	u32 pdn;
	u32 mkey;
	void *out;
};

/* State behind the migration file descriptor used for saving/resuming */
struct mlx5_vf_migration_file {
	struct file *filp;
	struct mutex lock;
	u8 disabled:1;
	u8 is_err:1;

	struct sg_append_table table;
	size_t total_length;
	size_t allocated_length;

	/* Optimize mlx5vf_get_migration_page() for sequential access */
	struct scatterlist *last_offset_sg;
	unsigned int sg_last_entry;
	unsigned long last_offset;
	struct mlx5vf_pci_core_device *mvdev;
	wait_queue_head_t poll_wait;
	struct mlx5_async_ctx async_ctx;
	struct mlx5vf_async_data async_data;
};

struct mlx5_vhca_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	int cqe_size;
	int nent;
};

struct mlx5_vhca_cq {
	struct mlx5_vhca_cq_buf buf;
	struct mlx5_db db;
	struct mlx5_core_cq mcq;
	size_t ncqe;
};

/* Receive buffer pages posted on the page tracker QP */
struct mlx5_vhca_recv_buf {
	u32 npages;
	struct page **page_list;
	dma_addr_t *dma_addrs;
	u32 next_rq_offset;
	u32 mkey;
};

struct mlx5_vhca_qp {
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	struct mlx5_vhca_recv_buf recv_buf;
	u32 tracked_page_size;
	u32 max_msg_size;
	u32 qpn;
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int wqe_cnt;
		__be32 *db;
		struct mlx5_frag_buf_ctrl fbc;
	} rq;
};

/* Dirty page tracking context: CQ plus host/firmware QP pair */
struct mlx5_vhca_page_tracker {
	u32 id;
	u32 pdn;
	u8 is_err:1;
	struct mlx5_uars_page *uar;
	struct mlx5_vhca_cq cq;
	struct mlx5_vhca_qp *host_qp;
	struct mlx5_vhca_qp *fw_qp;
	struct mlx5_nb nb;
	int status;
};

/* vfio-pci core device extended with mlx5 live migration state */
struct mlx5vf_pci_core_device {
	struct vfio_pci_core_device core_device;
	int vf_id;
	u16 vhca_id;
	u8 migrate_cap:1;
	u8 deferred_reset:1;
	u8 mdev_detach:1;
	u8 log_active:1;
	struct completion tracker_comp;
	/* protect migration state */
	struct mutex state_mutex;
	enum vfio_device_mig_state mig_state;
	/* protect the reset_done flow */
	spinlock_t reset_lock;
	struct mlx5_vf_migration_file *resuming_migf;
	struct mlx5_vf_migration_file *saving_migf;
	struct mlx5_vhca_page_tracker tracker;
	struct workqueue_struct *cb_wq;
	struct notifier_block nb;
	struct mlx5_core_dev *mdev;
};

int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
					  size_t *state_size);
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
			       const struct vfio_migration_ops *mig_ops,
			       const struct vfio_log_ops *log_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf);
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf);
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
int mlx5vf_start_page_tracker(struct vfio_device *vdev,
			      struct rb_root_cached *ranges, u32 nnodes,
			      u64 *page_size);
int mlx5vf_stop_page_tracker(struct vfio_device *vdev);
int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
				  unsigned long length,
				  struct iova_bitmap *dirty);
#endif /* MLX5_VFIO_CMD_H */