/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef MLX5_VFIO_CMD_H
#define MLX5_VFIO_CMD_H

#include <linux/kernel.h>
#include <linux/vfio_pci_core.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>

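/*
 * True when the device was registered with VFIO_MIGRATION_PRE_COPY,
 * i.e. userspace may read migration data while the device is still
 * RUNNING.
 */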
#define MLX5VF_PRE_COPY_SUPP(mvdev) \
	((mvdev)->core_device.vdev.migration_flags & VFIO_MIGRATION_PRE_COPY)

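/* Progress/error states of a migration file */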
enum mlx5_vf_migf_state {
	MLX5_MIGF_STATE_ERROR = 1,
	MLX5_MIGF_STATE_PRE_COPY_ERROR,
	MLX5_MIGF_STATE_PRE_COPY,
	MLX5_MIGF_STATE_SAVE_LAST,
	MLX5_MIGF_STATE_COMPLETE,
};

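/*
 * Resuming-side state machine: data written by userspace is parsed as
 * a header followed by the device image, which is then loaded into the
 * device. READ_IMAGE_NO_HEADER is the entry state when the stream
 * carries no header (the device was registered without PRE_COPY).
 */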
enum mlx5_vf_load_state {
	MLX5_VF_LOAD_STATE_READ_IMAGE_NO_HEADER,
	MLX5_VF_LOAD_STATE_READ_HEADER,
	MLX5_VF_LOAD_STATE_PREP_IMAGE,
	MLX5_VF_LOAD_STATE_READ_IMAGE,
	MLX5_VF_LOAD_STATE_LOAD_IMAGE,
};

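/*
 * Header preceding a device image in the migration stream; kept
 * little-endian so the stream layout does not depend on host byte order.
 */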
struct mlx5_vf_migration_header {
	__le64 image_size;
	/* For future use in case we may need to change the kernel protocol */
	__le64 flags;
};

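/*
 * A migration data buffer: an appendable scatter-gather table that is
 * DMA mapped and exposed to the device through @mkey once @dmaed is set.
 */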
struct mlx5_vhca_data_buffer {
	struct sg_append_table table;
	loff_t start_pos;
	u64 length;
	u64 allocated_length;
	u64 header_image_size;
	u32 mkey;
	enum dma_data_direction dma_dir;
	u8 dmaed:1;
	struct list_head buf_elm;
	struct mlx5_vf_migration_file *migf;
	/* Optimize mlx5vf_get_migration_page() for sequential access */
	struct scatterlist *last_offset_sg;
	unsigned int sg_last_entry;
	unsigned long last_offset;
};

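/* Completion context for the asynchronous SAVE_VHCA_STATE command */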
struct mlx5vf_async_data {
	struct mlx5_async_work cb_work;
	struct work_struct work;
	struct mlx5_vhca_data_buffer *buf;
	struct mlx5_vhca_data_buffer *header_buf;
	int status;
	u8 last_chunk:1;
	void *out;
};

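/*
 * Back-end of the migration file descriptor handed to userspace for
 * saving or resuming. @buf_list holds buffers carrying migration data,
 * @avail_list holds spare buffers for reuse; both under @list_lock.
 */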
struct mlx5_vf_migration_file {
	struct file *filp;
	struct mutex lock;
	enum mlx5_vf_migf_state state;

	enum mlx5_vf_load_state load_state;
	u32 pdn;
	loff_t max_pos;
	struct mlx5_vhca_data_buffer *buf;
	struct mlx5_vhca_data_buffer *buf_header;
	spinlock_t list_lock;
	struct list_head buf_list;
	struct list_head avail_list;
	struct mlx5vf_pci_core_device *mvdev;
	wait_queue_head_t poll_wait;
	struct completion save_comp;
	struct mlx5_async_ctx async_ctx;
	struct mlx5vf_async_data async_data;
};

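/*
 * The structures below back the dirty-page tracker: a CQ plus a
 * host/firmware QP pair over which the device reports written pages.
 */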
struct mlx5_vhca_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	int cqe_size;
	int nent;
};

struct mlx5_vhca_cq {
	struct mlx5_vhca_cq_buf buf;
	struct mlx5_db db;
	struct mlx5_core_cq mcq;
	size_t ncqe;
};

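/* Receive buffer posted to the tracker RQ for dirty-page reports */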
struct mlx5_vhca_recv_buf {
	u32 npages;
	struct page **page_list;
	dma_addr_t *dma_addrs;
	u32 next_rq_offset;
	u32 mkey;
};

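/* Tracker QP; software keeps producer/consumer accounting for the RQ only */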
struct mlx5_vhca_qp {
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	struct mlx5_vhca_recv_buf recv_buf;
	u32 tracked_page_size;
	u32 max_msg_size;
	u32 qpn;
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int wqe_cnt;
		__be32 *db;
		struct mlx5_frag_buf_ctrl fbc;
	} rq;
};

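/*
 * A dirty-page tracker instance. @id is the object id the device
 * returns when tracking starts; @nb listens for tracker events.
 */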
struct mlx5_vhca_page_tracker {
	u32 id;
	u32 pdn;
	u8 is_err:1;
	struct mlx5_uars_page *uar;
	struct mlx5_vhca_cq cq;
	struct mlx5_vhca_qp *host_qp;
	struct mlx5_vhca_qp *fw_qp;
	struct mlx5_nb nb;
	int status;
};

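/*
 * Per-VF device state. @mdev is the mlx5 core device through which
 * migration commands are issued on behalf of the VF; @deferred_reset
 * lets a reset_done that races with @state_mutex be completed later.
 */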
struct mlx5vf_pci_core_device {
	struct vfio_pci_core_device core_device;
	int vf_id;
	u16 vhca_id;
	u8 migrate_cap:1;
	u8 deferred_reset:1;
	u8 mdev_detach:1;
	u8 log_active:1;
	struct completion tracker_comp;
	/* protect migration state */
	struct mutex state_mutex;
	enum vfio_device_mig_state mig_state;
	/* protect the reset_done flow */
	spinlock_t reset_lock;
	struct mlx5_vf_migration_file *resuming_migf;
	struct mlx5_vf_migration_file *saving_migf;
	struct mlx5_vhca_page_tracker tracker;
	struct workqueue_struct *cb_wq;
	struct notifier_block nb;
	struct mlx5_core_dev *mdev;
};

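/* Flags for mlx5vf_cmd_query_vhca_migration_state() */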
enum {
	MLX5VF_QUERY_INC = (1UL << 0),
	MLX5VF_QUERY_FINAL = (1UL << 1),
};

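/*
 * Entry points implemented in cmd.c. Suspend/resume/query wrap the
 * corresponding VHCA device commands; the page-tracker entry points
 * back the vfio_log_ops passed to mlx5vf_cmd_set_migratable().
 */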
int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod);
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
					  size_t *state_size, u8 query_flags);
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev,
			       const struct vfio_migration_ops *mig_ops,
			       const struct vfio_log_ops *log_ops);
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev);
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf,
			       struct mlx5_vhca_data_buffer *buf, bool inc,
			       bool track);
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf,
			       struct mlx5_vhca_data_buffer *buf);
int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf);
void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf);
void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf);
struct mlx5_vhca_data_buffer *
mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
			 size_t length, enum dma_data_direction dma_dir);
void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf);
struct mlx5_vhca_data_buffer *
mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
		       size_t length, enum dma_data_direction dma_dir);
void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf);
int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
			       unsigned int npages);
struct page *mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
				       unsigned long offset);
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work);
int mlx5vf_start_page_tracker(struct vfio_device *vdev,
		struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
int mlx5vf_stop_page_tracker(struct vfio_device *vdev);
int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova,
			unsigned long length, struct iova_bitmap *dirty);
#endif /* MLX5_VFIO_CMD_H */