xref: /openbmc/qemu/migration/multifd-qpl.c (revision 34e104b8)
1 /*
2  * Multifd qpl compression accelerator implementation
3  *
4  * Copyright (c) 2023 Intel Corporation
5  *
6  * Authors:
7  *  Yuan Liu<yuan1.liu@intel.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or later.
10  * See the COPYING file in the top-level directory.
11  */
12 
13 #include "qemu/osdep.h"
14 #include "qemu/module.h"
15 #include "qapi/error.h"
16 #include "multifd.h"
17 #include "qpl/qpl.h"
18 
typedef struct {
    /* the QPL hardware path job */
    qpl_job *job;
    /* indicates if fallback to the software path is required for this job */
    bool fallback_sw_path;
    /* output data buffer produced by the software fallback path */
    uint8_t *sw_output;
    /* output data length from the software fallback path */
    uint32_t sw_output_len;
} QplHwJob;
29 
typedef struct {
    /* array of hardware jobs, the number of jobs equals the number of pages */
    QplHwJob *hw_jobs;
    /* the QPL software job for the slow path and software fallback */
    qpl_job *sw_job;
    /* the number of pages that the QPL needs to process at one time */
    uint32_t page_num;
    /* array of compressed page buffers (page_num pages, page-size each) */
    uint8_t *zbuf;
    /* array of compressed page lengths, one entry per page */
    uint32_t *zlen;
    /* true when the IAA hardware path initialized successfully */
    bool hw_avail;
} QplData;
44 
45 /**
46  * check_hw_avail: check if IAA hardware is available
47  *
48  * If the IAA hardware does not exist or is unavailable,
49  * the QPL hardware job initialization will fail.
50  *
51  * Returns true if IAA hardware is available, otherwise false.
52  *
53  * @job_size: indicates the hardware job size if hardware is available
54  */
55 static bool check_hw_avail(uint32_t *job_size)
56 {
57     qpl_path_t path = qpl_path_hardware;
58     uint32_t size = 0;
59     qpl_job *job;
60 
61     if (qpl_get_job_size(path, &size) != QPL_STS_OK) {
62         return false;
63     }
64     assert(size > 0);
65     job = g_malloc0(size);
66     if (qpl_init_job(path, job) != QPL_STS_OK) {
67         g_free(job);
68         return false;
69     }
70     g_free(job);
71     *job_size = size;
72     return true;
73 }
74 
75 /**
76  * multifd_qpl_free_sw_job: clean up software job
77  *
78  * Free the software job resources.
79  *
80  * @qpl: pointer to the QplData structure
81  */
82 static void multifd_qpl_free_sw_job(QplData *qpl)
83 {
84     assert(qpl);
85     if (qpl->sw_job) {
86         qpl_fini_job(qpl->sw_job);
87         g_free(qpl->sw_job);
88         qpl->sw_job = NULL;
89     }
90 }
91 
92 /**
93  * multifd_qpl_free_jobs: clean up hardware jobs
94  *
95  * Free all hardware job resources.
96  *
97  * @qpl: pointer to the QplData structure
98  */
99 static void multifd_qpl_free_hw_job(QplData *qpl)
100 {
101     assert(qpl);
102     if (qpl->hw_jobs) {
103         for (int i = 0; i < qpl->page_num; i++) {
104             qpl_fini_job(qpl->hw_jobs[i].job);
105             g_free(qpl->hw_jobs[i].job);
106             qpl->hw_jobs[i].job = NULL;
107         }
108         g_free(qpl->hw_jobs);
109         qpl->hw_jobs = NULL;
110     }
111 }
112 
113 /**
114  * multifd_qpl_init_sw_job: initialize a software job
115  *
116  * Use the QPL software path to initialize a job
117  *
118  * @qpl: pointer to the QplData structure
119  * @errp: pointer to an error
120  */
121 static int multifd_qpl_init_sw_job(QplData *qpl, Error **errp)
122 {
123     qpl_path_t path = qpl_path_software;
124     uint32_t size = 0;
125     qpl_job *job = NULL;
126     qpl_status status;
127 
128     status = qpl_get_job_size(path, &size);
129     if (status != QPL_STS_OK) {
130         error_setg(errp, "qpl_get_job_size failed with error %d", status);
131         return -1;
132     }
133     job = g_malloc0(size);
134     status = qpl_init_job(path, job);
135     if (status != QPL_STS_OK) {
136         error_setg(errp, "qpl_init_job failed with error %d", status);
137         g_free(job);
138         return -1;
139     }
140     qpl->sw_job = job;
141     return 0;
142 }
143 
144 /**
145  * multifd_qpl_init_jobs: initialize hardware jobs
146  *
147  * Use the QPL hardware path to initialize jobs
148  *
149  * @qpl: pointer to the QplData structure
150  * @size: the size of QPL hardware path job
151  * @errp: pointer to an error
152  */
153 static void multifd_qpl_init_hw_job(QplData *qpl, uint32_t size, Error **errp)
154 {
155     qpl_path_t path = qpl_path_hardware;
156     qpl_job *job = NULL;
157     qpl_status status;
158 
159     qpl->hw_jobs = g_new0(QplHwJob, qpl->page_num);
160     for (int i = 0; i < qpl->page_num; i++) {
161         job = g_malloc0(size);
162         status = qpl_init_job(path, job);
163         /* the job initialization should succeed after check_hw_avail */
164         assert(status == QPL_STS_OK);
165         qpl->hw_jobs[i].job = job;
166     }
167 }
168 
169 /**
170  * multifd_qpl_init: initialize QplData structure
171  *
172  * Allocate and initialize a QplData structure
173  *
174  * Returns a QplData pointer on success or NULL on error
175  *
176  * @num: the number of pages
177  * @size: the page size
178  * @errp: pointer to an error
179  */
180 static QplData *multifd_qpl_init(uint32_t num, uint32_t size, Error **errp)
181 {
182     uint32_t job_size = 0;
183     QplData *qpl;
184 
185     qpl = g_new0(QplData, 1);
186     qpl->page_num = num;
187     if (multifd_qpl_init_sw_job(qpl, errp) != 0) {
188         g_free(qpl);
189         return NULL;
190     }
191     qpl->hw_avail = check_hw_avail(&job_size);
192     if (qpl->hw_avail) {
193         multifd_qpl_init_hw_job(qpl, job_size, errp);
194     }
195     qpl->zbuf = g_malloc0(size * num);
196     qpl->zlen = g_new0(uint32_t, num);
197     return qpl;
198 }
199 
200 /**
201  * multifd_qpl_deinit: clean up QplData structure
202  *
203  * Free jobs, buffers and the QplData structure
204  *
205  * @qpl: pointer to the QplData structure
206  */
207 static void multifd_qpl_deinit(QplData *qpl)
208 {
209     if (qpl) {
210         multifd_qpl_free_sw_job(qpl);
211         multifd_qpl_free_hw_job(qpl);
212         g_free(qpl->zbuf);
213         g_free(qpl->zlen);
214         g_free(qpl);
215     }
216 }
217 
218 /**
219  * multifd_qpl_send_setup: set up send side
220  *
221  * Set up the channel with QPL compression.
222  *
223  * Returns 0 on success or -1 on error
224  *
225  * @p: Params for the channel being used
226  * @errp: pointer to an error
227  */
228 static int multifd_qpl_send_setup(MultiFDSendParams *p, Error **errp)
229 {
230     QplData *qpl;
231 
232     qpl = multifd_qpl_init(p->page_count, p->page_size, errp);
233     if (!qpl) {
234         return -1;
235     }
236     p->compress_data = qpl;
237 
238     /*
239      * the page will be compressed independently and sent using an IOV. The
240      * additional two IOVs are used to store packet header and compressed data
241      * length
242      */
243     p->iov = g_new0(struct iovec, p->page_count + 2);
244     return 0;
245 }
246 
247 /**
248  * multifd_qpl_send_cleanup: clean up send side
249  *
250  * Close the channel and free memory.
251  *
252  * @p: Params for the channel being used
253  * @errp: pointer to an error
254  */
255 static void multifd_qpl_send_cleanup(MultiFDSendParams *p, Error **errp)
256 {
257     multifd_qpl_deinit(p->compress_data);
258     p->compress_data = NULL;
259     g_free(p->iov);
260     p->iov = NULL;
261 }
262 
/**
 * multifd_qpl_send_prepare: prepare data to be able to send
 *
 * Create a compressed buffer with all the pages that we are going to
 * send.
 *
 * Returns 0 on success or -1 on error
 *
 * @p: Params for the channel being used
 * @errp: pointer to an error
 */
static int multifd_qpl_send_prepare(MultiFDSendParams *p, Error **errp)
{
    /* Implement in next patch */
    return -1;
}
279 
280 /**
281  * multifd_qpl_recv_setup: set up receive side
282  *
283  * Create the compressed channel and buffer.
284  *
285  * Returns 0 on success or -1 on error
286  *
287  * @p: Params for the channel being used
288  * @errp: pointer to an error
289  */
290 static int multifd_qpl_recv_setup(MultiFDRecvParams *p, Error **errp)
291 {
292     QplData *qpl;
293 
294     qpl = multifd_qpl_init(p->page_count, p->page_size, errp);
295     if (!qpl) {
296         return -1;
297     }
298     p->compress_data = qpl;
299     return 0;
300 }
301 
/**
 * multifd_qpl_recv_cleanup: clean up receive side
 *
 * Close the channel and free memory.
 *
 * @p: Params for the channel being used
 */
static void multifd_qpl_recv_cleanup(MultiFDRecvParams *p)
{
    multifd_qpl_deinit(p->compress_data);
    p->compress_data = NULL;
}
314 
/**
 * multifd_qpl_recv: read the data from the channel into actual pages
 *
 * Read the compressed buffer, and uncompress it into the actual
 * pages.
 *
 * Returns 0 on success or -1 on error
 *
 * @p: Params for the channel being used
 * @errp: pointer to an error
 */
static int multifd_qpl_recv(MultiFDRecvParams *p, Error **errp)
{
    /* Implement in next patch */
    return -1;
}
331 
/* multifd callback table for the QPL compression method */
static MultiFDMethods multifd_qpl_ops = {
    .send_setup = multifd_qpl_send_setup,
    .send_cleanup = multifd_qpl_send_cleanup,
    .send_prepare = multifd_qpl_send_prepare,
    .recv_setup = multifd_qpl_recv_setup,
    .recv_cleanup = multifd_qpl_recv_cleanup,
    .recv = multifd_qpl_recv,
};
340 
/* register the QPL callbacks under the QPL multifd compression method */
static void multifd_qpl_register(void)
{
    multifd_register_ops(MULTIFD_COMPRESSION_QPL, &multifd_qpl_ops);
}

/* run the registration at module-init time */
migration_init(multifd_qpl_register);
347