/*
 * Multifd UADK compression accelerator implementation
 *
 * Copyright (c) 2024 Huawei Technologies R & D (UK) Ltd
 *
 * Authors:
 *  Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "exec/ramblock.h"
#include "migration.h"
#include "multifd.h"
#include "options.h"
#include "qemu/error-report.h"
#include "uadk/wd_comp.h"
#include "uadk/wd_sched.h"

struct wd_data {
    handle_t handle;
    uint8_t *buf;
    uint32_t *buf_hdr;
};

static bool uadk_hw_init(void)
{
    char alg[] = "zlib";
    int ret;

    ret = wd_comp_init2(alg, SCHED_POLICY_RR, TASK_HW);
    if (ret && ret != -WD_EEXIST) {
        return false;
    } else {
        return true;
    }
}

static struct wd_data *multifd_uadk_init_sess(uint32_t count,
                                              uint32_t page_size,
                                              bool compress, Error **errp)
{
    struct wd_comp_sess_setup ss = {0};
    struct sched_params param = {0};
    uint32_t size = count * page_size;
    struct wd_data *wd;

    wd = g_new0(struct wd_data, 1);

    if (uadk_hw_init()) {
        ss.alg_type = WD_ZLIB;
        if (compress) {
            ss.op_type = WD_DIR_COMPRESS;
            /* Add an additional page for handling output > input */
            size += page_size;
        } else {
            ss.op_type = WD_DIR_DECOMPRESS;
        }
        /* We use default level 1 compression and 4K window size */
        param.type = ss.op_type;
        ss.sched_param = &param;

        wd->handle = wd_comp_alloc_sess(&ss);
        if (!wd->handle) {
            error_setg(errp, "multifd: failed wd_comp_alloc_sess");
            goto out;
        }
    } else {
        /* For CI test use */
        warn_report_once("UADK hardware not available. Switching to no compression mode");
    }

    wd->buf = g_try_malloc(size);
    if (!wd->buf) {
        error_setg(errp, "multifd: out of memory for uadk buf");
        goto out_free_sess;
    }
    wd->buf_hdr = g_new0(uint32_t, count);
    return wd;

out_free_sess:
    if (wd->handle) {
        wd_comp_free_sess(wd->handle);
    }
out:
    wd_comp_uninit2();
    g_free(wd);
    return NULL;
}

static void multifd_uadk_uninit_sess(struct wd_data *wd)
{
    if (wd->handle) {
        wd_comp_free_sess(wd->handle);
    }
    wd_comp_uninit2();
    g_free(wd->buf);
    g_free(wd->buf_hdr);
    g_free(wd);
}
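
/*
 * Rough lifecycle sketch of the per-channel state above (derived from this
 * file, not a separate spec): multifd_uadk_init_sess() pairs
 * wd_comp_init2()/wd_comp_alloc_sess() with multifd_uadk_uninit_sess()'s
 * wd_comp_free_sess()/wd_comp_uninit2(). 'buf' holds up to page_count pages
 * of (de)compressed data, plus one extra page on the send side so that a
 * page which expands under compression still fits; 'buf_hdr' holds one
 * uint32_t payload length per page.
 */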

/**
 * multifd_uadk_send_setup: setup send side
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int multifd_uadk_send_setup(MultiFDSendParams *p, Error **errp)
{
    struct wd_data *wd;

    wd = multifd_uadk_init_sess(p->page_count, p->page_size, true, errp);
    if (!wd) {
        return -1;
    }

    p->compress_data = wd;
    assert(p->iov == NULL);
    /*
     * Each page will be compressed independently and sent using an IOV. The
     * additional two IOVs are used to store packet header and compressed data
     * length
     */

    p->iov = g_new0(struct iovec, p->page_count + 2);
    return 0;
}

/**
 * multifd_uadk_send_cleanup: cleanup send side
 *
 * Close the channel and return memory.
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static void multifd_uadk_send_cleanup(MultiFDSendParams *p, Error **errp)
{
    struct wd_data *wd = p->compress_data;

    multifd_uadk_uninit_sess(wd);
    p->compress_data = NULL;
}

static inline void prepare_next_iov(MultiFDSendParams *p, void *base,
                                    uint32_t len)
{
    p->iov[p->iovs_num].iov_base = (uint8_t *)base;
    p->iov[p->iovs_num].iov_len = len;
    p->next_packet_size += len;
    p->iovs_num++;
}

/**
 * multifd_uadk_send_prepare: prepare data to be able to send
 *
 * Create a compressed buffer with all the pages that we are going to
 * send.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp)
{
    struct wd_data *uadk_data = p->compress_data;
    uint32_t hdr_size;
    uint8_t *buf = uadk_data->buf;
    int ret = 0;

    if (!multifd_send_prepare_common(p)) {
        goto out;
    }

    hdr_size = p->pages->normal_num * sizeof(uint32_t);
    /* prepare the header that stores the lengths of all compressed data */
    prepare_next_iov(p, uadk_data->buf_hdr, hdr_size);

    for (int i = 0; i < p->pages->normal_num; i++) {
        struct wd_comp_req creq = {
            .op_type = WD_DIR_COMPRESS,
            .src = p->pages->block->host + p->pages->offset[i],
            .src_len = p->page_size,
            .dst = buf,
            /* Set dst_len to double the src in case compressed out >= page_size */
            .dst_len = p->page_size * 2,
        };

        if (uadk_data->handle) {
            ret = wd_do_comp_sync(uadk_data->handle, &creq);
            if (ret || creq.status) {
                error_setg(errp, "multifd %u: failed compression, ret %d status %d",
                           p->id, ret, creq.status);
                return -1;
            }
            if (creq.dst_len < p->page_size) {
                uadk_data->buf_hdr[i] = cpu_to_be32(creq.dst_len);
                prepare_next_iov(p, buf, creq.dst_len);
                buf += creq.dst_len;
            }
        }
        /*
         * Send raw data if no UADK hardware or if compressed out >= page_size.
         * We might be better off sending raw data if output is slightly less
         * than page_size as well because at the receive end we can skip the
         * decompression. But it is tricky to find the right number here.
         */
        if (!uadk_data->handle || creq.dst_len >= p->page_size) {
            uadk_data->buf_hdr[i] = cpu_to_be32(p->page_size);
            prepare_next_iov(p, p->pages->block->host + p->pages->offset[i],
                             p->page_size);
            buf += p->page_size;
        }
    }
out:
    p->flags |= MULTIFD_FLAG_UADK;
    multifd_send_fill_packet(p);
    return 0;
}
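
/*
 * Wire layout produced by multifd_uadk_send_prepare() above and parsed by
 * multifd_uadk_recv() below (a sketch derived from this file, not a separate
 * spec):
 *
 *   iov[0]    multifd packet header (filled by the generic multifd code)
 *   iov[1]    buf_hdr[]: normal_num big-endian uint32_t payload lengths
 *   iov[2..]  one payload per non-zero page: compressed bytes when the
 *             recorded length is smaller than page_size, the raw page
 *             otherwise
 */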

/**
 * multifd_uadk_recv_setup: setup receive side
 *
 * Create the compressed channel and buffer.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int multifd_uadk_recv_setup(MultiFDRecvParams *p, Error **errp)
{
    struct wd_data *wd;

    wd = multifd_uadk_init_sess(p->page_count, p->page_size, false, errp);
    if (!wd) {
        return -1;
    }
    p->compress_data = wd;
    return 0;
}

/**
 * multifd_uadk_recv_cleanup: cleanup receive side
 *
 * Close the channel and return memory.
 *
 * @p: Params for the channel that we are using
 */
static void multifd_uadk_recv_cleanup(MultiFDRecvParams *p)
{
    struct wd_data *wd = p->compress_data;

    multifd_uadk_uninit_sess(wd);
    p->compress_data = NULL;
}

/**
 * multifd_uadk_recv: read the data from the channel into actual pages
 *
 * Read the compressed buffer, and uncompress it into the actual
 * pages.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp)
{
    struct wd_data *uadk_data = p->compress_data;
    uint32_t in_size = p->next_packet_size;
    uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
    uint32_t hdr_len = p->normal_num * sizeof(uint32_t);
    uint32_t data_len = 0;
    uint8_t *buf = uadk_data->buf;
    int ret = 0;

    if (flags != MULTIFD_FLAG_UADK) {
        error_setg(errp, "multifd %u: flags received %x flags expected %x",
                   p->id, flags, MULTIFD_FLAG_UADK);
        return -1;
    }

    multifd_recv_zero_page_process(p);
    if (!p->normal_num) {
        assert(in_size == 0);
        return 0;
    }

    /* read compressed data lengths */
    assert(hdr_len < in_size);
    ret = qio_channel_read_all(p->c, (void *) uadk_data->buf_hdr,
                               hdr_len, errp);
    if (ret != 0) {
        return ret;
    }

    for (int i = 0; i < p->normal_num; i++) {
        uadk_data->buf_hdr[i] = be32_to_cpu(uadk_data->buf_hdr[i]);
        data_len += uadk_data->buf_hdr[i];
        assert(uadk_data->buf_hdr[i] <= p->page_size);
    }

    /* read compressed data */
    assert(in_size == hdr_len + data_len);
    ret = qio_channel_read_all(p->c, (void *)buf, data_len, errp);
    if (ret != 0) {
        return ret;
    }

    for (int i = 0; i < p->normal_num; i++) {
        struct wd_comp_req creq = {
            .op_type = WD_DIR_DECOMPRESS,
            .src = buf,
            .src_len = uadk_data->buf_hdr[i],
            .dst = p->host + p->normal[i],
            .dst_len = p->page_size,
        };

        /* A length equal to page_size marks a page that was sent raw */
        if (uadk_data->buf_hdr[i] == p->page_size) {
            memcpy(p->host + p->normal[i], buf, p->page_size);
            buf += p->page_size;
            continue;
        }

        if (unlikely(!uadk_data->handle)) {
            error_setg(errp, "multifd %u: UADK HW not available for decompression",
                       p->id);
            return -1;
        }

        ret = wd_do_comp_sync(uadk_data->handle, &creq);
        if (ret || creq.status) {
            error_setg(errp, "multifd %u: failed decompression, ret %d status %d",
                       p->id, ret, creq.status);
            return -1;
        }
        if (creq.dst_len != p->page_size) {
            error_setg(errp, "multifd %u: decompressed length error", p->id);
            return -1;
        }
        buf += uadk_data->buf_hdr[i];
    }

    return 0;
}

static MultiFDMethods multifd_uadk_ops = {
    .send_setup = multifd_uadk_send_setup,
    .send_cleanup = multifd_uadk_send_cleanup,
    .send_prepare = multifd_uadk_send_prepare,
    .recv_setup = multifd_uadk_recv_setup,
    .recv_cleanup = multifd_uadk_recv_cleanup,
    .recv = multifd_uadk_recv,
};

static void multifd_uadk_register(void)
{
    multifd_register_ops(MULTIFD_COMPRESSION_UADK, &multifd_uadk_ops);
}
migration_init(multifd_uadk_register);
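
/*
 * Usage sketch, assuming QEMU is built with UADK support: this compressor
 * is selected through the multifd-compression migration parameter on both
 * source and destination, together with the multifd capability, e.g. via
 * QMP:
 *
 *   { "execute": "migrate-set-parameters",
 *     "arguments": { "multifd-compression": "uadk" } }
 */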