// SPDX-License-Identifier: GPL-2.0-only
/*
 * An implementation of host initiated guest snapshot.
 *
 * Copyright (C) 2013, Microsoft, Inc.
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/nls.h>
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>

#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"

#define VSS_MAJOR	5
#define VSS_MINOR	0
#define VSS_VERSION	(VSS_MAJOR << 16 | VSS_MINOR)

#define VSS_VER_COUNT 1
static const int vss_versions[] = {
	VSS_VERSION
};

#define FW_VER_COUNT 1
static const int fw_versions[] = {
	UTIL_FW_VERSION
};

/*
 * Timeout values are based on expectations from the host.
 */
#define VSS_FREEZE_TIMEOUT (15 * 60)

/*
 * Global state maintained for the transaction that is being processed. For a
 * class of integration services, including the "VSS service", the specified
 * protocol is a "request/response" protocol, which means that there can be
 * only a single outstanding transaction from the host at any given point in
 * time. We use this to simplify memory management in this driver - we cache
 * and process only one message at a time.
 *
 * While the request/response protocol is guaranteed by the host, we further
 * ensure this by serializing packet processing in this driver - we do not
 * read additional packets from the VMBus until the current packet is fully
 * handled.
 */

static struct {
	int state;   /* hvutil_device_state */
	int recv_len; /* number of bytes received. */
	struct vmbus_channel *recv_channel; /* chn we got the request */
	u64 recv_req_id; /* request ID. */
	struct hv_vss_msg  *msg; /* current message */
} vss_transaction;


static void vss_respond_to_host(int error);

/*
 * This state maintains the version number registered by the daemon.
 */
static int dm_reg_value;

static const char vss_devname[] = "vmbus/hv_vss";
static __u8 *recv_buffer;
static struct hvutil_transport *hvt;

static void vss_timeout_func(struct work_struct *dummy);
static void vss_handle_request(struct work_struct *dummy);

static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
static DECLARE_WORK(vss_handle_request_work, vss_handle_request);

static void vss_poll_wrapper(void *channel)
{
	/* Transaction is finished, reset the state here to avoid races. */
	vss_transaction.state = HVUTIL_READY;
	tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}
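
/*
 * Informal sketch of how vss_transaction.state moves through one
 * transaction, derived from the transitions made in this file (not an
 * authoritative protocol description):
 *
 *	HVUTIL_DEVICE_INIT      -> HVUTIL_READY
 *		daemon registers (vss_handle_handshake / vss_register_done)
 *	HVUTIL_READY            -> HVUTIL_HOSTMSG_RECEIVED
 *		host request queued (vss_handle_request)
 *	HVUTIL_HOSTMSG_RECEIVED -> HVUTIL_USERSPACE_REQ
 *		request forwarded to the daemon (vss_send_op)
 *	HVUTIL_USERSPACE_REQ    -> HVUTIL_USERSPACE_RECV
 *		daemon reply received (vss_on_msg)
 *	HVUTIL_USERSPACE_RECV   -> HVUTIL_READY
 *		response sent to the host, state reset in vss_poll_wrapper()
 *
 * Timeouts and errors short-circuit back to HVUTIL_READY via
 * vss_respond_to_host(HV_E_FAIL) followed by vss_poll_wrapper().
 */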

/*
 * Callback when data is received from user mode.
 */

static void vss_timeout_func(struct work_struct *dummy)
{
	/*
	 * Timed out waiting for the userspace component to reply.
	 */
	pr_warn("VSS: timeout waiting for daemon to reply\n");
	vss_respond_to_host(HV_E_FAIL);

	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}

static void vss_register_done(void)
{
	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
	pr_debug("VSS: userspace daemon registered\n");
}

static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
{
	u32 our_ver = VSS_OP_REGISTER1;

	switch (vss_msg->vss_hdr.operation) {
	case VSS_OP_REGISTER:
		/* Daemon doesn't expect us to reply */
		dm_reg_value = VSS_OP_REGISTER;
		break;
	case VSS_OP_REGISTER1:
		/* Daemon expects us to reply with our own version */
		if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
					  vss_register_done))
			return -EFAULT;
		dm_reg_value = VSS_OP_REGISTER1;
		break;
	default:
		return -EINVAL;
	}
	pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
	return 0;
}

static int vss_on_msg(void *msg, int len)
{
	struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;

	if (len != sizeof(*vss_msg)) {
		pr_debug("VSS: Message size does not match length\n");
		return -EINVAL;
	}

	if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
		/*
		 * Don't process registration messages if we're in the middle
		 * of processing a transaction.
		 */
		if (vss_transaction.state > HVUTIL_READY) {
			pr_debug("VSS: Got unexpected registration request\n");
			return -EINVAL;
		}

		return vss_handle_handshake(vss_msg);
	} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
		vss_transaction.state = HVUTIL_USERSPACE_RECV;

		if (vss_msg->vss_hdr.operation == VSS_OP_HOT_BACKUP)
			vss_transaction.msg->vss_cf.flags =
				VSS_HBU_NO_AUTO_RECOVERY;

		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(vss_msg->error);
			/* Transaction is finished, reset the state. */
			hv_poll_channel(vss_transaction.recv_channel,
					vss_poll_wrapper);
		}
	} else {
		/* This is a spurious call! */
		pr_debug("VSS: Transaction not active\n");
		return -EINVAL;
	}
	return 0;
}

static void vss_send_op(void)
{
	int op = vss_transaction.msg->vss_hdr.operation;
	int rc;
	struct hv_vss_msg *vss_msg;

	/* The transaction state is wrong. */
	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
		pr_debug("VSS: Unexpected attempt to send to daemon\n");
		return;
	}

	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
	if (!vss_msg)
		return;

	vss_msg->vss_hdr.operation = op;

	vss_transaction.state = HVUTIL_USERSPACE_REQ;

	schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
			VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);

	rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
	if (rc) {
		pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(HV_E_FAIL);
			vss_transaction.state = HVUTIL_READY;
		}
	}

	kfree(vss_msg);
}
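
/*
 * A minimal sketch of the userspace side of the exchange driven by
 * vss_send_op()/vss_on_msg() above, assuming the char-device transport
 * exposed by hvutil_transport as /dev/vmbus/hv_vss (the connector/netlink
 * path and the real hv_vss_daemon are not shown; handle_op() is a
 * hypothetical freeze/thaw helper, not part of this driver):
 *
 *	int fd = open("/dev/vmbus/hv_vss", O_RDWR);
 *	struct hv_vss_msg msg = { 0 };
 *	u32 kernel_ver;
 *
 *	msg.vss_hdr.operation = VSS_OP_REGISTER1;
 *	write(fd, &msg, sizeof(msg));		register; vss_handle_handshake()
 *	read(fd, &kernel_ver, sizeof(kernel_ver));	kernel's version reply
 *
 *	for (;;) {
 *		read(fd, &msg, sizeof(msg));	blocks until vss_send_op()
 *		msg.error = handle_op(&msg);	freeze/thaw/hot-backup work
 *		write(fd, &msg, sizeof(msg));	must be exactly sizeof(msg),
 *						see the length check in
 *						vss_on_msg()
 *	}
 */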

static void vss_handle_request(struct work_struct *dummy)
{
	switch (vss_transaction.msg->vss_hdr.operation) {
	/*
	 * Initiate a "freeze/thaw" operation in the guest.
	 * We respond to the host once the operation is complete.
	 *
	 * We send the message to the user space daemon and the operation is
	 * performed in the daemon.
	 */
	case VSS_OP_THAW:
	case VSS_OP_FREEZE:
	case VSS_OP_HOT_BACKUP:
		if (vss_transaction.state < HVUTIL_READY) {
			/* Userspace is not registered yet */
			pr_debug("VSS: Not ready for request.\n");
			vss_respond_to_host(HV_E_FAIL);
			return;
		}

		pr_debug("VSS: Received request for op code: %d\n",
			 vss_transaction.msg->vss_hdr.operation);
		vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
		vss_send_op();
		return;
	case VSS_OP_GET_DM_INFO:
		vss_transaction.msg->dm_info.flags = 0;
		break;
	default:
		break;
	}

	vss_respond_to_host(0);
	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}

/*
 * Send a response back to the host.
 */

static void
vss_respond_to_host(int error)
{
	struct icmsg_hdr *icmsghdrp;
	u32	buf_len;
	struct vmbus_channel *channel;
	u64	req_id;

	/*
	 * Copy the global state for completing the transaction. Note that
	 * only one transaction can be active at a time.
	 */

	buf_len = vss_transaction.recv_len;
	channel = vss_transaction.recv_channel;
	req_id = vss_transaction.recv_req_id;

	icmsghdrp = (struct icmsg_hdr *)
			&recv_buffer[sizeof(struct vmbuspipe_hdr)];

	if (channel->onchannel_callback == NULL)
		/*
		 * We have raced with util driver being unloaded;
		 * silently return.
		 */
		return;

	icmsghdrp->status = error;

	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;

	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
				VM_PKT_DATA_INBAND, 0);

}

/*
 * This callback is invoked when we get a VSS message from the host.
 * The host ensures that only one VSS transaction can be active at a time.
 */

void hv_vss_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct hv_vss_msg *vss_msg;
	int vss_srv_version;

	struct icmsg_hdr *icmsghdrp;

	if (vss_transaction.state > HVUTIL_READY)
		return;

	if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen, &requestid)) {
		pr_err_ratelimited("VSS request received. Could not read into recv buf\n");
		return;
	}

	if (!recvlen)
		return;

	/* Ensure recvlen is big enough to read header data */
	if (recvlen < ICMSG_HDR) {
		pr_err_ratelimited("VSS request received. Packet length too small: %d\n",
				   recvlen);
		return;
	}

	icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];

	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
		if (vmbus_prep_negotiate_resp(icmsghdrp,
				recv_buffer, recvlen,
				fw_versions, FW_VER_COUNT,
				vss_versions, VSS_VER_COUNT,
				NULL, &vss_srv_version)) {

			pr_info("VSS IC version %d.%d\n",
				vss_srv_version >> 16,
				vss_srv_version & 0xFFFF);
		}
	} else if (icmsghdrp->icmsgtype == ICMSGTYPE_VSS) {
		/* Ensure recvlen is big enough to contain hv_vss_msg */
		if (recvlen < ICMSG_HDR + sizeof(struct hv_vss_msg)) {
			pr_err_ratelimited("Invalid VSS msg. Packet length too small: %u\n",
					   recvlen);
			return;
		}
		vss_msg = (struct hv_vss_msg *)&recv_buffer[ICMSG_HDR];

		/*
		 * Stash away this global state for completing the
		 * transaction; note transactions are serialized.
		 */

		vss_transaction.recv_len = recvlen;
		vss_transaction.recv_req_id = requestid;
		vss_transaction.msg = (struct hv_vss_msg *)vss_msg;

		schedule_work(&vss_handle_request_work);
		return;
	} else {
		pr_err_ratelimited("VSS request received. Invalid msg type: %d\n",
				   icmsghdrp->icmsgtype);
		return;
	}

	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION |
		ICMSGHDRFLAG_RESPONSE;
	vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
			 VM_PKT_DATA_INBAND, 0);
}
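
/*
 * For reference, the layout of recv_buffer as parsed in
 * hv_vss_onchannelcallback() above (informal, inferred from the offsets
 * used in this file):
 *
 *	+----------------------+------------------+-------------------+
 *	| struct vmbuspipe_hdr | struct icmsg_hdr | struct hv_vss_msg |
 *	+----------------------+------------------+-------------------+
 *
 * icmsg_hdr starts at offset sizeof(struct vmbuspipe_hdr) and the VSS
 * payload at offset ICMSG_HDR, which is why the length checks above
 * compare recvlen against ICMSG_HDR and ICMSG_HDR + sizeof(struct hv_vss_msg).
 *
 * Negotiation packets carry a version-negotiation payload instead of
 * hv_vss_msg and are answered via vmbus_prep_negotiate_resp().
 */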

static void vss_on_reset(void)
{
	if (cancel_delayed_work_sync(&vss_timeout_work))
		vss_respond_to_host(HV_E_FAIL);
	vss_transaction.state = HVUTIL_DEVICE_INIT;
}

int
hv_vss_init(struct hv_util_service *srv)
{
	if (vmbus_proto_version < VERSION_WIN8_1) {
		pr_warn("Integration service 'Backup (volume snapshot)'"
			" not supported on this host version.\n");
		return -ENOTSUPP;
	}
	recv_buffer = srv->recv_buffer;
	vss_transaction.recv_channel = srv->channel;

	/*
	 * When this driver loads, the user level daemon that
	 * processes the host requests may not yet be running.
	 * Defer processing channel callbacks until the daemon
	 * has registered.
	 */
	vss_transaction.state = HVUTIL_DEVICE_INIT;

	hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
				    vss_on_msg, vss_on_reset);
	if (!hvt) {
		pr_warn("VSS: Failed to initialize transport\n");
		return -EFAULT;
	}

	return 0;
}

static void hv_vss_cancel_work(void)
{
	cancel_delayed_work_sync(&vss_timeout_work);
	cancel_work_sync(&vss_handle_request_work);
}

int hv_vss_pre_suspend(void)
{
	struct vmbus_channel *channel = vss_transaction.recv_channel;
	struct hv_vss_msg *vss_msg;

	/*
	 * Fake a THAW message for the user space daemon in case the daemon
	 * has frozen the file systems. It doesn't matter if there is already
	 * a message pending to be delivered to the user space since we force
	 * vss_transaction.state to be HVUTIL_READY, so the user space daemon's
	 * write() will fail with EINVAL (see vss_on_msg()), and the daemon
	 * will reset the device by closing and re-opening it.
	 */
	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
	if (!vss_msg)
		return -ENOMEM;

	tasklet_disable(&channel->callback_event);

	vss_msg->vss_hdr.operation = VSS_OP_THAW;

	/* Cancel any possible pending work. */
	hv_vss_cancel_work();

	/* We don't care about the return value. */
	hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);

	kfree(vss_msg);

	vss_transaction.state = HVUTIL_READY;

	/* tasklet_enable() will be called in hv_vss_pre_resume(). */
	return 0;
}

int hv_vss_pre_resume(void)
{
	struct vmbus_channel *channel = vss_transaction.recv_channel;

	tasklet_enable(&channel->callback_event);

	return 0;
}

void hv_vss_deinit(void)
{
	vss_transaction.state = HVUTIL_DEVICE_DYING;

	hv_vss_cancel_work();

	hvutil_transport_destroy(hvt);
}
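
/*
 * Informal note on the suspend/resume hooks above: the caller lives outside
 * this file (the hv_utils framework registers them as the VSS service's
 * pre_suspend/pre_resume callbacks), and they are expected to be used as a
 * pair, roughly:
 *
 *	hv_vss_pre_suspend();	disable the channel tasklet, fake a THAW,
 *				force the state back to HVUTIL_READY
 *	... hibernation image is written / VM resumes ...
 *	hv_vss_pre_resume();	re-enable the channel tasklet
 */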