// SPDX-License-Identifier: GPL-2.0-only
/*
 * An implementation of host initiated guest snapshot.
 *
 * Copyright (C) 2013, Microsoft, Inc.
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/nls.h>
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>

#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"

#define VSS_MAJOR	5
#define VSS_MINOR	0
#define VSS_VERSION	(VSS_MAJOR << 16 | VSS_MINOR)

#define VSS_VER_COUNT 1
static const int vss_versions[] = {
	VSS_VERSION
};

#define FW_VER_COUNT 1
static const int fw_versions[] = {
	UTIL_FW_VERSION
};

/*
 * Timeout values are based on expectations from the host.
 */
#define VSS_FREEZE_TIMEOUT (15 * 60)

/*
 * Global state maintained for the transaction that is being processed. For a
 * class of integration services, including the "VSS service", the specified
 * protocol is a "request/response" protocol which means that there can only
 * be a single outstanding transaction from the host at any given point in
 * time. We use this to simplify memory management in this driver - we cache
 * and process only one message at a time.
 *
 * While the request/response protocol is guaranteed by the host, we further
 * ensure this by serializing packet processing in this driver - we do not
 * read additional packets from the VMBus until the current packet is fully
 * handled.
 */

static struct {
	int state;   /* hvutil_device_state */
	int recv_len; /* number of bytes received. */
	struct vmbus_channel *recv_channel; /* channel we got the request from */
	u64 recv_req_id; /* request ID. */
	struct hv_vss_msg *msg; /* current message */
} vss_transaction;

static void vss_respond_to_host(int error);

/*
 * This state maintains the version number registered by the daemon.
 */
static int dm_reg_value;

static const char vss_devname[] = "vmbus/hv_vss";
static __u8 *recv_buffer;
static struct hvutil_transport *hvt;

static void vss_timeout_func(struct work_struct *dummy);
static void vss_handle_request(struct work_struct *dummy);

static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
static DECLARE_WORK(vss_handle_request_work, vss_handle_request);

static void vss_poll_wrapper(void *channel)
{
	/* Transaction is finished, reset the state here to avoid races. */
	vss_transaction.state = HVUTIL_READY;
	hv_vss_onchannelcallback(channel);
}

static void vss_timeout_func(struct work_struct *dummy)
{
	/*
	 * Timed out waiting for the userspace component to reply.
	 */
	pr_warn("VSS: timeout waiting for daemon to reply\n");
	vss_respond_to_host(HV_E_FAIL);

	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}

static void vss_register_done(void)
{
	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
	pr_debug("VSS: userspace daemon registered\n");
}

static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
{
	u32 our_ver = VSS_OP_REGISTER1;

	switch (vss_msg->vss_hdr.operation) {
	case VSS_OP_REGISTER:
		/* Daemon doesn't expect us to reply */
		dm_reg_value = VSS_OP_REGISTER;
		break;
	case VSS_OP_REGISTER1:
		/* Daemon expects us to reply with our own version */
		if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
					  vss_register_done))
			return -EFAULT;
		dm_reg_value = VSS_OP_REGISTER1;
		break;
	default:
		return -EINVAL;
	}
	pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
	return 0;
}

/*
 * Callback invoked when data is received from user mode.
 */
static int vss_on_msg(void *msg, int len)
{
	struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;

	if (len != sizeof(*vss_msg)) {
		pr_debug("VSS: Message size does not match length\n");
		return -EINVAL;
	}

	if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
		/*
		 * Don't process registration messages if we're in the middle
		 * of processing a transaction.
		 */
		if (vss_transaction.state > HVUTIL_READY) {
			pr_debug("VSS: Got unexpected registration request\n");
			return -EINVAL;
		}

		return vss_handle_handshake(vss_msg);
	} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
		vss_transaction.state = HVUTIL_USERSPACE_RECV;

		if (vss_msg->vss_hdr.operation == VSS_OP_HOT_BACKUP)
			vss_transaction.msg->vss_cf.flags =
				VSS_HBU_NO_AUTO_RECOVERY;

		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(vss_msg->error);
			/* Transaction is finished, reset the state. */
			hv_poll_channel(vss_transaction.recv_channel,
					vss_poll_wrapper);
		}
	} else {
		/* This is a spurious call! */
		pr_debug("VSS: Transaction not active\n");
		return -EINVAL;
	}
	return 0;
}

static void vss_send_op(void)
{
	int op = vss_transaction.msg->vss_hdr.operation;
	int rc;
	struct hv_vss_msg *vss_msg;

	/* The transaction state is wrong. */
	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
		pr_debug("VSS: Unexpected attempt to send to daemon\n");
		return;
	}

	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
	if (!vss_msg)
		return;

	vss_msg->vss_hdr.operation = op;

	vss_transaction.state = HVUTIL_USERSPACE_REQ;

	schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
			VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);

	rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
	if (rc) {
		pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(HV_E_FAIL);
			vss_transaction.state = HVUTIL_READY;
		}
	}

	kfree(vss_msg);
}

static void vss_handle_request(struct work_struct *dummy)
{
	switch (vss_transaction.msg->vss_hdr.operation) {
	/*
	 * Initiate a "freeze/thaw" operation in the guest.
	 * We respond to the host once the operation is complete.
	 *
	 * We send the message to the user space daemon and the operation is
	 * performed in the daemon.
	 */
	case VSS_OP_THAW:
	case VSS_OP_FREEZE:
	case VSS_OP_HOT_BACKUP:
		if (vss_transaction.state < HVUTIL_READY) {
			/* Userspace is not registered yet */
			pr_debug("VSS: Not ready for request.\n");
			vss_respond_to_host(HV_E_FAIL);
			return;
		}

		pr_debug("VSS: Received request for op code: %d\n",
			 vss_transaction.msg->vss_hdr.operation);
		vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
		vss_send_op();
		return;
	case VSS_OP_GET_DM_INFO:
		vss_transaction.msg->dm_info.flags = 0;
		break;
	default:
		break;
	}

	vss_respond_to_host(0);
	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}

/*
 * Send a response back to the host.
 */
static void
vss_respond_to_host(int error)
{
	struct icmsg_hdr *icmsghdrp;
	u32 buf_len;
	struct vmbus_channel *channel;
	u64 req_id;

	/*
	 * Copy the global state for completing the transaction. Note that
	 * only one transaction can be active at a time.
	 */
	buf_len = vss_transaction.recv_len;
	channel = vss_transaction.recv_channel;
	req_id = vss_transaction.recv_req_id;

	icmsghdrp = (struct icmsg_hdr *)
			&recv_buffer[sizeof(struct vmbuspipe_hdr)];

	if (channel->onchannel_callback == NULL)
		/*
		 * We have raced with util driver being unloaded;
		 * silently return.
		 */
		return;

	icmsghdrp->status = error;

	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;

	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
			 VM_PKT_DATA_INBAND, 0);
}

/*
 * This callback is invoked when we get a VSS message from the host.
 * The host ensures that only one VSS transaction can be active at a time.
 */
void hv_vss_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct hv_vss_msg *vss_msg;
	int vss_srv_version;
	struct icmsg_hdr *icmsghdrp;

	if (vss_transaction.state > HVUTIL_READY)
		return;

	vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
			 &requestid);

	if (recvlen > 0) {
		icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
				sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp,
				  recv_buffer, fw_versions, FW_VER_COUNT,
				  vss_versions, VSS_VER_COUNT,
				  NULL, &vss_srv_version)) {

				pr_info("VSS IC version %d.%d\n",
					vss_srv_version >> 16,
					vss_srv_version & 0xFFFF);
			}
		} else {
			vss_msg = (struct hv_vss_msg *)&recv_buffer[
				sizeof(struct vmbuspipe_hdr) +
				sizeof(struct icmsg_hdr)];

			/*
			 * Stash away this global state for completing the
			 * transaction; note transactions are serialized.
			 */
			vss_transaction.recv_len = recvlen;
			vss_transaction.recv_req_id = requestid;
			vss_transaction.msg = (struct hv_vss_msg *)vss_msg;

			schedule_work(&vss_handle_request_work);
			return;
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, recv_buffer,
				 recvlen, requestid,
				 VM_PKT_DATA_INBAND, 0);
	}
}

static void vss_on_reset(void)
{
	if (cancel_delayed_work_sync(&vss_timeout_work))
		vss_respond_to_host(HV_E_FAIL);
	vss_transaction.state = HVUTIL_DEVICE_INIT;
}

int
hv_vss_init(struct hv_util_service *srv)
{
	if (vmbus_proto_version < VERSION_WIN8_1) {
		pr_warn("Integration service 'Backup (volume snapshot)'"
			" not supported on this host version.\n");
		return -ENOTSUPP;
	}
	recv_buffer = srv->recv_buffer;
	vss_transaction.recv_channel = srv->channel;

	/*
	 * When this driver loads, the user level daemon that
	 * processes the host requests may not yet be running.
	 * Defer processing channel callbacks until the daemon
	 * has registered.
	 */
	vss_transaction.state = HVUTIL_DEVICE_INIT;

	hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
				    vss_on_msg, vss_on_reset);
	if (!hvt) {
		pr_warn("VSS: Failed to initialize transport\n");
		return -EFAULT;
	}

	return 0;
}

static void hv_vss_cancel_work(void)
{
	cancel_delayed_work_sync(&vss_timeout_work);
	cancel_work_sync(&vss_handle_request_work);
}

int hv_vss_pre_suspend(void)
{
	struct vmbus_channel *channel = vss_transaction.recv_channel;
	struct hv_vss_msg *vss_msg;

	/*
	 * Fake a THAW message for the user space daemon in case the daemon
	 * has frozen the file systems. It doesn't matter if there is already
	 * a message pending to be delivered to the user space since we force
	 * vss_transaction.state to be HVUTIL_READY, so the user space daemon's
	 * write() will fail with EINVAL (see vss_on_msg()), and the daemon
	 * will reset the device by closing and re-opening it.
	 */
	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
	if (!vss_msg)
		return -ENOMEM;

	tasklet_disable(&channel->callback_event);

	vss_msg->vss_hdr.operation = VSS_OP_THAW;

	/* Cancel any possible pending work. */
	hv_vss_cancel_work();

	/* We don't care about the return value. */
	hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);

	kfree(vss_msg);

	vss_transaction.state = HVUTIL_READY;

	/* tasklet_enable() will be called in hv_vss_pre_resume(). */
	return 0;
}

int hv_vss_pre_resume(void)
{
	struct vmbus_channel *channel = vss_transaction.recv_channel;

	tasklet_enable(&channel->callback_event);

	return 0;
}

void hv_vss_deinit(void)
{
	vss_transaction.state = HVUTIL_DEVICE_DYING;

	hv_vss_cancel_work();

	hvutil_transport_destroy(hvt);
}
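
/*
 * For reference only -- not part of this driver: the code above assumes a
 * userspace daemon on the other side of the "vmbus/hv_vss" device created
 * by hvutil_transport_init(). A minimal sketch of that side is shown below,
 * modelled loosely on the in-tree hv_vss_daemon. It assumes that struct
 * hv_vss_msg, the VSS_OP_* constants and HV_E_FAIL are visible to userspace
 * via <linux/hyperv.h> and that the device node appears as
 * /dev/vmbus/hv_vss; error handling, poll()-based waiting and the actual
 * filesystem freeze/thaw work are omitted.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/types.h>
 *	#include <linux/hyperv.h>
 *
 *	int main(void)
 *	{
 *		struct hv_vss_msg msg;
 *		__u32 kernel_ver;
 *		int fd;
 *
 *		fd = open("/dev/vmbus/hv_vss", O_RDWR);
 *		if (fd < 0)
 *			return 1;
 *
 *		// Handshake: announce the protocol we speak. For
 *		// VSS_OP_REGISTER1 the driver answers with its own version
 *		// (see vss_handle_handshake() above), which is consumed next.
 *		memset(&msg, 0, sizeof(msg));
 *		msg.vss_hdr.operation = VSS_OP_REGISTER1;
 *		if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
 *			return 1;
 *		if (read(fd, &kernel_ver, sizeof(kernel_ver)) !=
 *		    sizeof(kernel_ver))
 *			return 1;
 *
 *		for (;;) {
 *			// Each host transaction is forwarded as one
 *			// fixed-size message; vss_on_msg() rejects replies
 *			// of any other length.
 *			if (read(fd, &msg, sizeof(msg)) != sizeof(msg))
 *				break;
 *
 *			switch (msg.vss_hdr.operation) {
 *			case VSS_OP_FREEZE:
 *			case VSS_OP_THAW:
 *			case VSS_OP_HOT_BACKUP:
 *				// Freeze/thaw the mounted filesystems here
 *				// (e.g. via the FIFREEZE/FITHAW ioctls).
 *				msg.error = 0;
 *				break;
 *			default:
 *				msg.error = HV_E_FAIL;
 *				break;
 *			}
 *
 *			// Writing the reply completes the transaction; the
 *			// driver forwards msg.error to the host in
 *			// vss_respond_to_host().
 *			if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
 *				break;
 *		}
 *
 *		close(fd);
 *		return 0;
 *	}
 */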