// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hyper-V transport for vsock
 *
 * Hyper-V Sockets supplies a byte-stream based communication mechanism
 * between the host and the VM. This driver implements the necessary
 * support in the VM by introducing the new vsock transport.
 *
 * Copyright (c) 2017, Microsoft Corporation.
 */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <net/sock.h>
#include <net/af_vsock.h>
#include <asm/hyperv-tlfs.h>

/* Older (VMBUS version 'VERSION_WIN10' or before) Windows hosts have some
 * stricter requirements on the hv_sock ring buffer size of six 4K pages.
 * hyperv-tlfs defines HV_HYP_PAGE_SIZE as 4K. Newer hosts don't have this
 * limitation; but, keep the defaults the same for compat.
 */
#define RINGBUFFER_HVS_RCV_SIZE (HV_HYP_PAGE_SIZE * 6)
#define RINGBUFFER_HVS_SND_SIZE (HV_HYP_PAGE_SIZE * 6)
#define RINGBUFFER_HVS_MAX_SIZE (HV_HYP_PAGE_SIZE * 64)

/* The MTU is 16KB per the host side's design */
#define HVS_MTU_SIZE		(1024 * 16)

/* How long to wait for graceful shutdown of a connection */
#define HVS_CLOSE_TIMEOUT (8 * HZ)

/* Per-packet header exchanged with the host: type tag plus payload length. */
struct vmpipe_proto_header {
	u32 pkt_type;
	u32 data_size;
};

/* For recv, we use the VMBus in-place packet iterator APIs to directly copy
 * data from the ringbuffer into the userspace buffer.
 */
struct hvs_recv_buf {
	/* The header before the payload data */
	struct vmpipe_proto_header hdr;

	/* The payload */
	u8 data[HVS_MTU_SIZE];
};

/* We can send up to HVS_MTU_SIZE bytes of payload to the host, but let's use
 * a smaller size, i.e. HVS_SEND_BUF_SIZE, to maximize concurrency between the
 * guest and the host processing as one VMBUS packet is the smallest processing
 * unit.
 *
 * Note: the buffer can be eliminated in the future when we add new VMBus
 * ringbuffer APIs that allow us to directly copy data from userspace buffer
 * to VMBus ringbuffer.
 */
#define HVS_SEND_BUF_SIZE \
		(HV_HYP_PAGE_SIZE - sizeof(struct vmpipe_proto_header))

struct hvs_send_buf {
	/* The header before the payload data */
	struct vmpipe_proto_header hdr;

	/* The payload */
	u8 data[HVS_SEND_BUF_SIZE];
};

#define HVS_HEADER_LEN	(sizeof(struct vmpacket_descriptor) + \
			 sizeof(struct vmpipe_proto_header))

/* See 'prev_indices' in hv_ringbuffer_read(), hv_ringbuffer_write(), and
 * __hv_pkt_iter_next().
 */
#define VMBUS_PKT_TRAILER_SIZE	(sizeof(u64))

/* Total on-ring size of a packet carrying payload_len bytes (payload is
 * 8-byte aligned on the ring).
 */
#define HVS_PKT_LEN(payload_len)	(HVS_HEADER_LEN + \
					 ALIGN((payload_len), 8) + \
					 VMBUS_PKT_TRAILER_SIZE)

/* Upper bound on the size of a VMbus packet for hv_sock */
#define HVS_MAX_PKT_SIZE	HVS_PKT_LEN(HVS_MTU_SIZE)

/* A service GUID viewed either whole or as <32-bit port><12 template bytes>:
 * svm_port overlays the first four bytes of srv_id.
 */
union hvs_service_id {
	guid_t	srv_id;

	struct {
		unsigned int svm_port;
		unsigned char b[sizeof(guid_t) - sizeof(unsigned int)];
	};
};

/* Per-socket state (accessed via vsk->trans) */
struct hvsock {
	struct vsock_sock *vsk;

	guid_t vm_srv_id;
	guid_t host_srv_id;

	struct vmbus_channel *chan;
	struct vmpacket_descriptor *recv_desc;

	/* The length of the payload not delivered to userland yet */
	u32 recv_data_len;
	/* The offset of the payload */
	u32 recv_data_off;

	/* Have we sent the zero-length packet (FIN)? */
	bool fin_sent;
};

/* In the VM, we support Hyper-V Sockets with AF_VSOCK, and the endpoint is
 * <cid, port> (see struct sockaddr_vm). Note: cid is not really used here:
 * when we write apps to connect to the host, we can only use VMADDR_CID_ANY
 * or VMADDR_CID_HOST (both are equivalent) as the remote cid, and when we
 * write apps to bind() & listen() in the VM, we can only use VMADDR_CID_ANY
 * as the local cid.
 *
 * On the host, Hyper-V Sockets are supported by Winsock AF_HYPERV:
 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-
 * guide/make-integration-service, and the endpoint is <VmID, ServiceId> with
 * the below sockaddr:
 *
 * struct SOCKADDR_HV
 * {
 *    ADDRESS_FAMILY Family;
 *    USHORT Reserved;
 *    GUID VmId;
 *    GUID ServiceId;
 * };
 * Note: VmID is not used by Linux VM and actually it isn't transmitted via
 * VMBus, because here it's obvious the host and the VM can easily identify
 * each other. Though the VmID is useful on the host, especially in the case
 * of Windows container, Linux VM doesn't need it at all.
 *
 * To make use of the AF_VSOCK infrastructure in Linux VM, we have to limit
 * the available GUID space of SOCKADDR_HV so that we can create a mapping
 * between AF_VSOCK port and SOCKADDR_HV Service GUID. The rule of writing
 * Hyper-V Sockets apps on the host and in Linux VM is:
 *
 ****************************************************************************
 * The only valid Service GUIDs, from the perspectives of both the host and *
 * Linux VM, that can be connected by the other end, must conform to this   *
 * format: <port>-facb-11e6-bd58-64006a7986d3.                              *
 ****************************************************************************
 *
 * When we write apps on the host to connect(), the GUID ServiceID is used.
 * When we write apps in Linux VM to connect(), we only need to specify the
 * port and the driver will form the GUID and use that to request the host.
150ae0078fcSDexuan Cui * 151ae0078fcSDexuan Cui */ 152ae0078fcSDexuan Cui 153ae0078fcSDexuan Cui /* 00000000-facb-11e6-bd58-64006a7986d3 */ 154ce103204SAndy Shevchenko static const guid_t srv_id_template = 155ce103204SAndy Shevchenko GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58, 156ae0078fcSDexuan Cui 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3); 157ae0078fcSDexuan Cui 158c0cfa2d8SStefano Garzarella static bool hvs_check_transport(struct vsock_sock *vsk); 159c0cfa2d8SStefano Garzarella 160ce103204SAndy Shevchenko static bool is_valid_srv_id(const guid_t *id) 161ae0078fcSDexuan Cui { 162ce103204SAndy Shevchenko return !memcmp(&id->b[4], &srv_id_template.b[4], sizeof(guid_t) - 4); 163ae0078fcSDexuan Cui } 164ae0078fcSDexuan Cui 165ce103204SAndy Shevchenko static unsigned int get_port_by_srv_id(const guid_t *svr_id) 166ae0078fcSDexuan Cui { 167ae0078fcSDexuan Cui return *((unsigned int *)svr_id); 168ae0078fcSDexuan Cui } 169ae0078fcSDexuan Cui 170ce103204SAndy Shevchenko static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id) 171ae0078fcSDexuan Cui { 172ae0078fcSDexuan Cui unsigned int port = get_port_by_srv_id(svr_id); 173ae0078fcSDexuan Cui 174ae0078fcSDexuan Cui vsock_addr_init(addr, VMADDR_CID_ANY, port); 175ae0078fcSDexuan Cui } 176ae0078fcSDexuan Cui 177ae0078fcSDexuan Cui static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan) 178ae0078fcSDexuan Cui { 179ae0078fcSDexuan Cui set_channel_pending_send_size(chan, 180ae0078fcSDexuan Cui HVS_PKT_LEN(HVS_SEND_BUF_SIZE)); 181ae0078fcSDexuan Cui 182ae0078fcSDexuan Cui virt_mb(); 183ae0078fcSDexuan Cui } 184ae0078fcSDexuan Cui 185ae0078fcSDexuan Cui static bool hvs_channel_readable(struct vmbus_channel *chan) 186ae0078fcSDexuan Cui { 187ae0078fcSDexuan Cui u32 readable = hv_get_bytes_to_read(&chan->inbound); 188ae0078fcSDexuan Cui 189ae0078fcSDexuan Cui /* 0-size payload means FIN */ 190ae0078fcSDexuan Cui return readable >= HVS_PKT_LEN(0); 191ae0078fcSDexuan Cui } 192ae0078fcSDexuan Cui 
static int hvs_channel_readable_payload(struct vmbus_channel *chan)
{
	u32 readable = hv_get_bytes_to_read(&chan->inbound);

	if (readable > HVS_PKT_LEN(0)) {
		/* At least we have 1 byte to read. We don't need to return
		 * the exact readable bytes: see vsock_stream_recvmsg() ->
		 * vsock_stream_has_data().
		 */
		return 1;
	}

	if (readable == HVS_PKT_LEN(0)) {
		/* 0-size payload means FIN */
		return 0;
	}

	/* No payload or FIN */
	return -1;
}

static size_t hvs_channel_writable_bytes(struct vmbus_channel *chan)
{
	u32 writeable = hv_get_bytes_to_write(&chan->outbound);
	size_t ret;

	/* The ringbuffer mustn't be 100% full, and we should reserve a
	 * zero-length-payload packet for the FIN: see hv_ringbuffer_write()
	 * and hvs_shutdown().
	 */
	if (writeable <= HVS_PKT_LEN(1) + HVS_PKT_LEN(0))
		return 0;

	ret = writeable - HVS_PKT_LEN(1) - HVS_PKT_LEN(0);

	/* Payloads are written 8-byte aligned: see HVS_PKT_LEN() */
	return round_down(ret, 8);
}

/* Fill in the pipe header and push one in-band VMBus packet. A data_size
 * of 0 is the FIN marker: see hvs_shutdown_lock_held().
 */
static int __hvs_send_data(struct vmbus_channel *chan,
			   struct vmpipe_proto_header *hdr,
			   size_t to_write)
{
	hdr->pkt_type = 1;
	hdr->data_size = to_write;
	return vmbus_sendpacket(chan, hdr, sizeof(*hdr) + to_write,
				0, VM_PKT_DATA_INBAND, 0);
}

static int hvs_send_data(struct vmbus_channel *chan,
			 struct hvs_send_buf *send_buf, size_t to_write)
{
	return __hvs_send_data(chan, &send_buf->hdr, to_write);
}

/* VMBus channel callback: wake readers when data is available and writers
 * when there is outbound ring space. Registered via vmbus_open() in
 * hvs_open_connection().
 */
static void hvs_channel_cb(void *ctx)
{
	struct sock *sk = (struct sock *)ctx;
	struct vsock_sock *vsk = vsock_sk(sk);
	struct hvsock *hvs = vsk->trans;
	struct vmbus_channel *chan = hvs->chan;

	if (hvs_channel_readable(chan))
		sk->sk_data_ready(sk);

	if (hv_get_bytes_to_write(&chan->outbound) > 0)
		sk->sk_write_space(sk);
}

/* Mark the connection closed and notify the socket; callers hold the
 * sock lock (see hvs_close_connection() and hvs_close_timeout()).
 */
static void hvs_do_close_lock_held(struct vsock_sock *vsk,
				   bool cancel_timeout)
{
	struct sock *sk = sk_vsock(vsk);

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	if (vsock_stream_has_data(vsk) <= 0)
		sk->sk_state = TCP_CLOSING;
	sk->sk_state_change(sk);
	if (vsk->close_work_scheduled &&
	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
		vsk->close_work_scheduled = false;
		vsock_remove_sock(vsk);

		/* Release the reference taken while scheduling the timeout */
		sock_put(sk);
	}
}

/* Rescind callback: the host tore down the channel, so finish closing the
 * associated socket (see vmbus_set_chn_rescind_callback() registration).
 */
static void hvs_close_connection(struct vmbus_channel *chan)
{
	struct sock *sk = get_per_channel_state(chan);

	lock_sock(sk);
	hvs_do_close_lock_held(vsock_sk(sk), true);
	release_sock(sk);

	/* Release the refcnt for the channel that's opened in
	 * hvs_open_connection().
	 */
	sock_put(sk);
}

/* Offer callback: a new hv_sock VMBus channel arrived. Handles both the
 * host-initiated connect (conn_from_host != 0: spawn a child socket on a
 * listener) and the completion of a guest-initiated connect.
 */
static void hvs_open_connection(struct vmbus_channel *chan)
{
	guid_t *if_instance, *if_type;
	unsigned char conn_from_host;

	struct sockaddr_vm addr;
	struct sock *sk, *new = NULL;
	struct vsock_sock *vnew = NULL;
	struct hvsock *hvs = NULL;
	struct hvsock *hvs_new = NULL;
	int rcvbuf;
	int ret;
	int sndbuf;

	if_type = &chan->offermsg.offer.if_type;
	if_instance = &chan->offermsg.offer.if_instance;
	conn_from_host = chan->offermsg.offer.u.pipe.user_def[0];
	if (!is_valid_srv_id(if_type))
		return;

	hvs_addr_init(&addr, conn_from_host ?
		      if_type : if_instance);
	sk = vsock_find_bound_socket(&addr);
	if (!sk)
		return;

	lock_sock(sk);
	if ((conn_from_host && sk->sk_state != TCP_LISTEN) ||
	    (!conn_from_host && sk->sk_state != TCP_SYN_SENT))
		goto out;

	if (conn_from_host) {
		if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog)
			goto out;

		new = vsock_create_connected(sk);
		if (!new)
			goto out;

		new->sk_state = TCP_SYN_SENT;
		vnew = vsock_sk(new);

		hvs_addr_init(&vnew->local_addr, if_type);

		/* Remote peer is always the host */
		vsock_addr_init(&vnew->remote_addr,
				VMADDR_CID_HOST, VMADDR_PORT_ANY);
		vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance);
		ret = vsock_assign_transport(vnew, vsock_sk(sk));
		/* Transport assigned (looking at remote_addr) must be the
		 * same where we received the request.
		 */
		if (ret || !hvs_check_transport(vnew)) {
			sock_put(new);
			goto out;
		}
		hvs_new = vnew->trans;
		hvs_new->chan = chan;
	} else {
		hvs = vsock_sk(sk)->trans;
		hvs->chan = chan;
	}

	set_channel_read_mode(chan, HV_CALL_DIRECT);

	/* Use the socket buffer sizes as hints for the VMBUS ring size. For
	 * server side sockets, 'sk' is the parent socket and thus, this will
	 * allow the child sockets to inherit the size from the parent. Keep
	 * the mins to the default value and align to page size as per VMBUS
	 * requirements.
	 * For the max, the socket core library will limit the socket buffer
	 * size that can be set by the user, but, since currently, the hv_sock
	 * VMBUS ring buffer is physically contiguous allocation, restrict it
	 * further.
	 * Older versions of hv_sock host side code cannot handle bigger VMBUS
	 * ring buffer size. Use the version number to limit the change to newer
	 * versions.
	 */
	if (vmbus_proto_version < VERSION_WIN10_V5) {
		sndbuf = RINGBUFFER_HVS_SND_SIZE;
		rcvbuf = RINGBUFFER_HVS_RCV_SIZE;
	} else {
		sndbuf = max_t(int, sk->sk_sndbuf, RINGBUFFER_HVS_SND_SIZE);
		sndbuf = min_t(int, sndbuf, RINGBUFFER_HVS_MAX_SIZE);
		sndbuf = ALIGN(sndbuf, HV_HYP_PAGE_SIZE);
		rcvbuf = max_t(int, sk->sk_rcvbuf, RINGBUFFER_HVS_RCV_SIZE);
		rcvbuf = min_t(int, rcvbuf, RINGBUFFER_HVS_MAX_SIZE);
		rcvbuf = ALIGN(rcvbuf, HV_HYP_PAGE_SIZE);
	}

	chan->max_pkt_size = HVS_MAX_PKT_SIZE;

	ret = vmbus_open(chan, sndbuf, rcvbuf, NULL, 0, hvs_channel_cb,
			 conn_from_host ? new : sk);
	if (ret != 0) {
		if (conn_from_host) {
			hvs_new->chan = NULL;
			sock_put(new);
		} else {
			hvs->chan = NULL;
		}
		goto out;
	}

	set_per_channel_state(chan, conn_from_host ? new : sk);

	/* This reference will be dropped by hvs_close_connection(). */
	sock_hold(conn_from_host ? new : sk);
	vmbus_set_chn_rescind_callback(chan, hvs_close_connection);

	/* Set the pending send size to max packet size to always get
	 * notifications from the host when there is enough writable space.
	 * The host is optimized to send notifications only when the pending
	 * size boundary is crossed, and not always.
	 */
	hvs_set_channel_pending_send_size(chan);

	if (conn_from_host) {
		new->sk_state = TCP_ESTABLISHED;
		sk_acceptq_added(sk);

		hvs_new->vm_srv_id = *if_type;
		hvs_new->host_srv_id = *if_instance;

		vsock_insert_connected(vnew);

		vsock_enqueue_accept(sk, new);
	} else {
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;

		vsock_insert_connected(vsock_sk(sk));
	}

	sk->sk_state_change(sk);

out:
	/* Release refcnt obtained when we called vsock_find_bound_socket() */
	sock_put(sk);

	release_sock(sk);
}

static u32 hvs_get_local_cid(void)
{
	return VMADDR_CID_ANY;
}

/* Allocate per-socket transport state and seed the socket buffer sizes
 * with the default ring sizes (used as ring-size hints later: see
 * hvs_open_connection()).
 */
static int hvs_sock_init(struct vsock_sock *vsk, struct vsock_sock *psk)
{
	struct hvsock *hvs;
	struct sock *sk = sk_vsock(vsk);

	hvs = kzalloc(sizeof(*hvs), GFP_KERNEL);
	if (!hvs)
		return -ENOMEM;

	vsk->trans = hvs;
	hvs->vsk = vsk;
	sk->sk_sndbuf = RINGBUFFER_HVS_SND_SIZE;
	sk->sk_rcvbuf = RINGBUFFER_HVS_RCV_SIZE;
	return 0;
}

/* Form the two service GUIDs from the local/remote vsock ports (port in
 * the first four bytes, template in the rest) and ask the host to connect.
 */
static int hvs_connect(struct vsock_sock *vsk)
{
	union hvs_service_id vm, host;
	struct hvsock *h = vsk->trans;

	vm.srv_id = srv_id_template;
	vm.svm_port = vsk->local_addr.svm_port;
	h->vm_srv_id = vm.srv_id;

	host.srv_id = srv_id_template;
	host.svm_port = vsk->remote_addr.svm_port;
	h->host_srv_id = host.srv_id;

	return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id);
}

/* Send the zero-length FIN packet at most once (guarded by fin_sent). */
static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
{
	struct vmpipe_proto_header hdr;

	if (hvs->fin_sent || !hvs->chan)
		return;

	/* It can't fail: see hvs_channel_writable_bytes(). */
	(void)__hvs_send_data(hvs->chan, &hdr, 0);
	hvs->fin_sent = true;
}

static int hvs_shutdown(struct vsock_sock *vsk, int mode)
{
	if (!(mode & SEND_SHUTDOWN))
		return 0;

	hvs_shutdown_lock_held(vsk->trans, mode);
	return 0;
}

/* Delayed-work handler: force the close if the peer didn't complete a
 * graceful shutdown within HVS_CLOSE_TIMEOUT.
 */
static void hvs_close_timeout(struct work_struct *work)
{
	struct vsock_sock *vsk =
		container_of(work, struct vsock_sock, close_work.work);
	struct sock *sk = sk_vsock(vsk);

	sock_hold(sk);
	lock_sock(sk);
	if (!sock_flag(sk, SOCK_DONE))
		hvs_do_close_lock_held(vsk, false);

	vsk->close_work_scheduled = false;
	release_sock(sk);
	sock_put(sk);
}

/* Returns true, if it is safe to remove socket; false otherwise */
static bool hvs_close_lock_held(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);

	if (!(sk->sk_state == TCP_ESTABLISHED ||
	      sk->sk_state == TCP_CLOSING))
		return true;

	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
		hvs_shutdown_lock_held(vsk->trans,
				       SHUTDOWN_MASK);

	if (sock_flag(sk, SOCK_DONE))
		return true;

	/* This reference will be dropped by the delayed close routine */
	sock_hold(sk);
	INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
	vsk->close_work_scheduled = true;
	schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
	return false;
}

static void hvs_release(struct vsock_sock *vsk)
{
	bool remove_sock;

	remove_sock = hvs_close_lock_held(vsk);
	if (remove_sock)
		vsock_remove_sock(vsk);
}

static void hvs_destruct(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;
	struct vmbus_channel *chan = hvs->chan;

	if (chan)
		vmbus_hvsock_device_unregister(chan);

	kfree(hvs);
}

/* Datagram sockets are not supported by this transport: the following
 * four callbacks are stubs.
 */
static int hvs_dgram_bind(struct vsock_sock *vsk, struct sockaddr_vm *addr)
{
	return -EOPNOTSUPP;
}

static int hvs_dgram_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
			     size_t len, int flags)
{
	return -EOPNOTSUPP;
}

static int hvs_dgram_enqueue(struct vsock_sock *vsk,
			     struct sockaddr_vm *remote, struct msghdr *msg,
			     size_t dgram_len)
{
	return -EOPNOTSUPP;
}

static bool hvs_dgram_allow(u32 cid, u32 port)
{
	return false;
}

/* Refresh recv_data_len/recv_data_off from the packet at recv_desc. */
static int hvs_update_recv_data(struct hvsock *hvs)
{
	struct hvs_recv_buf *recv_buf;
	u32 payload_len;

	recv_buf = (struct hvs_recv_buf *)(hvs->recv_desc + 1);
	payload_len = recv_buf->hdr.data_size;

	/* A payload larger than the MTU is a protocol violation */
	if (payload_len > HVS_MTU_SIZE)
		return -EIO;

	/* 0-size payload means FIN from the peer */
	if (payload_len == 0)
		hvs->vsk->peer_shutdown |= SEND_SHUTDOWN;

	hvs->recv_data_len = payload_len;
	hvs->recv_data_off = 0;

	return 0;
}

static ssize_t hvs_stream_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
				  size_t len, int flags)
{
	struct hvsock *hvs = vsk->trans;
	bool need_refill = !hvs->recv_desc;
	struct hvs_recv_buf *recv_buf;
	u32 to_read;
	int ret;

	if (flags & MSG_PEEK)
		return -EOPNOTSUPP;

	if (need_refill) {
		hvs->recv_desc = hv_pkt_iter_first(hvs->chan);
		if
 (!hvs->recv_desc)
			/* No packet currently on the ring */
			return -ENOBUFS;
		ret = hvs_update_recv_data(hvs);
		if (ret)
			return ret;
	}

	recv_buf = (struct hvs_recv_buf *)(hvs->recv_desc + 1);
	to_read = min_t(u32, len, hvs->recv_data_len);
	ret = memcpy_to_msg(msg, recv_buf->data + hvs->recv_data_off, to_read);
	if (ret != 0)
		return ret;

	hvs->recv_data_len -= to_read;
	if (hvs->recv_data_len == 0) {
		/* Packet fully consumed: advance to the next one, if any */
		hvs->recv_desc = hv_pkt_iter_next(hvs->chan, hvs->recv_desc);
		if (hvs->recv_desc) {
			ret = hvs_update_recv_data(hvs);
			if (ret)
				return ret;
		}
	} else {
		hvs->recv_data_off += to_read;
	}

	return to_read;
}

static ssize_t hvs_stream_enqueue(struct vsock_sock *vsk, struct msghdr *msg,
				  size_t len)
{
	struct hvsock *hvs = vsk->trans;
	struct vmbus_channel *chan = hvs->chan;
	struct hvs_send_buf *send_buf;
	ssize_t to_write, max_writable;
	ssize_t ret = 0;
	ssize_t bytes_written = 0;

	BUILD_BUG_ON(sizeof(*send_buf) != HV_HYP_PAGE_SIZE);

	send_buf = kmalloc(sizeof(*send_buf), GFP_KERNEL);
	if (!send_buf)
		return -ENOMEM;

	/* Reader(s) could be draining data from the channel as we write.
	 * Maximize bandwidth, by iterating until the channel is found to be
	 * full.
	 */
	while (len) {
		max_writable = hvs_channel_writable_bytes(chan);
		if (!max_writable)
			break;
		to_write = min_t(ssize_t, len, max_writable);
		to_write = min_t(ssize_t, to_write, HVS_SEND_BUF_SIZE);
		/* memcpy_from_msg is safe for loop as it advances the offsets
		 * within the message iterator.
		 */
		ret = memcpy_from_msg(send_buf->data, msg, to_write);
		if (ret < 0)
			goto out;

		ret = hvs_send_data(hvs->chan, send_buf, to_write);
		if (ret < 0)
			goto out;

		bytes_written += to_write;
		len -= to_write;
	}
out:
	/* If any data has been sent, return that */
	if (bytes_written)
		ret = bytes_written;
	kfree(send_buf);
	return ret;
}

static s64 hvs_stream_has_data(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;
	s64 ret;

	/* Partially consumed packet still pending delivery to userland */
	if (hvs->recv_data_len > 0)
		return 1;

	switch (hvs_channel_readable_payload(hvs->chan)) {
	case 1:
		ret = 1;
		break;
	case 0:
		/* 0-size payload means FIN */
		vsk->peer_shutdown |= SEND_SHUTDOWN;
		ret = 0;
		break;
	default: /* -1 */
		ret = 0;
		break;
	}

	return ret;
}

static s64 hvs_stream_has_space(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;

	return hvs_channel_writable_bytes(hvs->chan);
}

/* One byte more than the largest possible payload (HVS_MTU_SIZE): see the
 * MTU check in hvs_update_recv_data().
 */
static u64 hvs_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return HVS_MTU_SIZE + 1;
}

static bool hvs_stream_is_active(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;

	return hvs->chan != NULL;
}

/* Only connections to/from the host are allowed for this transport. */
static bool hvs_stream_allow(u32 cid, u32 port)
{
	if (cid == VMADDR_CID_HOST)
		return true;

	return false;
}

static
int hvs_notify_poll_in(struct vsock_sock *vsk, size_t target, bool *readable)
{
	struct hvsock *hvs = vsk->trans;

	*readable = hvs_channel_readable(hvs->chan);
	return 0;
}

static
int
hvs_notify_poll_out(struct vsock_sock *vsk, size_t target, bool *writable) 749ae0078fcSDexuan Cui { 750ae0078fcSDexuan Cui *writable = hvs_stream_has_space(vsk) > 0; 751ae0078fcSDexuan Cui 752ae0078fcSDexuan Cui return 0; 753ae0078fcSDexuan Cui } 754ae0078fcSDexuan Cui 755ae0078fcSDexuan Cui static 756ae0078fcSDexuan Cui int hvs_notify_recv_init(struct vsock_sock *vsk, size_t target, 757ae0078fcSDexuan Cui struct vsock_transport_recv_notify_data *d) 758ae0078fcSDexuan Cui { 759ae0078fcSDexuan Cui return 0; 760ae0078fcSDexuan Cui } 761ae0078fcSDexuan Cui 762ae0078fcSDexuan Cui static 763ae0078fcSDexuan Cui int hvs_notify_recv_pre_block(struct vsock_sock *vsk, size_t target, 764ae0078fcSDexuan Cui struct vsock_transport_recv_notify_data *d) 765ae0078fcSDexuan Cui { 766ae0078fcSDexuan Cui return 0; 767ae0078fcSDexuan Cui } 768ae0078fcSDexuan Cui 769ae0078fcSDexuan Cui static 770ae0078fcSDexuan Cui int hvs_notify_recv_pre_dequeue(struct vsock_sock *vsk, size_t target, 771ae0078fcSDexuan Cui struct vsock_transport_recv_notify_data *d) 772ae0078fcSDexuan Cui { 773ae0078fcSDexuan Cui return 0; 774ae0078fcSDexuan Cui } 775ae0078fcSDexuan Cui 776ae0078fcSDexuan Cui static 777ae0078fcSDexuan Cui int hvs_notify_recv_post_dequeue(struct vsock_sock *vsk, size_t target, 778ae0078fcSDexuan Cui ssize_t copied, bool data_read, 779ae0078fcSDexuan Cui struct vsock_transport_recv_notify_data *d) 780ae0078fcSDexuan Cui { 781ae0078fcSDexuan Cui return 0; 782ae0078fcSDexuan Cui } 783ae0078fcSDexuan Cui 784ae0078fcSDexuan Cui static 785ae0078fcSDexuan Cui int hvs_notify_send_init(struct vsock_sock *vsk, 786ae0078fcSDexuan Cui struct vsock_transport_send_notify_data *d) 787ae0078fcSDexuan Cui { 788ae0078fcSDexuan Cui return 0; 789ae0078fcSDexuan Cui } 790ae0078fcSDexuan Cui 791ae0078fcSDexuan Cui static 792ae0078fcSDexuan Cui int hvs_notify_send_pre_block(struct vsock_sock *vsk, 793ae0078fcSDexuan Cui struct vsock_transport_send_notify_data *d) 794ae0078fcSDexuan Cui { 795ae0078fcSDexuan 
Cui return 0; 796ae0078fcSDexuan Cui } 797ae0078fcSDexuan Cui 798ae0078fcSDexuan Cui static 799ae0078fcSDexuan Cui int hvs_notify_send_pre_enqueue(struct vsock_sock *vsk, 800ae0078fcSDexuan Cui struct vsock_transport_send_notify_data *d) 801ae0078fcSDexuan Cui { 802ae0078fcSDexuan Cui return 0; 803ae0078fcSDexuan Cui } 804ae0078fcSDexuan Cui 805ae0078fcSDexuan Cui static 806ae0078fcSDexuan Cui int hvs_notify_send_post_enqueue(struct vsock_sock *vsk, ssize_t written, 807ae0078fcSDexuan Cui struct vsock_transport_send_notify_data *d) 808ae0078fcSDexuan Cui { 809ae0078fcSDexuan Cui return 0; 810ae0078fcSDexuan Cui } 811ae0078fcSDexuan Cui 812ae0078fcSDexuan Cui static struct vsock_transport hvs_transport = { 8136a2c0962SStefano Garzarella .module = THIS_MODULE, 8146a2c0962SStefano Garzarella 815ae0078fcSDexuan Cui .get_local_cid = hvs_get_local_cid, 816ae0078fcSDexuan Cui 817ae0078fcSDexuan Cui .init = hvs_sock_init, 818ae0078fcSDexuan Cui .destruct = hvs_destruct, 819ae0078fcSDexuan Cui .release = hvs_release, 820ae0078fcSDexuan Cui .connect = hvs_connect, 821ae0078fcSDexuan Cui .shutdown = hvs_shutdown, 822ae0078fcSDexuan Cui 823ae0078fcSDexuan Cui .dgram_bind = hvs_dgram_bind, 824ae0078fcSDexuan Cui .dgram_dequeue = hvs_dgram_dequeue, 825ae0078fcSDexuan Cui .dgram_enqueue = hvs_dgram_enqueue, 826ae0078fcSDexuan Cui .dgram_allow = hvs_dgram_allow, 827ae0078fcSDexuan Cui 828ae0078fcSDexuan Cui .stream_dequeue = hvs_stream_dequeue, 829ae0078fcSDexuan Cui .stream_enqueue = hvs_stream_enqueue, 830ae0078fcSDexuan Cui .stream_has_data = hvs_stream_has_data, 831ae0078fcSDexuan Cui .stream_has_space = hvs_stream_has_space, 832ae0078fcSDexuan Cui .stream_rcvhiwat = hvs_stream_rcvhiwat, 833ae0078fcSDexuan Cui .stream_is_active = hvs_stream_is_active, 834ae0078fcSDexuan Cui .stream_allow = hvs_stream_allow, 835ae0078fcSDexuan Cui 836ae0078fcSDexuan Cui .notify_poll_in = hvs_notify_poll_in, 837ae0078fcSDexuan Cui .notify_poll_out = hvs_notify_poll_out, 838ae0078fcSDexuan Cui 
.notify_recv_init = hvs_notify_recv_init, 839ae0078fcSDexuan Cui .notify_recv_pre_block = hvs_notify_recv_pre_block, 840ae0078fcSDexuan Cui .notify_recv_pre_dequeue = hvs_notify_recv_pre_dequeue, 841ae0078fcSDexuan Cui .notify_recv_post_dequeue = hvs_notify_recv_post_dequeue, 842ae0078fcSDexuan Cui .notify_send_init = hvs_notify_send_init, 843ae0078fcSDexuan Cui .notify_send_pre_block = hvs_notify_send_pre_block, 844ae0078fcSDexuan Cui .notify_send_pre_enqueue = hvs_notify_send_pre_enqueue, 845ae0078fcSDexuan Cui .notify_send_post_enqueue = hvs_notify_send_post_enqueue, 846ae0078fcSDexuan Cui 847ae0078fcSDexuan Cui }; 848ae0078fcSDexuan Cui 849c0cfa2d8SStefano Garzarella static bool hvs_check_transport(struct vsock_sock *vsk) 850c0cfa2d8SStefano Garzarella { 851c0cfa2d8SStefano Garzarella return vsk->transport == &hvs_transport; 852c0cfa2d8SStefano Garzarella } 853c0cfa2d8SStefano Garzarella 854ae0078fcSDexuan Cui static int hvs_probe(struct hv_device *hdev, 855ae0078fcSDexuan Cui const struct hv_vmbus_device_id *dev_id) 856ae0078fcSDexuan Cui { 857ae0078fcSDexuan Cui struct vmbus_channel *chan = hdev->channel; 858ae0078fcSDexuan Cui 859ae0078fcSDexuan Cui hvs_open_connection(chan); 860ae0078fcSDexuan Cui 861ae0078fcSDexuan Cui /* Always return success to suppress the unnecessary error message 862ae0078fcSDexuan Cui * in vmbus_probe(): on error the host will rescind the device in 863ae0078fcSDexuan Cui * 30 seconds and we can do cleanup at that time in 864ae0078fcSDexuan Cui * vmbus_onoffer_rescind(). 
865ae0078fcSDexuan Cui */ 866ae0078fcSDexuan Cui return 0; 867ae0078fcSDexuan Cui } 868ae0078fcSDexuan Cui 869ae0078fcSDexuan Cui static int hvs_remove(struct hv_device *hdev) 870ae0078fcSDexuan Cui { 871ae0078fcSDexuan Cui struct vmbus_channel *chan = hdev->channel; 872ae0078fcSDexuan Cui 873ae0078fcSDexuan Cui vmbus_close(chan); 874ae0078fcSDexuan Cui 875ae0078fcSDexuan Cui return 0; 876ae0078fcSDexuan Cui } 877ae0078fcSDexuan Cui 8782194c2ebSDexuan Cui /* hv_sock connections can not persist across hibernation, and all the hv_sock 8792194c2ebSDexuan Cui * channels are forced to be rescinded before hibernation: see 8802194c2ebSDexuan Cui * vmbus_bus_suspend(). Here the dummy hvs_suspend() and hvs_resume() 8812194c2ebSDexuan Cui * are only needed because hibernation requires that every vmbus device's 8822194c2ebSDexuan Cui * driver should have a .suspend and .resume callback: see vmbus_suspend(). 8832194c2ebSDexuan Cui */ 8842194c2ebSDexuan Cui static int hvs_suspend(struct hv_device *hv_dev) 8852194c2ebSDexuan Cui { 8862194c2ebSDexuan Cui /* Dummy */ 8872194c2ebSDexuan Cui return 0; 8882194c2ebSDexuan Cui } 8892194c2ebSDexuan Cui 8902194c2ebSDexuan Cui static int hvs_resume(struct hv_device *dev) 8912194c2ebSDexuan Cui { 8922194c2ebSDexuan Cui /* Dummy */ 8932194c2ebSDexuan Cui return 0; 8942194c2ebSDexuan Cui } 8952194c2ebSDexuan Cui 896ae0078fcSDexuan Cui /* This isn't really used. 
See vmbus_match() and vmbus_probe() */ 897ae0078fcSDexuan Cui static const struct hv_vmbus_device_id id_table[] = { 898ae0078fcSDexuan Cui {}, 899ae0078fcSDexuan Cui }; 900ae0078fcSDexuan Cui 901ae0078fcSDexuan Cui static struct hv_driver hvs_drv = { 902ae0078fcSDexuan Cui .name = "hv_sock", 903ae0078fcSDexuan Cui .hvsock = true, 904ae0078fcSDexuan Cui .id_table = id_table, 905ae0078fcSDexuan Cui .probe = hvs_probe, 906ae0078fcSDexuan Cui .remove = hvs_remove, 9072194c2ebSDexuan Cui .suspend = hvs_suspend, 9082194c2ebSDexuan Cui .resume = hvs_resume, 909ae0078fcSDexuan Cui }; 910ae0078fcSDexuan Cui 911ae0078fcSDexuan Cui static int __init hvs_init(void) 912ae0078fcSDexuan Cui { 913ae0078fcSDexuan Cui int ret; 914ae0078fcSDexuan Cui 915ae0078fcSDexuan Cui if (vmbus_proto_version < VERSION_WIN10) 916ae0078fcSDexuan Cui return -ENODEV; 917ae0078fcSDexuan Cui 918ae0078fcSDexuan Cui ret = vmbus_driver_register(&hvs_drv); 919ae0078fcSDexuan Cui if (ret != 0) 920ae0078fcSDexuan Cui return ret; 921ae0078fcSDexuan Cui 922c0cfa2d8SStefano Garzarella ret = vsock_core_register(&hvs_transport, VSOCK_TRANSPORT_F_G2H); 923ae0078fcSDexuan Cui if (ret) { 924ae0078fcSDexuan Cui vmbus_driver_unregister(&hvs_drv); 925ae0078fcSDexuan Cui return ret; 926ae0078fcSDexuan Cui } 927ae0078fcSDexuan Cui 928ae0078fcSDexuan Cui return 0; 929ae0078fcSDexuan Cui } 930ae0078fcSDexuan Cui 931ae0078fcSDexuan Cui static void __exit hvs_exit(void) 932ae0078fcSDexuan Cui { 933c0cfa2d8SStefano Garzarella vsock_core_unregister(&hvs_transport); 934ae0078fcSDexuan Cui vmbus_driver_unregister(&hvs_drv); 935ae0078fcSDexuan Cui } 936ae0078fcSDexuan Cui 937ae0078fcSDexuan Cui module_init(hvs_init); 938ae0078fcSDexuan Cui module_exit(hvs_exit); 939ae0078fcSDexuan Cui 940ae0078fcSDexuan Cui MODULE_DESCRIPTION("Hyper-V Sockets"); 941ae0078fcSDexuan Cui MODULE_VERSION("1.0.0"); 942ae0078fcSDexuan Cui MODULE_LICENSE("GPL"); 943ae0078fcSDexuan Cui MODULE_ALIAS_NETPROTO(PF_VSOCK); 944