/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared definitions for the virtio vsock transports: sk_buff helpers,
 * per-socket credit/queue state, and the common virtio_transport_*()
 * operations implemented by the core transport code.
 */
#ifndef _LINUX_VIRTIO_VSOCK_H
#define _LINUX_VIRTIO_VSOCK_H

#include <uapi/linux/virtio_vsock.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/af_vsock.h>

/*
 * Headroom reserved at the front of every vsock skb so the virtio_vsock
 * header can live at skb->head (see virtio_vsock_hdr() below).
 */
#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))

/* Per-skb control-block state, stored in skb->cb. */
struct virtio_vsock_skb_cb {
	bool reply;		/* set for reply packets (see virtio_vsock_skb_set_reply()) */
	bool tap_delivered;	/* set once the packet was handed to a tap device */
};

#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))

/*
 * The virtio_vsock header occupies the reserved headroom at the very start
 * of the skb's buffer, i.e. at skb->head.
 */
static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
{
	return (struct virtio_vsock_hdr *)skb->head;
}

/* Accessors for the per-skb control-block flags defined above. */
static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->reply;
}

static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
}

static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
}

static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
}

static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
}

/*
 * Extend the skb's data area to cover the payload length advertised in the
 * packet header (stored little-endian on the wire).  Called on the RX path
 * after the device has filled the buffer.
 */
static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
{
	u32 len;

	len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

	if (len > 0)
		skb_put(skb, len);
}

/*
 * Allocate an skb of @size bytes and reserve headroom for the virtio_vsock
 * header.  @size must cover at least the header itself; returns NULL on
 * undersized requests or allocation failure.
 */
static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
{
	struct sk_buff *skb;

	if (size < VIRTIO_VSOCK_SKB_HEADROOM)
		return NULL;

	skb = alloc_skb(size, mask);
	if (!skb)
		return NULL;

	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
	return skb;
}

/*
 * Locked wrappers around the __skb_queue_*() primitives; each takes the
 * queue's own lock with BH disabled so the queues can be shared with
 * softirq context.
 */
static inline void
virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_head(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline void
virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_tail(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	spin_lock_bh(&list->lock);
	skb = __skb_dequeue(list);
	spin_unlock_bh(&list->lock);

	return skb;
}

static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
{
	spin_lock_bh(&list->lock);
	__skb_queue_purge(list);
	spin_unlock_bh(&list->lock);
}

/*
 * Total buffer size of the skb (head to skb_end_pointer()), i.e. the whole
 * allocated data area including the reserved header room — not just the
 * current payload length.
 */
static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
{
	return (size_t)(skb_end_pointer(skb) - skb->head);
}

#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	(1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE		0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		(1024 * 64)

/* Virtqueue indices used by the virtio vsock device. */
enum {
	VSOCK_VQ_RX     = 0, /* for host to guest data */
	VSOCK_VQ_TX     = 1, /* for guest to host data */
	VSOCK_VQ_EVENT  = 2,
	VSOCK_VQ_MAX    = 3,
};

/* Per-socket state (accessed via vsk->trans) */
struct virtio_vsock_sock {
	struct vsock_sock *vsk;

	spinlock_t tx_lock;
	spinlock_t rx_lock;

	/* Protected by tx_lock */
	u32 tx_cnt;		/* bytes sent to the peer */
	u32 peer_fwd_cnt;	/* bytes the peer reported having consumed */
	u32 peer_buf_alloc;	/* peer's advertised receive buffer size */

	/* Protected by rx_lock */
	u32 fwd_cnt;		/* bytes consumed locally */
	u32 last_fwd_cnt;	/* fwd_cnt last communicated to the peer */
	u32 rx_bytes;		/* payload bytes currently queued in rx_queue */
	u32 buf_alloc;		/* our advertised receive buffer size */
	struct sk_buff_head rx_queue;
	u32 msg_count;		/* queued message count (seqpacket) */
};

/* Parameters describing a packet to be built and sent by a transport. */
struct virtio_vsock_pkt_info {
	u32 remote_cid, remote_port;
	struct vsock_sock *vsk;
	struct msghdr *msg;
	u32 pkt_len;
	u16 type;
	u16 op;
	u32 flags;
	bool reply;
};

struct virtio_transport {
	/* This must be the first field */
	struct vsock_transport transport;

	/* Takes ownership of the packet */
	int (*send_pkt)(struct sk_buff *skb);
};

/* Stream/datagram/seqpacket dequeue operations. */
ssize_t
virtio_transport_stream_dequeue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len,
				int type);
int
virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
			       struct msghdr *msg,
			       size_t len, int flags);

int
virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   size_t len);
ssize_t
virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
				   struct msghdr *msg,
				   int flags);

/* Readable/writable state queries. */
s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);

int virtio_transport_do_socket_init(struct vsock_sock *vsk,
				    struct vsock_sock *psk);

/* Poll/recv/send notification callbacks used by the af_vsock core. */
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
				size_t target,
				bool *data_ready_now);
int
virtio_transport_notify_poll_out(struct vsock_sock *vsk,
				 size_t target,
				 bool *space_available_now);

int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
	size_t target, struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
	size_t target, ssize_t copied, bool data_read,
	struct vsock_transport_recv_notify_data *data);
int virtio_transport_notify_send_init(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
	struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
	ssize_t written, struct vsock_transport_send_notify_data *data);
void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);

u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
bool virtio_transport_stream_allow(u32 cid, u32 port);
int virtio_transport_dgram_bind(struct vsock_sock *vsk,
				struct sockaddr_vm *addr);
bool virtio_transport_dgram_allow(u32 cid, u32 port);

int virtio_transport_connect(struct vsock_sock *vsk);

int virtio_transport_shutdown(struct vsock_sock *vsk, int mode);

void virtio_transport_release(struct vsock_sock *vsk);

ssize_t
virtio_transport_stream_enqueue(struct vsock_sock *vsk,
				struct msghdr *msg,
				size_t len);
int
virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
			       struct sockaddr_vm *remote_addr,
			       struct msghdr *msg,
			       size_t len);

void virtio_transport_destruct(struct vsock_sock *vsk);

/* Core receive path and credit accounting shared by all transports. */
void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct sk_buff *skb);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
#endif /* _LINUX_VIRTIO_VSOCK_H */