/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */

#ifndef _HYPERV_VMBUS_H
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>

#include "hv_trace.h"

/*
 * Timeout for services such as KVP and fcopy.
 */
#define HV_UTIL_TIMEOUT 30

/*
 * Timeout for guest-host handshake for services.
 */
#define HV_UTIL_NEGO_TIMEOUT 55


/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
	u64 as_uint64;
	struct {
		u32 pending;
		u32 armed;
	};
};

struct hv_monitor_parameter {
	union hv_connection_id connectionid;
	u16 flagnumber;
	u16 rsvdz;
};

union hv_monitor_trigger_state {
	u32 asu32;

	struct {
		u32 group_enable:4;
		u32 rsvdz:28;
	};
};

/* struct hv_monitor_page Layout */
/* ------------------------------------------------------ */
/* | 0   | TriggerState (4 bytes) | Rsvd1 (4 bytes)     | */
/* | 8   | TriggerGroup[0]                              | */
/* | 10  | TriggerGroup[1]                              | */
/* | 18  | TriggerGroup[2]                              | */
/* | 20  | TriggerGroup[3]                              | */
/* | 28  | Rsvd2[0]                                     | */
/* | 30  | Rsvd2[1]                                     | */
/* | 38  | Rsvd2[2]                                     | */
/* | 40  | NextCheckTime[0][0]    | NextCheckTime[0][1] | */
/* | ...                                                | */
/* | 240 | Latency[0][0..3]                             | */
/* | 340 | Rsvz3[0]                                     | */
/* | 440 | Parameter[0][0]                              | */
/* | 448 | Parameter[0][1]                              | */
/* | ...                                                | */
/* | 840 | Rsvd4[0]                                     | */
/* ------------------------------------------------------ */
struct hv_monitor_page {
	union hv_monitor_trigger_state trigger_state;
	u32 rsvdz1;

	union hv_monitor_trigger_group trigger_group[4];
	u64 rsvdz2[3];

	s32 next_checktime[4][32];

	u16 latency[4][32];
	u64 rsvdz3[32];

	struct hv_monitor_parameter parameter[4][32];

	u8 rsvdz4[1984];
};
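
/*
 * Note (illustrative, not part of the original header): with the field
 * sizes above, struct hv_monitor_page adds up to exactly one 4 KiB page
 * (8 + 32 + 24 + 512 + 256 + 256 + 1024 + 1984 = 4096 bytes), matching the
 * hex offsets in the layout table. A compile-time check placed in any
 * function, e.g.
 *
 *	BUILD_BUG_ON(sizeof(struct hv_monitor_page) != 4096);
 *
 * would catch an accidental change to that layout.
 */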

#define HV_HYPERCALL_PARAM_ALIGN sizeof(u64)

/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
	union hv_connection_id connectionid;
	u32 reserved;
	u32 message_type;
	u32 payload_size;
	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};


enum {
	VMBUS_MESSAGE_CONNECTION_ID	= 1,
	VMBUS_MESSAGE_CONNECTION_ID_4	= 4,
	VMBUS_MESSAGE_PORT_ID		= 1,
	VMBUS_EVENT_CONNECTION_ID	= 2,
	VMBUS_EVENT_PORT_ID		= 2,
	VMBUS_MONITOR_CONNECTION_ID	= 3,
	VMBUS_MONITOR_PORT_ID		= 3,
	VMBUS_MESSAGE_SINT		= 2,
};

/*
 * Per cpu state for channel handling
 */
struct hv_per_cpu_context {
	void *synic_message_page;
	void *synic_event_page;
	/*
	 * buffer to post messages to the host.
	 */
	void *post_msg_page;

	/*
	 * Starting with win8, we can take channel interrupts on any CPU;
	 * we manage the tasklet that handles event messages on a per-CPU
	 * basis.
	 */
	struct tasklet_struct msg_dpc;

	/*
	 * To optimize the mapping of relid to channel, maintain a
	 * per-cpu list of the channels based on their CPU affinity.
	 */
	struct list_head chan_list;
	struct clock_event_device *clk_evt;
};

struct hv_context {
	/*
	 * We only support running on top of Hyper-V, so at this point
	 * this really can only contain the Hyper-V ID.
	 */
	u64 guestid;

	void *tsc_page;

	struct hv_per_cpu_context __percpu *cpu_context;

	/*
	 * To manage allocations in a NUMA node.
	 * Array indexed by numa node ID.
	 */
	struct cpumask *hv_numa_map;
};

extern struct hv_context hv_context;

/* Hv Interface */

extern int hv_init(void);

extern int hv_post_message(union hv_connection_id connection_id,
			   enum hv_message_type message_type,
			   void *payload, size_t payload_size);

extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern int hv_synic_init(unsigned int cpu);

extern int hv_synic_cleanup(unsigned int cpu);

extern void hv_synic_clockevents_cleanup(void);

/* Interface */

void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 pagecnt);

void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count);

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw);
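
/*
 * Sketch (illustrative only, not part of the original header; names are
 * hypothetical): a caller of hv_ringbuffer_write() describes the outgoing
 * packet as a small kvec array, typically one entry for the packet
 * descriptor and one or more for the payload:
 *
 *	struct kvec bufferlist[2];
 *
 *	bufferlist[0].iov_base = &desc;
 *	bufferlist[0].iov_len  = sizeof(desc);
 *	bufferlist[1].iov_base = payload;
 *	bufferlist[1].iov_len  = payload_len;
 *
 *	ret = hv_ringbuffer_write(channel, bufferlist, 2);
 */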

/*
 * The maximum number of channels is determined by the size of the interrupt
 * page, which is PAGE_SIZE. Half of PAGE_SIZE is for the send endpoint
 * interrupt bitmap and the other half is for the receive endpoint interrupt
 * bitmap.
 */
#define MAX_NUM_CHANNELS ((PAGE_SIZE >> 1) << 3) /* 16384 channels */

/* The value here must be a multiple of 32 */
/* TODO: Need to make this configurable */
#define MAX_NUM_CHANNELS_SUPPORTED 256


enum vmbus_connect_state {
	DISCONNECTED,
	CONNECTING,
	CONNECTED,
	DISCONNECTING
};

#define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT

struct vmbus_connection {
	/*
	 * CPU on which the initial host contact was made.
	 */
	int connect_cpu;

	u32 msg_conn_id;

	atomic_t offer_in_progress;

	enum vmbus_connect_state conn_state;

	atomic_t next_gpadl_handle;

	struct completion unload_event;
	/*
	 * Represents channel interrupts. Each bit position represents a
	 * channel. When a channel sends an interrupt via VMBUS, it finds its
	 * bit in the sendInterruptPage, sets it and calls Hv to generate a
	 * port event. The other end receives the port event and parses the
	 * recvInterruptPage to see which bit is set.
	 */
	void *int_page;
	void *send_int_page;
	void *recv_int_page;

	/*
	 * 2 pages - 1st page is for parent->child notification and the 2nd
	 * is for child->parent notification
	 */
	struct hv_monitor_page *monitor_pages[2];
	struct list_head chn_msg_list;
	spinlock_t channelmsg_lock;

	/* List of channels */
	struct list_head chn_list;
	struct mutex channel_mutex;

	/*
	 * An offer message is handled first on the work_queue, and then
	 * is further handled on handle_primary_chan_wq or
	 * handle_sub_chan_wq.
	 */
	struct workqueue_struct *work_queue;
	struct workqueue_struct *handle_primary_chan_wq;
	struct workqueue_struct *handle_sub_chan_wq;
};


struct vmbus_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglist_entry;

	/* The message itself */
	unsigned char msg[0];
};


extern struct vmbus_connection vmbus_connection;

static inline void vmbus_send_interrupt(u32 relid)
{
	sync_set_bit(relid, vmbus_connection.send_int_page);
}

enum vmbus_message_handler_type {
	/* The related handler can sleep. */
	VMHT_BLOCKING = 0,

	/* The related handler must NOT sleep. */
	VMHT_NON_BLOCKING = 1,
};

struct vmbus_channel_message_table_entry {
	enum vmbus_channel_message_type message_type;
	enum vmbus_message_handler_type handler_type;
	void (*message_handler)(struct vmbus_channel_message_header *msg);
};

extern const struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT];


/* General vmbus interface */

struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel);

int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
			   struct vmbus_channel *channel);

void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);

/* Connection interface */

int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);

int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void);
void hv_kvp_onchannelcallback(void *context);

int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void);
void hv_vss_onchannelcallback(void *context);

int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void);
void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);

static inline void hv_poll_channel(struct vmbus_channel *channel,
				   void (*cb)(void *))
{
	if (!channel)
		return;

	if (in_interrupt() && (channel->target_cpu == smp_processor_id())) {
		cb(channel);
		return;
	}
	smp_call_function_single(channel->target_cpu, cb, channel, true);
}
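
/*
 * Sketch (illustrative only, not part of the original header; names are
 * hypothetical): hv_poll_channel() runs a callback on the channel's target
 * CPU, calling it directly when already in interrupt context on that CPU
 * and via smp_call_function_single() otherwise, e.g.:
 *
 *	static void my_poll_cb(void *context)
 *	{
 *		struct vmbus_channel *chan = context;
 *
 *		// rescan the channel's ring buffer, finish a transaction, ...
 *	}
 *
 *	hv_poll_channel(channel, my_poll_cb);
 */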

enum hvutil_device_state {
	HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, waiting for userspace */
	HVUTIL_READY,            /* userspace is registered */
	HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
	HVUTIL_USERSPACE_REQ,    /* request to userspace was sent */
	HVUTIL_USERSPACE_RECV,   /* reply from userspace was received */
	HVUTIL_DEVICE_DYING,     /* driver unload is in progress */
};

#endif /* _HYPERV_VMBUS_H */