1685a6bf8SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
220259849SGeorge Zhang /*
320259849SGeorge Zhang * VMware VMCI Driver
420259849SGeorge Zhang *
520259849SGeorge Zhang * Copyright (C) 2012 VMware, Inc. All rights reserved.
620259849SGeorge Zhang */
720259849SGeorge Zhang
820259849SGeorge Zhang #ifndef _VMW_VMCI_DEF_H_
920259849SGeorge Zhang #define _VMW_VMCI_DEF_H_
1020259849SGeorge Zhang
1120259849SGeorge Zhang #include <linux/atomic.h>
129a41691eSVishnu DASA #include <linux/bits.h>
1320259849SGeorge Zhang
1420259849SGeorge Zhang /* Register offsets. */
1520259849SGeorge Zhang #define VMCI_STATUS_ADDR 0x00
1620259849SGeorge Zhang #define VMCI_CONTROL_ADDR 0x04
1720259849SGeorge Zhang #define VMCI_ICR_ADDR 0x08
1820259849SGeorge Zhang #define VMCI_IMR_ADDR 0x0c
1920259849SGeorge Zhang #define VMCI_DATA_OUT_ADDR 0x10
2020259849SGeorge Zhang #define VMCI_DATA_IN_ADDR 0x14
2120259849SGeorge Zhang #define VMCI_CAPS_ADDR 0x18
2220259849SGeorge Zhang #define VMCI_RESULT_LOW_ADDR 0x1c
2320259849SGeorge Zhang #define VMCI_RESULT_HIGH_ADDR 0x20
245ee10982SJorgen Hansen #define VMCI_DATA_OUT_LOW_ADDR 0x24
255ee10982SJorgen Hansen #define VMCI_DATA_OUT_HIGH_ADDR 0x28
265ee10982SJorgen Hansen #define VMCI_DATA_IN_LOW_ADDR 0x2c
275ee10982SJorgen Hansen #define VMCI_DATA_IN_HIGH_ADDR 0x30
288cb520beSJorgen Hansen #define VMCI_GUEST_PAGE_SHIFT 0x34
2920259849SGeorge Zhang
3020259849SGeorge Zhang /* Max number of devices. */
3120259849SGeorge Zhang #define VMCI_MAX_DEVICES 1
3220259849SGeorge Zhang
3320259849SGeorge Zhang /* Status register bits. */
349a41691eSVishnu DASA #define VMCI_STATUS_INT_ON BIT(0)
3520259849SGeorge Zhang
3620259849SGeorge Zhang /* Control register bits. */
379a41691eSVishnu DASA #define VMCI_CONTROL_RESET BIT(0)
389a41691eSVishnu DASA #define VMCI_CONTROL_INT_ENABLE BIT(1)
399a41691eSVishnu DASA #define VMCI_CONTROL_INT_DISABLE BIT(2)
4020259849SGeorge Zhang
4120259849SGeorge Zhang /* Capabilities register bits. */
429a41691eSVishnu DASA #define VMCI_CAPS_HYPERCALL BIT(0)
439a41691eSVishnu DASA #define VMCI_CAPS_GUESTCALL BIT(1)
449a41691eSVishnu DASA #define VMCI_CAPS_DATAGRAM BIT(2)
459a41691eSVishnu DASA #define VMCI_CAPS_NOTIFICATIONS BIT(3)
469a41691eSVishnu DASA #define VMCI_CAPS_PPN64 BIT(4)
47eed2298dSJorgen Hansen #define VMCI_CAPS_DMA_DATAGRAM BIT(5)
4820259849SGeorge Zhang
4920259849SGeorge Zhang /* Interrupt Cause register bits. */
509a41691eSVishnu DASA #define VMCI_ICR_DATAGRAM BIT(0)
519a41691eSVishnu DASA #define VMCI_ICR_NOTIFICATION BIT(1)
52cc68f217SJorgen Hansen #define VMCI_ICR_DMA_DATAGRAM BIT(2)
5320259849SGeorge Zhang
5420259849SGeorge Zhang /* Interrupt Mask register bits. */
559a41691eSVishnu DASA #define VMCI_IMR_DATAGRAM BIT(0)
569a41691eSVishnu DASA #define VMCI_IMR_NOTIFICATION BIT(1)
57cc68f217SJorgen Hansen #define VMCI_IMR_DMA_DATAGRAM BIT(2)
5820259849SGeorge Zhang
59cc68f217SJorgen Hansen /*
60cc68f217SJorgen Hansen * Maximum MSI/MSI-X interrupt vectors in the device.
61cc68f217SJorgen Hansen * If VMCI_CAPS_DMA_DATAGRAM is supported by the device,
62cc68f217SJorgen Hansen * VMCI_MAX_INTRS_DMA_DATAGRAM vectors are available,
63cc68f217SJorgen Hansen * otherwise only VMCI_MAX_INTRS_NOTIFICATION.
64cc68f217SJorgen Hansen */
65cc68f217SJorgen Hansen #define VMCI_MAX_INTRS_NOTIFICATION 2
66cc68f217SJorgen Hansen #define VMCI_MAX_INTRS_DMA_DATAGRAM 3
67cc68f217SJorgen Hansen #define VMCI_MAX_INTRS VMCI_MAX_INTRS_DMA_DATAGRAM
6820259849SGeorge Zhang
6920259849SGeorge Zhang /*
7020259849SGeorge Zhang * Supported interrupt vectors. There is one for each ICR value above,
7120259849SGeorge Zhang * but here they indicate the position in the vector array/message ID.
7220259849SGeorge Zhang */
enum {
	VMCI_INTR_DATAGRAM = 0,		/* Vector for VMCI_ICR_DATAGRAM. */
	VMCI_INTR_NOTIFICATION = 1,	/* Vector for VMCI_ICR_NOTIFICATION. */
	VMCI_INTR_DMA_DATAGRAM = 2,	/* Vector for VMCI_ICR_DMA_DATAGRAM. */
};
7820259849SGeorge Zhang
7920259849SGeorge Zhang /*
8020259849SGeorge Zhang * A single VMCI device has an upper limit of 128MB on the amount of
811c2eb5b2SVishnu DASA * memory that can be used for queue pairs. Since each queue pair
821c2eb5b2SVishnu DASA * consists of at least two pages, the memory limit also dictates the
831c2eb5b2SVishnu DASA * number of queue pairs a guest can create.
8420259849SGeorge Zhang */
857eecea89SJorgen Hansen #define VMCI_MAX_GUEST_QP_MEMORY ((size_t)(128 * 1024 * 1024))
861c2eb5b2SVishnu DASA #define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
871c2eb5b2SVishnu DASA
881c2eb5b2SVishnu DASA /*
891c2eb5b2SVishnu DASA * There can be at most PAGE_SIZE doorbells since there is one doorbell
901c2eb5b2SVishnu DASA * per byte in the doorbell bitmap page.
911c2eb5b2SVishnu DASA */
921c2eb5b2SVishnu DASA #define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE
9320259849SGeorge Zhang
9420259849SGeorge Zhang /*
9520259849SGeorge Zhang * Queues with pre-mapped data pages must be small, so that we don't pin
9620259849SGeorge Zhang * too much kernel memory (especially on vmkernel). We limit a queuepair to
9720259849SGeorge Zhang * 32 KB, or 16 KB per queue for symmetrical pairs.
9820259849SGeorge Zhang */
997eecea89SJorgen Hansen #define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024))
10020259849SGeorge Zhang
10120259849SGeorge Zhang /*
102e283a0e8SJorgen Hansen * The version of the VMCI device that supports MMIO access to registers
103e283a0e8SJorgen Hansen * requests 256KB for BAR1 whereas the version of VMCI that supports
104e283a0e8SJorgen Hansen * MSI/MSI-X only requests 8KB. The layout of the larger 256KB region is:
105e283a0e8SJorgen Hansen * - the first 128KB are used for MSI/MSI-X.
106e283a0e8SJorgen Hansen * - the following 64KB are used for MMIO register access.
107e283a0e8SJorgen Hansen * - the remaining 64KB are unused.
108e283a0e8SJorgen Hansen */
109e283a0e8SJorgen Hansen #define VMCI_WITH_MMIO_ACCESS_BAR_SIZE ((size_t)(256 * 1024))
110e283a0e8SJorgen Hansen #define VMCI_MMIO_ACCESS_OFFSET ((size_t)(128 * 1024))
111e283a0e8SJorgen Hansen #define VMCI_MMIO_ACCESS_SIZE ((size_t)(64 * 1024))
112e283a0e8SJorgen Hansen
113e283a0e8SJorgen Hansen /*
114*22aa5c7fSJorgen Hansen * For VMCI devices supporting the VMCI_CAPS_DMA_DATAGRAM capability, the
115*22aa5c7fSJorgen Hansen * sending and receiving of datagrams can be performed using DMA to/from
116*22aa5c7fSJorgen Hansen * a driver allocated buffer.
117*22aa5c7fSJorgen Hansen * Sending and receiving will be handled as follows:
118*22aa5c7fSJorgen Hansen * - when sending datagrams, the driver initializes the buffer where the
119*22aa5c7fSJorgen Hansen * data part will refer to the outgoing VMCI datagram, sets the busy flag
120*22aa5c7fSJorgen Hansen * to 1 and writes the address of the buffer to VMCI_DATA_OUT_HIGH_ADDR
121*22aa5c7fSJorgen Hansen * and VMCI_DATA_OUT_LOW_ADDR. Writing to VMCI_DATA_OUT_LOW_ADDR triggers
122*22aa5c7fSJorgen Hansen * the device processing of the buffer. When the device has processed the
123*22aa5c7fSJorgen Hansen * buffer, it will write the result value to the buffer and then clear the
124*22aa5c7fSJorgen Hansen * busy flag.
125*22aa5c7fSJorgen Hansen * - when receiving datagrams, the driver initializes the buffer where the
126*22aa5c7fSJorgen Hansen * data part will describe the receive buffer, clears the busy flag and
127*22aa5c7fSJorgen Hansen * writes the address of the buffer to VMCI_DATA_IN_HIGH_ADDR and
128*22aa5c7fSJorgen Hansen * VMCI_DATA_IN_LOW_ADDR. Writing to VMCI_DATA_IN_LOW_ADDR triggers the
129*22aa5c7fSJorgen Hansen * device processing of the buffer. The device will copy as many available
130*22aa5c7fSJorgen Hansen * datagrams into the buffer as possible, and then sets the busy flag.
131*22aa5c7fSJorgen Hansen * When the busy flag is set, the driver will process the datagrams in the
132*22aa5c7fSJorgen Hansen * buffer.
133*22aa5c7fSJorgen Hansen */
struct vmci_data_in_out_header {
	uint32_t busy;		/* Ownership flag: on send, driver sets to 1 and the
				 * device clears when done; on receive, driver clears
				 * and the device sets when datagrams are available. */
	uint32_t opcode;	/* NOTE(review): presumably selects the operation on
				 * the buffer - not described here, confirm vs device spec. */
	uint32_t size;		/* NOTE(review): presumably the size of the data part
				 * that follows - confirm vs device spec. */
	uint32_t rsvd;		/* Reserved; also aligns 'result' to 8 bytes. */
	uint64_t result;	/* Result value written back by the device. */
};
141*22aa5c7fSJorgen Hansen
/*
 * A single scatter-gather element of a driver buffer.
 * NOTE(review): 'addr' is presumably a DMA/bus address handed to the
 * device (the buffers above are driver allocated for DMA) - confirm.
 */
struct vmci_sg_elem {
	uint64_t addr;
	uint64_t size;
};
146*22aa5c7fSJorgen Hansen
147*22aa5c7fSJorgen Hansen /*
14820259849SGeorge Zhang * We have a fixed set of resource IDs available in the VMX.
14920259849SGeorge Zhang * This allows us to have a very simple implementation since we statically
15020259849SGeorge Zhang * know how many will create datagram handles. If a new caller arrives and
15120259849SGeorge Zhang * we have run out of slots we can manually increment the maximum size of
15220259849SGeorge Zhang * available resource IDs.
15320259849SGeorge Zhang *
15420259849SGeorge Zhang * VMCI reserved hypervisor datagram resource IDs.
15520259849SGeorge Zhang */
enum {
	VMCI_RESOURCES_QUERY = 0,
	VMCI_GET_CONTEXT_ID = 1,
	VMCI_SET_NOTIFY_BITMAP = 2,
	VMCI_DOORBELL_LINK = 3,
	VMCI_DOORBELL_UNLINK = 4,
	VMCI_DOORBELL_NOTIFY = 5,
	/*
	 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
	 * obsoleted by the removal of VM to VM communication.  They are
	 * kept so the numbering of the later IDs stays stable.
	 */
	VMCI_DATAGRAM_REQUEST_MAP = 6,
	VMCI_DATAGRAM_REMOVE_MAP = 7,
	VMCI_EVENT_SUBSCRIBE = 8,
	VMCI_EVENT_UNSUBSCRIBE = 9,
	VMCI_QUEUEPAIR_ALLOC = 10,
	VMCI_QUEUEPAIR_DETACH = 11,

	/*
	 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
	 * WS 7.0/7.1 and ESX 4.1; 12 is therefore skipped here.
	 */
	VMCI_HGFS_TRANSPORT = 13,
	VMCI_UNITY_PBRPC_REGISTER = 14,
	VMCI_RPC_PRIVILEGED = 15,
	VMCI_RPC_UNPRIVILEGED = 16,
	VMCI_RESOURCE_MAX = 17,		/* One past the highest assigned ID. */
};
18420259849SGeorge Zhang
18520259849SGeorge Zhang /*
18620259849SGeorge Zhang * struct vmci_handle - Ownership information structure
18720259849SGeorge Zhang * @context: The VMX context ID.
18820259849SGeorge Zhang * @resource: The resource ID (used for locating in resource hash).
18920259849SGeorge Zhang *
19020259849SGeorge Zhang * The vmci_handle structure is used to track resources used within
19120259849SGeorge Zhang * vmw_vmci.
19220259849SGeorge Zhang */
struct vmci_handle {
	u32 context;	/* The VMX context ID. */
	u32 resource;	/* The resource ID within that context. */
};

/* Build a vmci_handle value from a context ID and a resource ID. */
#define vmci_make_handle(_cid, _rid) \
	(struct vmci_handle){ .context = _cid, .resource = _rid }
20020259849SGeorge Zhang
/*
 * Compare two VMCI handles.  Handles are equal only when both the
 * context ID and the resource ID match.
 */
static inline bool vmci_handle_is_equal(struct vmci_handle h1,
					struct vmci_handle h2)
{
	if (h1.context != h2.context)
		return false;

	return h1.resource == h2.resource;
}
20620259849SGeorge Zhang
20720259849SGeorge Zhang #define VMCI_INVALID_ID ~0
20820259849SGeorge Zhang static const struct vmci_handle VMCI_INVALID_HANDLE = {
20920259849SGeorge Zhang .context = VMCI_INVALID_ID,
21020259849SGeorge Zhang .resource = VMCI_INVALID_ID
21120259849SGeorge Zhang };
21220259849SGeorge Zhang
vmci_handle_is_invalid(struct vmci_handle h)21320259849SGeorge Zhang static inline bool vmci_handle_is_invalid(struct vmci_handle h)
21420259849SGeorge Zhang {
21520259849SGeorge Zhang return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
21620259849SGeorge Zhang }
21720259849SGeorge Zhang
/*
 * The below defines can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID   VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID  VMCI_INVALID_ID
/*
 * __maybe_unused: the constant lives in a header, so translation units
 * that include it without referencing it must not warn.
 */
static const struct vmci_handle __maybe_unused VMCI_ANON_SRC_HANDLE = {
	.context = VMCI_ANON_SRC_CONTEXT_ID,
	.resource = VMCI_ANON_SRC_RESOURCE_ID
};
22820259849SGeorge Zhang
22920259849SGeorge Zhang /* The lowest 16 context ids are reserved for internal use. */
23020259849SGeorge Zhang #define VMCI_RESERVED_CID_LIMIT ((u32) 16)
23120259849SGeorge Zhang
23220259849SGeorge Zhang /*
23320259849SGeorge Zhang * Hypervisor context id, used for calling into hypervisor
23420259849SGeorge Zhang * supplied services from the VM.
23520259849SGeorge Zhang */
23620259849SGeorge Zhang #define VMCI_HYPERVISOR_CONTEXT_ID 0
23720259849SGeorge Zhang
23820259849SGeorge Zhang /*
23920259849SGeorge Zhang * Well-known context id, a logical context that contains a set of
24020259849SGeorge Zhang * well-known services. This context ID is now obsolete.
24120259849SGeorge Zhang */
24220259849SGeorge Zhang #define VMCI_WELL_KNOWN_CONTEXT_ID 1
24320259849SGeorge Zhang
24420259849SGeorge Zhang /*
24520259849SGeorge Zhang * Context ID used by host endpoints.
24620259849SGeorge Zhang */
24720259849SGeorge Zhang #define VMCI_HOST_CONTEXT_ID 2
24820259849SGeorge Zhang
24920259849SGeorge Zhang #define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) && \
25020259849SGeorge Zhang (_cid) > VMCI_HOST_CONTEXT_ID)
25120259849SGeorge Zhang
25220259849SGeorge Zhang /*
25320259849SGeorge Zhang * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
25420259849SGeorge Zhang * handles that refer to a specific context.
25520259849SGeorge Zhang */
25620259849SGeorge Zhang #define VMCI_CONTEXT_RESOURCE_ID 0
25720259849SGeorge Zhang
25820259849SGeorge Zhang /*
25920259849SGeorge Zhang * VMCI error codes.
26020259849SGeorge Zhang */
enum {
	/* Positive values are qualified successes; 0 is plain success. */
	VMCI_SUCCESS_QUEUEPAIR_ATTACH = 5,
	VMCI_SUCCESS_QUEUEPAIR_CREATE = 4,
	VMCI_SUCCESS_LAST_DETACH = 3,
	VMCI_SUCCESS_ACCESS_GRANTED = 2,
	VMCI_SUCCESS_ENTRY_DEAD = 1,
	VMCI_SUCCESS = 0,
	/* Negative values are errors. */
	VMCI_ERROR_INVALID_RESOURCE = (-1),
	VMCI_ERROR_INVALID_ARGS = (-2),
	VMCI_ERROR_NO_MEM = (-3),
	VMCI_ERROR_DATAGRAM_FAILED = (-4),
	VMCI_ERROR_MORE_DATA = (-5),
	VMCI_ERROR_NO_MORE_DATAGRAMS = (-6),
	VMCI_ERROR_NO_ACCESS = (-7),
	VMCI_ERROR_NO_HANDLE = (-8),
	VMCI_ERROR_DUPLICATE_ENTRY = (-9),
	VMCI_ERROR_DST_UNREACHABLE = (-10),
	VMCI_ERROR_PAYLOAD_TOO_LARGE = (-11),
	VMCI_ERROR_INVALID_PRIV = (-12),
	VMCI_ERROR_GENERIC = (-13),
	VMCI_ERROR_PAGE_ALREADY_SHARED = (-14),
	VMCI_ERROR_CANNOT_SHARE_PAGE = (-15),
	VMCI_ERROR_CANNOT_UNSHARE_PAGE = (-16),
	VMCI_ERROR_NO_PROCESS = (-17),
	VMCI_ERROR_NO_DATAGRAM = (-18),
	VMCI_ERROR_NO_RESOURCES = (-19),
	VMCI_ERROR_UNAVAILABLE = (-20),
	VMCI_ERROR_NOT_FOUND = (-21),
	VMCI_ERROR_ALREADY_EXISTS = (-22),
	VMCI_ERROR_NOT_PAGE_ALIGNED = (-23),
	VMCI_ERROR_INVALID_SIZE = (-24),
	VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
	VMCI_ERROR_TIMEOUT = (-26),
	VMCI_ERROR_DATAGRAM_INCOMPLETE = (-27),
	VMCI_ERROR_INCORRECT_IRQL = (-28),
	VMCI_ERROR_EVENT_UNKNOWN = (-29),
	VMCI_ERROR_OBSOLETE = (-30),
	VMCI_ERROR_QUEUEPAIR_MISMATCH = (-31),
	VMCI_ERROR_QUEUEPAIR_NOTSET = (-32),
	VMCI_ERROR_QUEUEPAIR_NOTOWNER = (-33),
	VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
	VMCI_ERROR_QUEUEPAIR_NOSPACE = (-35),
	VMCI_ERROR_QUEUEPAIR_NODATA = (-36),
	VMCI_ERROR_BUSMEM_INVALIDATION = (-37),
	VMCI_ERROR_MODULE_NOT_LOADED = (-38),
	VMCI_ERROR_DEVICE_NOT_FOUND = (-39),
	VMCI_ERROR_QUEUEPAIR_NOT_READY = (-40),
	VMCI_ERROR_WOULD_BLOCK = (-41),

	/* VMCI clients should return error codes within this range. */
	VMCI_ERROR_CLIENT_MIN = (-500),
	VMCI_ERROR_CLIENT_MAX = (-550),

	/* Internal error codes. */
	VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
};
31720259849SGeorge Zhang
31820259849SGeorge Zhang /* VMCI reserved events. */
enum {
	/* Only applicable to guest endpoints. */
	VMCI_EVENT_CTX_ID_UPDATE = 0,

	/* Applicable to guest and host. */
	VMCI_EVENT_CTX_REMOVED = 1,

	/* Only applicable to guest endpoints. */
	VMCI_EVENT_QP_RESUMED = 2,

	/* Applicable to guest and host. */
	VMCI_EVENT_QP_PEER_ATTACH = 3,

	/* Applicable to guest and host. */
	VMCI_EVENT_QP_PEER_DETACH = 4,

	/*
	 * Applicable to VMX and vmk.  On vmk,
	 * this event has the Context payload type.
	 */
	VMCI_EVENT_MEM_ACCESS_ON = 5,

	/*
	 * Applicable to VMX and vmk.  Same as
	 * above for the payload type.
	 */
	VMCI_EVENT_MEM_ACCESS_OFF = 6,
	VMCI_EVENT_MAX = 7,	/* One past the highest event number. */
};
34820259849SGeorge Zhang
34920259849SGeorge Zhang /*
35020259849SGeorge Zhang * Of the above events, a few are reserved for use in the VMX, and
35120259849SGeorge Zhang * other endpoints (guest and host kernel) should not use them. For
35220259849SGeorge Zhang * the rest of the events, we allow both host and guest endpoints to
35320259849SGeorge Zhang * subscribe to them, to maintain the same API for host and guest
35420259849SGeorge Zhang * endpoints.
35520259849SGeorge Zhang */
35620259849SGeorge Zhang #define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
35720259849SGeorge Zhang (_event) == VMCI_EVENT_MEM_ACCESS_OFF)
35820259849SGeorge Zhang
35920259849SGeorge Zhang #define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX && \
36020259849SGeorge Zhang !VMCI_EVENT_VALID_VMX(_event))
36120259849SGeorge Zhang
36220259849SGeorge Zhang /* Reserved guest datagram resource ids. */
36320259849SGeorge Zhang #define VMCI_EVENT_HANDLER 0
36420259849SGeorge Zhang
/*
 * VMCI coarse-grained privileges (per context or host
 * process/endpoint). An entity with the restricted flag is only
 * allowed to interact with the hypervisor and trusted entities.
 */
enum {
	VMCI_NO_PRIVILEGE_FLAGS = 0,
	VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
	VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
	/* Union of all individual privilege flags. */
	VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
				    VMCI_PRIVILEGE_FLAG_TRUSTED),
	VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
	VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
	VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
};
38020259849SGeorge Zhang
38120259849SGeorge Zhang /* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
38220259849SGeorge Zhang #define VMCI_RESERVED_RESOURCE_ID_MAX 1023
38320259849SGeorge Zhang
38420259849SGeorge Zhang /*
38520259849SGeorge Zhang * Driver version.
38620259849SGeorge Zhang *
38720259849SGeorge Zhang * Increment major version when you make an incompatible change.
38820259849SGeorge Zhang * Compatibility goes both ways (old driver with new executable
38920259849SGeorge Zhang * as well as new driver with old executable).
39020259849SGeorge Zhang */
39120259849SGeorge Zhang
39220259849SGeorge Zhang /* Never change VMCI_VERSION_SHIFT_WIDTH */
39320259849SGeorge Zhang #define VMCI_VERSION_SHIFT_WIDTH 16
39420259849SGeorge Zhang #define VMCI_MAKE_VERSION(_major, _minor) \
39520259849SGeorge Zhang ((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))
39620259849SGeorge Zhang
39720259849SGeorge Zhang #define VMCI_VERSION_MAJOR(v) ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
39820259849SGeorge Zhang #define VMCI_VERSION_MINOR(v) ((u16) (v))
39920259849SGeorge Zhang
40020259849SGeorge Zhang /*
40120259849SGeorge Zhang * VMCI_VERSION is always the current version. Subsequently listed
40220259849SGeorge Zhang * versions are ways of detecting previous versions of the connecting
40320259849SGeorge Zhang * application (i.e., VMX).
40420259849SGeorge Zhang *
40520259849SGeorge Zhang * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
40620259849SGeorge Zhang * communication.
40720259849SGeorge Zhang *
40820259849SGeorge Zhang * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
40920259849SGeorge Zhang * support.
41020259849SGeorge Zhang *
41120259849SGeorge Zhang * VMCI_VERSION_HOSTQP: This version introduced host end point support
41220259849SGeorge Zhang * for hosted products.
41320259849SGeorge Zhang *
41420259849SGeorge Zhang * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
41520259849SGeorge Zhang * support for host end-points.
41620259849SGeorge Zhang *
41720259849SGeorge Zhang * VMCI_VERSION_PREVERS2: This fictional version number is intended to
41820259849SGeorge Zhang * represent the version of a VMX which doesn't call into the driver
41920259849SGeorge Zhang * with ioctl VERSION2 and thus doesn't establish its version with the
42020259849SGeorge Zhang * driver.
42120259849SGeorge Zhang */
42220259849SGeorge Zhang
42320259849SGeorge Zhang #define VMCI_VERSION VMCI_VERSION_NOVMVM
42420259849SGeorge Zhang #define VMCI_VERSION_NOVMVM VMCI_MAKE_VERSION(11, 0)
42520259849SGeorge Zhang #define VMCI_VERSION_NOTIFY VMCI_MAKE_VERSION(10, 0)
42620259849SGeorge Zhang #define VMCI_VERSION_HOSTQP VMCI_MAKE_VERSION(9, 0)
42720259849SGeorge Zhang #define VMCI_VERSION_PREHOSTQP VMCI_MAKE_VERSION(8, 0)
42820259849SGeorge Zhang #define VMCI_VERSION_PREVERS2 VMCI_MAKE_VERSION(1, 0)
42920259849SGeorge Zhang
43020259849SGeorge Zhang #define VMCI_SOCKETS_MAKE_VERSION(_p) \
43120259849SGeorge Zhang ((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
43220259849SGeorge Zhang
43320259849SGeorge Zhang /*
43420259849SGeorge Zhang * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.h, and
43520259849SGeorge Zhang * we start at sequence 9f. This gives us the same values that our shipping
43620259849SGeorge Zhang * products use, starting at 1951, provided we leave out the direction and
43720259849SGeorge Zhang * structure size. Note that VMMon occupies the block following us, starting
43820259849SGeorge Zhang * at 2001.
43920259849SGeorge Zhang */
44020259849SGeorge Zhang #define IOCTL_VMCI_VERSION _IO(7, 0x9f) /* 1951 */
44120259849SGeorge Zhang #define IOCTL_VMCI_INIT_CONTEXT _IO(7, 0xa0)
44220259849SGeorge Zhang #define IOCTL_VMCI_QUEUEPAIR_SETVA _IO(7, 0xa4)
44320259849SGeorge Zhang #define IOCTL_VMCI_NOTIFY_RESOURCE _IO(7, 0xa5)
44420259849SGeorge Zhang #define IOCTL_VMCI_NOTIFICATIONS_RECEIVE _IO(7, 0xa6)
44520259849SGeorge Zhang #define IOCTL_VMCI_VERSION2 _IO(7, 0xa7)
44620259849SGeorge Zhang #define IOCTL_VMCI_QUEUEPAIR_ALLOC _IO(7, 0xa8)
44720259849SGeorge Zhang #define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE _IO(7, 0xa9)
44820259849SGeorge Zhang #define IOCTL_VMCI_QUEUEPAIR_DETACH _IO(7, 0xaa)
44920259849SGeorge Zhang #define IOCTL_VMCI_DATAGRAM_SEND _IO(7, 0xab)
45020259849SGeorge Zhang #define IOCTL_VMCI_DATAGRAM_RECEIVE _IO(7, 0xac)
45120259849SGeorge Zhang #define IOCTL_VMCI_CTX_ADD_NOTIFICATION _IO(7, 0xaf)
45220259849SGeorge Zhang #define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION _IO(7, 0xb0)
45320259849SGeorge Zhang #define IOCTL_VMCI_CTX_GET_CPT_STATE _IO(7, 0xb1)
45420259849SGeorge Zhang #define IOCTL_VMCI_CTX_SET_CPT_STATE _IO(7, 0xb2)
45520259849SGeorge Zhang #define IOCTL_VMCI_GET_CONTEXT_ID _IO(7, 0xb3)
45620259849SGeorge Zhang #define IOCTL_VMCI_SOCKETS_VERSION _IO(7, 0xb4)
45720259849SGeorge Zhang #define IOCTL_VMCI_SOCKETS_GET_AF_VALUE _IO(7, 0xb8)
45820259849SGeorge Zhang #define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
45920259849SGeorge Zhang #define IOCTL_VMCI_SET_NOTIFY _IO(7, 0xcb) /* 1995 */
46020259849SGeorge Zhang /*IOCTL_VMMON_START _IO(7, 0xd1)*/ /* 2001 */
46120259849SGeorge Zhang
46220259849SGeorge Zhang /*
46320259849SGeorge Zhang * struct vmci_queue_header - VMCI Queue Header information.
46420259849SGeorge Zhang *
46520259849SGeorge Zhang * A Queue cannot stand by itself as designed. Each Queue's header
46620259849SGeorge Zhang * contains a pointer into itself (the producer_tail) and into its peer
46720259849SGeorge Zhang * (consumer_head). The reason for the separation is one of
46820259849SGeorge Zhang * accessibility: Each end-point can modify two things: where the next
46920259849SGeorge Zhang * location to enqueue is within its produce_q (producer_tail); and
47020259849SGeorge Zhang * where the next dequeue location is in its consume_q (consumer_head).
47120259849SGeorge Zhang *
47220259849SGeorge Zhang * An end-point cannot modify the pointers of its peer (guest to
47320259849SGeorge Zhang * guest; NOTE that in the host both queue headers are mapped r/w).
47420259849SGeorge Zhang * But, each end-point needs read access to both Queue header
47520259849SGeorge Zhang * structures in order to determine how much space is used (or left)
47620259849SGeorge Zhang * in the Queue. This is because for an end-point to know how full
47720259849SGeorge Zhang * its produce_q is, it needs to use the consumer_head that points into
47820259849SGeorge Zhang * the produce_q but -that- consumer_head is in the Queue header for
47920259849SGeorge Zhang * that end-points consume_q.
48020259849SGeorge Zhang *
48120259849SGeorge Zhang * Thoroughly confused? Sorry.
48220259849SGeorge Zhang *
48320259849SGeorge Zhang * producer_tail: the point to enqueue new entrants. When you approach
48420259849SGeorge Zhang * a line in a store, for example, you walk up to the tail.
48520259849SGeorge Zhang *
48620259849SGeorge Zhang * consumer_head: the point in the queue from which the next element is
48720259849SGeorge Zhang * dequeued. In other words, who is next in line is he who is at the
48820259849SGeorge Zhang * head of the line.
48920259849SGeorge Zhang *
49020259849SGeorge Zhang * Also, producer_tail points to an empty byte in the Queue, whereas
49120259849SGeorge Zhang * consumer_head points to a valid byte of data (unless producer_tail ==
49220259849SGeorge Zhang * consumer_head in which case consumer_head does not point to a valid
49320259849SGeorge Zhang * byte of data).
49420259849SGeorge Zhang *
49520259849SGeorge Zhang * For a queue of buffer 'size' bytes, the tail and head pointers will be in
49620259849SGeorge Zhang * the range [0, size-1].
49720259849SGeorge Zhang *
49820259849SGeorge Zhang * If produce_q_header->producer_tail == consume_q_header->consumer_head
49920259849SGeorge Zhang * then the produce_q is empty.
50020259849SGeorge Zhang */
struct vmci_queue_header {
	/*
	 * All fields are 64bit and aligned.  Layout is shared between the
	 * two endpoints: producer_tail is written by the local endpoint,
	 * consumer_head points into the peer's queue (see comment above).
	 */
	struct vmci_handle handle;	/* Identifier. */
	u64 producer_tail;		/* Offset in this queue. */
	u64 consumer_head;		/* Offset in peer queue. */
};
50720259849SGeorge Zhang
50820259849SGeorge Zhang /*
50920259849SGeorge Zhang * struct vmci_datagram - Base struct for vmci datagrams.
51020259849SGeorge Zhang * @dst: A vmci_handle that tracks the destination of the datagram.
51120259849SGeorge Zhang * @src: A vmci_handle that tracks the source of the datagram.
51220259849SGeorge Zhang * @payload_size: The size of the payload.
51320259849SGeorge Zhang *
51420259849SGeorge Zhang * vmci_datagram structs are used when sending vmci datagrams. They include
51520259849SGeorge Zhang * the necessary source and destination information to properly route
51620259849SGeorge Zhang * the information along with the size of the package.
51720259849SGeorge Zhang */
struct vmci_datagram {
	struct vmci_handle dst;		/* Destination of the datagram. */
	struct vmci_handle src;		/* Source of the datagram. */
	u64 payload_size;		/* Size of the payload that follows. */
};
52320259849SGeorge Zhang
52420259849SGeorge Zhang /*
52520259849SGeorge Zhang * Second flag is for creating a well-known handle instead of a per context
52620259849SGeorge Zhang * handle. Next flag is for deferring datagram delivery, so that the
52720259849SGeorge Zhang * datagram callback is invoked in a delayed context (not interrupt context).
52820259849SGeorge Zhang */
52920259849SGeorge Zhang #define VMCI_FLAG_DG_NONE 0
5309a41691eSVishnu DASA #define VMCI_FLAG_WELLKNOWN_DG_HND BIT(0)
5319a41691eSVishnu DASA #define VMCI_FLAG_ANYCID_DG_HND BIT(1)
5329a41691eSVishnu DASA #define VMCI_FLAG_DG_DELAYED_CB BIT(2)
53320259849SGeorge Zhang
53420259849SGeorge Zhang /*
53520259849SGeorge Zhang * Maximum supported size of a VMCI datagram for routable datagrams.
53620259849SGeorge Zhang * Datagrams going to the hypervisor are allowed to be larger.
53720259849SGeorge Zhang */
53820259849SGeorge Zhang #define VMCI_MAX_DG_SIZE (17 * 4096)
53920259849SGeorge Zhang #define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
54020259849SGeorge Zhang sizeof(struct vmci_datagram))
54120259849SGeorge Zhang #define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + \
54220259849SGeorge Zhang sizeof(struct vmci_datagram))
54320259849SGeorge Zhang #define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
54420259849SGeorge Zhang #define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
54520259849SGeorge Zhang #define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
54620259849SGeorge Zhang #define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
54720259849SGeorge Zhang
struct vmci_event_payload_qp {
	struct vmci_handle handle;	/* queue_pair handle. */
	u32 peer_id;	/* Context id of attaching/detaching VM. */
	u32 _pad;	/* Explicit padding; NOTE(review): presumably keeps the
			 * payload size fixed across compilers - confirm. */
};
55320259849SGeorge Zhang
55420259849SGeorge Zhang /* Flags for VMCI queue_pair API. */
enum {
	/* Fail alloc if QP not created by peer. */
	VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,

	/* Only allow attaches from local context. */
	VMCI_QPFLAG_LOCAL = 1 << 1,

	/* Host won't block when guest is quiesced. */
	VMCI_QPFLAG_NONBLOCK = 1 << 2,

	/* Pin data pages in ESX. Used with NONBLOCK */
	VMCI_QPFLAG_PINNED = 1 << 3,

	/* Update the following flag when adding new flags. */
	VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
			     VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),

	/* Convenience flags: NONBLOCK + PINNED, with or without ATTACH_ONLY. */
	VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
	VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
};
57620259849SGeorge Zhang
57720259849SGeorge Zhang /*
57820259849SGeorge Zhang * We allow at least 1024 more event datagrams from the hypervisor past the
57920259849SGeorge Zhang * normally allowed datagrams pending for a given context. We define this
58020259849SGeorge Zhang * limit on event datagrams from the hypervisor to guard against DoS attack
58120259849SGeorge Zhang * from a malicious VM which could repeatedly attach to and detach from a queue
58220259849SGeorge Zhang * pair, causing events to be queued at the destination VM. However, the rate
58320259849SGeorge Zhang * at which such events can be generated is small since it requires a VM exit
58420259849SGeorge Zhang * and handling of queue pair attach/detach call at the hypervisor. Event
58520259849SGeorge Zhang * datagrams may be queued up at the destination VM if it has interrupts
58620259849SGeorge Zhang * disabled or if it is not draining events for some other reason. 1024
58720259849SGeorge Zhang * datagrams is a grossly conservative estimate of the time for which
58820259849SGeorge Zhang * interrupts may be disabled in the destination VM, but at the same time does
58920259849SGeorge Zhang * not exacerbate the memory pressure problem on the host by much (size of each
59020259849SGeorge Zhang * event datagram is small).
59120259849SGeorge Zhang */
59220259849SGeorge Zhang #define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
59320259849SGeorge Zhang (VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
59420259849SGeorge Zhang 1024 * (sizeof(struct vmci_datagram) + \
59520259849SGeorge Zhang sizeof(struct vmci_event_data_max)))
59620259849SGeorge Zhang
59720259849SGeorge Zhang /*
59820259849SGeorge Zhang * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
59920259849SGeorge Zhang * hypervisor resources. Struct size is 16 bytes. All fields in struct are
60020259849SGeorge Zhang * aligned to their natural alignment.
60120259849SGeorge Zhang */
struct vmci_resource_query_hdr {
	struct vmci_datagram hdr;	/* Common datagram header. */
	u32 num_resources;		/* Number of resource ids being queried. */
	u32 _padding;			/* Explicit padding to an 8 byte boundary. */
};
60720259849SGeorge Zhang
/*
 * Convenience struct for negotiating vectors. Must match layout of
 * struct vmci_resource_query_hdr minus the struct vmci_datagram header.
 */
struct vmci_resource_query_msg {
	u32 num_resources;	/* Number of entries used in resources[]. */
	u32 _padding;		/* Explicit padding to an 8 byte boundary. */
	u32 resources[1];	/* Variable-length list of resource ids;
				 * [1] is the pre-C99 flexible-array idiom,
				 * kept to preserve sizeof() for the wire
				 * format. */
};
61720259849SGeorge Zhang
61820259849SGeorge Zhang /*
61920259849SGeorge Zhang * The maximum number of resources that can be queried using
62020259849SGeorge Zhang * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
62120259849SGeorge Zhang * bits of a positive return value. Negative values are reserved for
62220259849SGeorge Zhang * errors.
62320259849SGeorge Zhang */
62420259849SGeorge Zhang #define VMCI_RESOURCE_QUERY_MAX_NUM 31
62520259849SGeorge Zhang
62620259849SGeorge Zhang /* Maximum size for the VMCI_RESOURCE_QUERY request. */
62720259849SGeorge Zhang #define VMCI_RESOURCE_QUERY_MAX_SIZE \
62820259849SGeorge Zhang (sizeof(struct vmci_resource_query_hdr) + \
62920259849SGeorge Zhang sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)
63020259849SGeorge Zhang
63120259849SGeorge Zhang /*
63220259849SGeorge Zhang * Struct used for setting the notification bitmap. All fields in
63320259849SGeorge Zhang * struct are aligned to their natural alignment.
63420259849SGeorge Zhang */
struct vmci_notify_bm_set_msg {
	struct vmci_datagram hdr;	/* Common datagram header. */
	union {
		u32 bitmap_ppn32;	/* Bitmap PPN when the device uses 32-bit PPNs. */
		u64 bitmap_ppn64;	/* Bitmap PPN when the device uses 64-bit PPNs. */
	};
};
64220259849SGeorge Zhang
64320259849SGeorge Zhang /*
64420259849SGeorge Zhang * Struct used for linking a doorbell handle with an index in the
64520259849SGeorge Zhang * notify bitmap. All fields in struct are aligned to their natural
64620259849SGeorge Zhang * alignment.
64720259849SGeorge Zhang */
struct vmci_doorbell_link_msg {
	struct vmci_datagram hdr;	/* Common datagram header. */
	struct vmci_handle handle;	/* Doorbell handle to link. */
	u64 notify_idx;			/* Index into the notify bitmap. */
};
65320259849SGeorge Zhang
65420259849SGeorge Zhang /*
65520259849SGeorge Zhang * Struct used for unlinking a doorbell handle from an index in the
65620259849SGeorge Zhang * notify bitmap. All fields in struct are aligned to their natural
65720259849SGeorge Zhang * alignment.
65820259849SGeorge Zhang */
struct vmci_doorbell_unlink_msg {
	struct vmci_datagram hdr;	/* Common datagram header. */
	struct vmci_handle handle;	/* Doorbell handle to unlink. */
};
66320259849SGeorge Zhang
66420259849SGeorge Zhang /*
66520259849SGeorge Zhang * Struct used for generating a notification on a doorbell handle. All
66620259849SGeorge Zhang * fields in struct are aligned to their natural alignment.
66720259849SGeorge Zhang */
struct vmci_doorbell_notify_msg {
	struct vmci_datagram hdr;	/* Common datagram header. */
	struct vmci_handle handle;	/* Doorbell handle to notify. */
};
67220259849SGeorge Zhang
67320259849SGeorge Zhang /*
67420259849SGeorge Zhang * This struct is used to contain data for events. Size of this struct is a
67520259849SGeorge Zhang * multiple of 8 bytes, and all fields are aligned to their natural alignment.
67620259849SGeorge Zhang */
struct vmci_event_data {
	u32 event;	/* Event type, 4 bytes. */
	u32 _pad;	/* Explicit padding to an 8 byte boundary. */
	/* Event payload is put here. */
};
68220259849SGeorge Zhang
68320259849SGeorge Zhang /*
68420259849SGeorge Zhang * Define the different VMCI_EVENT payload data types here. All structs must
68520259849SGeorge Zhang * be a multiple of 8 bytes, and fields must be aligned to their natural
68620259849SGeorge Zhang * alignment.
68720259849SGeorge Zhang */
struct vmci_event_payld_ctx {
	u32 context_id;	/* Context id the event refers to, 4 bytes. */
	u32 _pad;	/* Explicit padding to an 8 byte boundary. */
};
69220259849SGeorge Zhang
struct vmci_event_payld_qp {
	struct vmci_handle handle;  /* queue_pair handle. */
	u32 peer_id;		    /* Context id of attaching/detaching VM. */
	u32 _pad;		    /* Explicit padding to an 8 byte boundary. */
};
69820259849SGeorge Zhang
69920259849SGeorge Zhang /*
70020259849SGeorge Zhang * We define the following struct to get the size of the maximum event
70120259849SGeorge Zhang * data the hypervisor may send to the guest. If adding a new event
70220259849SGeorge Zhang * payload type above, add it to the following struct too (inside the
70320259849SGeorge Zhang * union).
70420259849SGeorge Zhang */
struct vmci_event_data_max {
	struct vmci_event_data event_data;	/* Common event header. */
	union {
		struct vmci_event_payld_ctx context_payload;
		struct vmci_event_payld_qp qp_payload;
	} ev_data_payload;	/* Union of every known payload type, sizing
				 * the worst-case event datagram. */
};
71220259849SGeorge Zhang
71320259849SGeorge Zhang /*
71420259849SGeorge Zhang * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
71520259849SGeorge Zhang * VMCI_EVENT_HANDLER messages. Struct size is 32 bytes. All fields
71620259849SGeorge Zhang * in struct are aligned to their natural alignment.
71720259849SGeorge Zhang */
struct vmci_event_msg {
	struct vmci_datagram hdr;	/* Common datagram header. */

	/* Has event type and payload. */
	struct vmci_event_data event_data;

	/* Payload (one of the vmci_event_payld_* structs) gets put here. */
};
72620259849SGeorge Zhang
72720259849SGeorge Zhang /* Event with context payload. */
struct vmci_event_ctx {
	struct vmci_event_msg msg;		/* Event message header. */
	struct vmci_event_payld_ctx payload;	/* Context id payload. */
};
73220259849SGeorge Zhang
73320259849SGeorge Zhang /* Event with QP payload. */
struct vmci_event_qp {
	struct vmci_event_msg msg;		/* Event message header. */
	struct vmci_event_payld_qp payload;	/* Queue pair payload. */
};
73820259849SGeorge Zhang
73920259849SGeorge Zhang /*
74020259849SGeorge Zhang * Structs used for queue_pair alloc and detach messages. We align fields of
74120259849SGeorge Zhang * these structs to 64bit boundaries.
74220259849SGeorge Zhang */
struct vmci_qp_alloc_msg {
	struct vmci_datagram hdr;	/* Common datagram header. */
	struct vmci_handle handle;	/* Handle of the queue pair to allocate. */
	u32 peer;			/* Context id of the peer. */
	u32 flags;			/* VMCI_QPFLAG_* flags. */
	u64 produce_size;		/* Size of the produce queue, in bytes. */
	u64 consume_size;		/* Size of the consume queue, in bytes. */
	u64 num_ppns;			/* Number of PPNs in the trailing list. */

	/* List of PPNs placed here. */
};
75420259849SGeorge Zhang
struct vmci_qp_detach_msg {
	struct vmci_datagram hdr;	/* Common datagram header. */
	struct vmci_handle handle;	/* Handle of the queue pair to detach from. */
};
75920259849SGeorge Zhang
76020259849SGeorge Zhang /* VMCI Doorbell API. */
7619a41691eSVishnu DASA #define VMCI_FLAG_DELAYED_CB BIT(0)
76220259849SGeorge Zhang
76320259849SGeorge Zhang typedef void (*vmci_callback) (void *client_data);
76420259849SGeorge Zhang
76520259849SGeorge Zhang /*
76620259849SGeorge Zhang * struct vmci_qp - A vmw_vmci queue pair handle.
76720259849SGeorge Zhang *
76820259849SGeorge Zhang * This structure is used as a handle to a queue pair created by
76920259849SGeorge Zhang * VMCI. It is intentionally left opaque to clients.
77020259849SGeorge Zhang */
77120259849SGeorge Zhang struct vmci_qp;
77220259849SGeorge Zhang
77320259849SGeorge Zhang /* Callback needed for correctly waiting on events. */
77420259849SGeorge Zhang typedef int (*vmci_datagram_recv_cb) (void *client_data,
77520259849SGeorge Zhang struct vmci_datagram *msg);
77620259849SGeorge Zhang
77720259849SGeorge Zhang /* VMCI Event API. */
77820259849SGeorge Zhang typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
77920259849SGeorge Zhang void *client_data);
78020259849SGeorge Zhang
78120259849SGeorge Zhang /*
78220259849SGeorge Zhang * We use the following inline function to access the payload data
78320259849SGeorge Zhang * associated with an event data.
78420259849SGeorge Zhang */
78520259849SGeorge Zhang static inline const void *
vmci_event_data_const_payload(const struct vmci_event_data * ev_data)78620259849SGeorge Zhang vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
78720259849SGeorge Zhang {
78820259849SGeorge Zhang return (const char *)ev_data + sizeof(*ev_data);
78920259849SGeorge Zhang }
79020259849SGeorge Zhang
/* Mutable variant of vmci_event_data_const_payload(). */
static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
{
	const void *payload = vmci_event_data_const_payload(ev_data);

	return (void *)payload;
}
79520259849SGeorge Zhang
/*
 * Helper to read a value from a head or tail pointer. For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case. Also, doing an
 * atomic64_read on X86_32 uniprocessor systems may be implemented
 * as a non locked cmpxchg8b, that may end up overwriting updates done
 * by the VMCI device to the memory location. On 32bit SMP, the lock
 * prefix will be used, so correctness isn't an issue, but using a
 * 64bit operation still adds unnecessary overhead.
 */
static inline u64 vmci_q_read_pointer(u64 *var)
{
	/*
	 * The unsigned long cast reads only one native word; on 32-bit
	 * that is the low 32 bits (see comment above). Like the store
	 * side, this is broken on 32-bit big-endian, where the word at
	 * the lowest address is the high half of the u64.
	 */
	return READ_ONCE(*(unsigned long *)var);
}
810f42a0fd1SJorgen Hansen
/*
 * Helper to set the value of a head or tail pointer. For X86_32, the
 * pointer is treated as a 32bit value, since the pointer value
 * never exceeds a 32bit value in this case. On 32bit SMP, using a
 * locked cmpxchg8b adds unnecessary overhead.
 */
static inline void vmci_q_set_pointer(u64 *var, u64 new_val)
{
	/*
	 * XXX buggered on big-endian: only one native word is stored,
	 * which on 32-bit big-endian is the high half of the u64.
	 */
	WRITE_ONCE(*(unsigned long *)var, (unsigned long)new_val);
}
822f42a0fd1SJorgen Hansen
/*
 * Helper to add a given offset to a head or tail pointer. Wraps the
 * value of the pointer around the max size of the queue.
 */
static inline void vmci_qp_add_pointer(u64 *var, size_t add, u64 size)
{
	u64 new_val = vmci_q_read_pointer(var);

	/*
	 * Wrap by subtracting *before* adding, so the intermediate
	 * value never exceeds 'size' and cannot overflow the u64.
	 */
	if (new_val >= size - add)
		new_val -= size;

	new_val += add;

	vmci_q_set_pointer(var, new_val);
}
83820259849SGeorge Zhang
83920259849SGeorge Zhang /*
84020259849SGeorge Zhang * Helper routine to get the Producer Tail from the supplied queue.
84120259849SGeorge Zhang */
84220259849SGeorge Zhang static inline u64
vmci_q_header_producer_tail(const struct vmci_queue_header * q_header)84320259849SGeorge Zhang vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
84420259849SGeorge Zhang {
84520259849SGeorge Zhang struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
846f42a0fd1SJorgen Hansen return vmci_q_read_pointer(&qh->producer_tail);
84720259849SGeorge Zhang }
84820259849SGeorge Zhang
84920259849SGeorge Zhang /*
85020259849SGeorge Zhang * Helper routine to get the Consumer Head from the supplied queue.
85120259849SGeorge Zhang */
85220259849SGeorge Zhang static inline u64
vmci_q_header_consumer_head(const struct vmci_queue_header * q_header)85320259849SGeorge Zhang vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
85420259849SGeorge Zhang {
85520259849SGeorge Zhang struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
856f42a0fd1SJorgen Hansen return vmci_q_read_pointer(&qh->consumer_head);
85720259849SGeorge Zhang }
85820259849SGeorge Zhang
85920259849SGeorge Zhang /*
86020259849SGeorge Zhang * Helper routine to increment the Producer Tail. Fundamentally,
86120259849SGeorge Zhang * vmci_qp_add_pointer() is used to manipulate the tail itself.
86220259849SGeorge Zhang */
86320259849SGeorge Zhang static inline void
vmci_q_header_add_producer_tail(struct vmci_queue_header * q_header,size_t add,u64 queue_size)86420259849SGeorge Zhang vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
86520259849SGeorge Zhang size_t add,
86620259849SGeorge Zhang u64 queue_size)
86720259849SGeorge Zhang {
86820259849SGeorge Zhang vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
86920259849SGeorge Zhang }
87020259849SGeorge Zhang
87120259849SGeorge Zhang /*
87220259849SGeorge Zhang * Helper routine to increment the Consumer Head. Fundamentally,
87320259849SGeorge Zhang * vmci_qp_add_pointer() is used to manipulate the head itself.
87420259849SGeorge Zhang */
87520259849SGeorge Zhang static inline void
vmci_q_header_add_consumer_head(struct vmci_queue_header * q_header,size_t add,u64 queue_size)87620259849SGeorge Zhang vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
87720259849SGeorge Zhang size_t add,
87820259849SGeorge Zhang u64 queue_size)
87920259849SGeorge Zhang {
88020259849SGeorge Zhang vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
88120259849SGeorge Zhang }
88220259849SGeorge Zhang
88320259849SGeorge Zhang /*
88420259849SGeorge Zhang * Helper routine for getting the head and the tail pointer for a queue.
88520259849SGeorge Zhang * Both the VMCIQueues are needed to get both the pointers for one queue.
88620259849SGeorge Zhang */
88720259849SGeorge Zhang static inline void
vmci_q_header_get_pointers(const struct vmci_queue_header * produce_q_header,const struct vmci_queue_header * consume_q_header,u64 * producer_tail,u64 * consumer_head)88820259849SGeorge Zhang vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
88920259849SGeorge Zhang const struct vmci_queue_header *consume_q_header,
89020259849SGeorge Zhang u64 *producer_tail,
89120259849SGeorge Zhang u64 *consumer_head)
89220259849SGeorge Zhang {
89320259849SGeorge Zhang if (producer_tail)
89420259849SGeorge Zhang *producer_tail = vmci_q_header_producer_tail(produce_q_header);
89520259849SGeorge Zhang
89620259849SGeorge Zhang if (consumer_head)
89720259849SGeorge Zhang *consumer_head = vmci_q_header_consumer_head(consume_q_header);
89820259849SGeorge Zhang }
89920259849SGeorge Zhang
vmci_q_header_init(struct vmci_queue_header * q_header,const struct vmci_handle handle)90020259849SGeorge Zhang static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
90120259849SGeorge Zhang const struct vmci_handle handle)
90220259849SGeorge Zhang {
90320259849SGeorge Zhang q_header->handle = handle;
9049c3cef54SPeter Zijlstra q_header->producer_tail = 0;
9059c3cef54SPeter Zijlstra q_header->consumer_head = 0;
90620259849SGeorge Zhang }
90720259849SGeorge Zhang
90820259849SGeorge Zhang /*
90920259849SGeorge Zhang * Finds available free space in a produce queue to enqueue more
91020259849SGeorge Zhang * data or reports an error if queue pair corruption is detected.
91120259849SGeorge Zhang */
91220259849SGeorge Zhang static s64
vmci_q_header_free_space(const struct vmci_queue_header * produce_q_header,const struct vmci_queue_header * consume_q_header,const u64 produce_q_size)91320259849SGeorge Zhang vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
91420259849SGeorge Zhang const struct vmci_queue_header *consume_q_header,
91520259849SGeorge Zhang const u64 produce_q_size)
91620259849SGeorge Zhang {
91720259849SGeorge Zhang u64 tail;
91820259849SGeorge Zhang u64 head;
91920259849SGeorge Zhang u64 free_space;
92020259849SGeorge Zhang
92120259849SGeorge Zhang tail = vmci_q_header_producer_tail(produce_q_header);
92220259849SGeorge Zhang head = vmci_q_header_consumer_head(consume_q_header);
92320259849SGeorge Zhang
92420259849SGeorge Zhang if (tail >= produce_q_size || head >= produce_q_size)
92520259849SGeorge Zhang return VMCI_ERROR_INVALID_SIZE;
92620259849SGeorge Zhang
92720259849SGeorge Zhang /*
92820259849SGeorge Zhang * Deduct 1 to avoid tail becoming equal to head which causes
92920259849SGeorge Zhang * ambiguity. If head and tail are equal it means that the
93020259849SGeorge Zhang * queue is empty.
93120259849SGeorge Zhang */
93220259849SGeorge Zhang if (tail >= head)
93320259849SGeorge Zhang free_space = produce_q_size - (tail - head) - 1;
93420259849SGeorge Zhang else
93520259849SGeorge Zhang free_space = head - tail - 1;
93620259849SGeorge Zhang
93720259849SGeorge Zhang return free_space;
93820259849SGeorge Zhang }
93920259849SGeorge Zhang
94020259849SGeorge Zhang /*
94120259849SGeorge Zhang * vmci_q_header_free_space() does all the heavy lifting of
94220259849SGeorge Zhang * determing the number of free bytes in a Queue. This routine,
94320259849SGeorge Zhang * then subtracts that size from the full size of the Queue so
94420259849SGeorge Zhang * the caller knows how many bytes are ready to be dequeued.
94520259849SGeorge Zhang * Results:
94620259849SGeorge Zhang * On success, available data size in bytes (up to MAX_INT64).
94720259849SGeorge Zhang * On failure, appropriate error code.
94820259849SGeorge Zhang */
94920259849SGeorge Zhang static inline s64
vmci_q_header_buf_ready(const struct vmci_queue_header * consume_q_header,const struct vmci_queue_header * produce_q_header,const u64 consume_q_size)95020259849SGeorge Zhang vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
95120259849SGeorge Zhang const struct vmci_queue_header *produce_q_header,
95220259849SGeorge Zhang const u64 consume_q_size)
95320259849SGeorge Zhang {
95420259849SGeorge Zhang s64 free_space;
95520259849SGeorge Zhang
95620259849SGeorge Zhang free_space = vmci_q_header_free_space(consume_q_header,
95720259849SGeorge Zhang produce_q_header, consume_q_size);
95820259849SGeorge Zhang if (free_space < VMCI_SUCCESS)
95920259849SGeorge Zhang return free_space;
96020259849SGeorge Zhang
96120259849SGeorge Zhang return consume_q_size - free_space - 1;
96220259849SGeorge Zhang }
96320259849SGeorge Zhang
96420259849SGeorge Zhang
96520259849SGeorge Zhang #endif /* _VMW_VMCI_DEF_H_ */
966