/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#ifndef _VMCI_QUEUE_PAIR_H_
#define _VMCI_QUEUE_PAIR_H_

#include <linux/vmw_vmci_defs.h>
#include <linux/types.h>

#include "vmci_context.h"

/* Callback needed for correctly waiting on events. */
typedef int (*vmci_event_release_cb) (void *client_data);
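
/*
 * A minimal sketch (not part of this header) of such a release
 * callback, assuming the caller passes a struct completion as the
 * client_data argument of vmci_qp_alloc() below; the name
 * my_qp_wakeup is hypothetical and <linux/completion.h> would be
 * needed:
 *
 *	static int my_qp_wakeup(void *client_data)
 *	{
 *		struct completion *done = client_data;
 *
 *		complete(done);
 *		return VMCI_SUCCESS;
 *	}
 */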

/* Guest device port I/O. */
struct ppn_set {
	u64 num_produce_pages;
	u64 num_consume_pages;
	u64 *produce_ppns;
	u64 *consume_ppns;
	bool initialized;
};

/* VMCIqueue_pairAllocInfo */
struct vmci_qp_alloc_info {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 ppn_va;	/* Start VA of queue pair PPNs. */
	u64 num_ppns;
	s32 result;
	u32 version;
};

/* VMCIqueue_pairSetVAInfo */
struct vmci_qp_set_va_info {
	struct vmci_handle handle;
	u64 va;		/* Start VA of queue pair PPNs. */
	u64 num_ppns;
	u32 version;
	s32 result;
};

/*
 * For backwards compatibility, here is a version of the
 * VMCIqueue_pairPageFileInfo before support for host end points was
 * added.  Note that the current version of that structure requires VMX
 * to pass down the VA of the mapped file.  Before host support was
 * added there was nothing of the sort.  So, when the driver sees an
 * ioctl whose parameter size is sizeof(VMCIqueue_pairPageFileInfo_NoHostQP),
 * it can infer that the version of VMX running can't attach to host
 * end points because it doesn't provide the VA of the mapped files.
 *
 * The Linux driver doesn't get an indication of the size of the
 * structure passed down from user space.  So, to fix a long-standing
 * but unfiled bug, the _pad field has been renamed to version.
 * Existing versions of VMX always initialize the PageFileInfo
 * structure so that _pad, now version, is set to 0.
 *
 * A version value of 1 indicates that the size of the structure has
 * been increased to include two UVAs: produce_uva and consume_uva.
 * These UVAs are the addresses of the mmap()'d queue contents backing
 * files.
 *
 * In addition, if VMX gets an error when sending down the
 * VMCIqueue_pairPageFileInfo structure, it will retry with the
 * _NoHostQP version of the structure to see if an older VMCI kernel
 * module is running.
 */
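
/*
 * A minimal sketch, with hypothetical struct and variable names, of
 * how an ioctl handler could tell the two layouts apart purely by the
 * size of the argument copied in from VMX (the old layout ends right
 * after the version field, so produce_va/consume_va are only valid
 * when version >= 1):
 *
 *	if (arg_size < sizeof(struct vmci_qp_page_file_info)) {
 *		// Old VMX: no produce_va/consume_va, so it cannot
 *		// attach to host end points.
 *	} else if (info.version >= 1) {
 *		// New VMX: produce_va and consume_va are valid user
 *		// VAs of the mmap()'d backing files.
 *	}
 */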

/* VMCIqueue_pairPageFileInfo */
struct vmci_qp_page_file_info {
	struct vmci_handle handle;
	u64 produce_page_file;	  /* User VA. */
	u64 consume_page_file;	  /* User VA. */
	u64 produce_page_file_size;  /* Size of the file name array. */
	u64 consume_page_file_size;  /* Size of the file name array. */
	s32 result;
	u32 version;	/* Was _pad. */
	u64 produce_va;	/* User VA of the mapped file. */
	u64 consume_va;	/* User VA of the mapped file. */
};

/* vmci queuepair detach info */
struct vmci_qp_dtch_info {
	struct vmci_handle handle;
	s32 result;
	u32 _pad;
};

/*
 * struct vmci_qp_page_store describes how the memory of a given queue pair
 * is backed. When the queue pair is between the host and a guest, the
 * page store consists of references to the guest pages. On vmkernel,
 * this is a list of PPNs, and on hosted, it is a user VA where the
 * queue pair is mapped into the VMX address space.
 */
struct vmci_qp_page_store {
	/* Reference to pages backing the queue pair. */
	u64 pages;
	/* Length of pageList/virtual address range (in pages). */
	u32 len;
};
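
/*
 * A minimal sketch of how the hosted case above might be filled in
 * from a struct vmci_qp_alloc_info request before calling
 * vmci_qp_broker_alloc(); "alloc_info" is a hypothetical local
 * holding the user-supplied data:
 *
 *	struct vmci_qp_page_store page_store = {
 *		.pages = alloc_info.ppn_va,	// start VA of the QP PPNs
 *		.len   = alloc_info.num_ppns,	// number of pages mapped
 *	};
 */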

/*
 * This data type contains the information about a queue.
 * There are two queues (hence, queue pairs) per transaction model between a
 * pair of end points, A & B.  One queue is used by end point A to transmit
 * commands and responses to B.  The other queue is used by B to transmit
 * commands and responses to A.
 *
 * struct vmci_queue_kern_if is a per-OS defined Queue structure.  It contains
 * either a direct pointer to the linear address of the buffer contents or a
 * pointer to structures which help the OS locate those data pages.  See
 * vmciKernelIf.c for each platform for its definition.
 */
struct vmci_queue {
	struct vmci_queue_header *q_header;
	struct vmci_queue_header *saved_header;
	struct vmci_queue_kern_if *kernel_if;
};
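
/*
 * Illustration only, with hypothetical variable names: the two end
 * points see the same two queues with the produce/consume roles
 * swapped, so data that A writes into its produce queue is read by B
 * from B's consume queue, and vice versa:
 *
 *	struct vmci_queue *a_produce_q;	// same underlying queue as
 *	struct vmci_queue *b_consume_q;	// this one
 *
 *	struct vmci_queue *a_consume_q;	// same underlying queue as
 *	struct vmci_queue *b_produce_q;	// this one
 */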

/*
 * Utility function that checks whether the fields of the page
 * store contain valid values.
 * Result:
 * true if the page store is well-formed, false otherwise.
 */
static inline bool
VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
{
	return page_store->len >= 2;
}

void vmci_qp_broker_exit(void);
int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
			 u32 flags, u32 priv_flags,
			 u64 produce_size, u64 consume_size,
			 struct vmci_qp_page_store *page_store,
			 struct vmci_ctx *context);
int vmci_qp_broker_set_page_store(struct vmci_handle handle,
				  u64 produce_uva, u64 consume_uva,
				  struct vmci_ctx *context);
int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context);
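
/*
 * A minimal sketch of the host-side (broker) lifecycle, with
 * hypothetical variable names and error handling omitted; the ioctl
 * handlers are assumed to have copied in and validated the
 * user-supplied info first:
 *
 *	result = vmci_qp_broker_alloc(info.handle, info.peer, info.flags,
 *				      VMCI_NO_PRIVILEGE_FLAGS,
 *				      info.produce_size, info.consume_size,
 *				      &page_store, context);
 *	...
 *	// When the queue pair is torn down:
 *	result = vmci_qp_broker_detach(info.handle, context);
 */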

void vmci_qp_guest_endpoints_exit(void);

int vmci_qp_alloc(struct vmci_handle *handle,
		  struct vmci_queue **produce_q, u64 produce_size,
		  struct vmci_queue **consume_q, u64 consume_size,
		  u32 peer, u32 flags, u32 priv_flags,
		  bool guest_endpoint, vmci_event_release_cb wakeup_cb,
		  void *client_data);
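
/*
 * A minimal sketch of allocating a guest end point with the wakeup
 * callback sketched near the top of this header; handle, peer_cid and
 * my_completion are hypothetical, and flags/priv_flags are simply
 * zero here:
 *
 *	struct vmci_queue *produce_q, *consume_q;
 *	int result;
 *
 *	result = vmci_qp_alloc(&handle,
 *			       &produce_q, PAGE_SIZE,
 *			       &consume_q, PAGE_SIZE,
 *			       peer_cid, 0, 0,
 *			       true, my_qp_wakeup, &my_completion);
 */
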
int vmci_qp_broker_map(struct vmci_handle handle,
		       struct vmci_ctx *context, u64 guest_mem);
int vmci_qp_broker_unmap(struct vmci_handle handle,
			 struct vmci_ctx *context, u32 gid);

#endif /* _VMCI_QUEUE_PAIR_H_ */