// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "vmci_driver.h"
#include "vmci_event.h"

#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10

struct vmci_subscription {
	u32 id;
	u32 event;
	vmci_event_cb callback;
	void *callback_data;
	struct list_head node;	/* on one of subscriber lists */
};

static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);
int __init vmci_event_init(void)
{
	int i;

	for (i = 0; i < VMCI_EVENT_MAX; i++)
		INIT_LIST_HEAD(&subscriber_array[i]);

	return VMCI_SUCCESS;
}

void vmci_event_exit(void)
{
	int e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur, *p2;
		list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {

			/*
			 * We should never get here because all events
			 * should have been unregistered before we try
			 * to unload the driver module.
			 */
			pr_warn("Unexpected free events occurring\n");
			list_del(&cur->node);
			kfree(cur);
		}
	}
}

/*
 * Find entry. Assumes subscriber_mutex is held.
 */
static struct vmci_subscription *event_find(u32 sub_id)
{
	int e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur;
		list_for_each_entry(cur, &subscriber_array[e], node) {
			if (cur->id == sub_id)
				return cur;
		}
	}
	return NULL;
}

/*
 * Actually delivers the events to the subscribers.
 * The callback function for each subscriber is invoked.
 */
static void event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *cur;
	struct list_head *subscriber_list;
	u32 sanitized_event, max_vmci_event;

	rcu_read_lock();
	max_vmci_event = ARRAY_SIZE(subscriber_array);
	/*
	 * The event was already validated by vmci_event_dispatch();
	 * array_index_nospec() clamps the index again so it cannot be
	 * used for a speculative out-of-bounds load.
	 */
	sanitized_event = array_index_nospec(event_msg->event_data.event,
					     max_vmci_event);
	subscriber_list = &subscriber_array[sanitized_event];
	list_for_each_entry_rcu(cur, subscriber_list, node) {
		cur->callback(cur->id, &event_msg->event_data,
			      cur->callback_data);
	}
	rcu_read_unlock();
}

/*
 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 * subscribers for the given event.
 */
int vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	if (msg->payload_size < sizeof(u32) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return VMCI_ERROR_INVALID_ARGS;

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return VMCI_ERROR_EVENT_UNKNOWN;

	event_deliver(event_msg);
	return VMCI_SUCCESS;
}

/*
 * vmci_event_subscribe() - Subscribe to a given event.
 * @event:      The event to subscribe to.
 * @callback:   The callback to invoke upon the event.
 * @callback_data:      Data to pass to the callback.
 * @new_subscription_id:        ID used to track the subscription.  Used with
 *              vmci_event_unsubscribe()
 *
 * Subscribes to the provided event. The callback specified will be
 * fired from an RCU critical section and therefore must not sleep.
 */
int vmci_event_subscribe(u32 event,
			 vmci_event_cb callback,
			 void *callback_data,
			 u32 *new_subscription_id)
{
	struct vmci_subscription *sub;
	int attempts;
	int retval;
	bool have_new_id = false;

	if (!new_subscription_id) {
		pr_devel("%s: Invalid subscription (NULL)\n", __func__);
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (!VMCI_EVENT_VALID(event) || !callback) {
		pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
			 __func__, event, callback, callback_data);
		return VMCI_ERROR_INVALID_ARGS;
	}

	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
	if (!sub)
		return VMCI_ERROR_NO_MEM;

	sub->id = VMCI_EVENT_MAX;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;
	INIT_LIST_HEAD(&sub->node);

	mutex_lock(&subscriber_mutex);

	/* Creation of a new event subscription is always allowed. */
	for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
		static u32 subscription_id;
		/*
		 * We try to get an id a couple of times before
		 * claiming we are out of resources.
		 */

		/* Test for duplicate id. */
		if (!event_find(++subscription_id)) {
			sub->id = subscription_id;
			have_new_id = true;
			break;
		}
	}

	if (have_new_id) {
		list_add_rcu(&sub->node, &subscriber_array[event]);
		retval = VMCI_SUCCESS;
	} else {
		retval = VMCI_ERROR_NO_RESOURCES;
	}

	mutex_unlock(&subscriber_mutex);

	if (retval != VMCI_SUCCESS) {
		/* No usable id was found; don't leak the subscription. */
		kfree(sub);
		return retval;
	}

	*new_subscription_id = sub->id;
	return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);
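
/*
 * Usage sketch (illustrative only, not part of this driver): a
 * hypothetical client subscribing to VMCI_EVENT_QP_RESUMED.  The
 * callback is invoked from event_deliver() under rcu_read_lock(), so
 * it must not sleep or block.  The names example_qp_resumed_cb and
 * example_sub_id are invented for this sketch.
 *
 *	static u32 example_sub_id;
 *
 *	static void example_qp_resumed_cb(u32 sub_id,
 *					  const struct vmci_event_data *ed,
 *					  void *client_data)
 *	{
 *		// Runs in atomic (RCU read-side) context.
 *		pr_info("subscription %u: queue pairs resumed\n", sub_id);
 *	}
 *
 *	...
 *	result = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
 *				      example_qp_resumed_cb, NULL,
 *				      &example_sub_id);
 *	if (result < VMCI_SUCCESS)
 *		return -ENODEV;
 */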

/*
 * vmci_event_unsubscribe() - unsubscribe from an event.
 * @sub_id:     A subscription ID as provided by vmci_event_subscribe()
 *
 * Unsubscribe from the given event. Removes the subscription from the
 * subscriber list and frees it after an RCU grace period.
 */
int vmci_event_unsubscribe(u32 sub_id)
{
	struct vmci_subscription *s;

	mutex_lock(&subscriber_mutex);
	s = event_find(sub_id);
	if (s)
		list_del_rcu(&s->node);
	mutex_unlock(&subscriber_mutex);

	if (!s)
		return VMCI_ERROR_NOT_FOUND;

	kvfree_rcu_mightsleep(s);

	return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
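
/*
 * Usage sketch (illustrative only): tearing down the hypothetical
 * subscription from the example above.  After this call returns, the
 * subscription is unlinked, but a callback that was already running
 * under RCU may still complete; the memory is freed only after a grace
 * period by kvfree_rcu_mightsleep().
 *
 *	if (vmci_event_unsubscribe(example_sub_id) < VMCI_SUCCESS)
 *		pr_warn("failed to unsubscribe %u\n", example_sub_id);
 */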