// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "vmci_driver.h"
#include "vmci_event.h"

#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10

struct vmci_subscription {
	u32 id;
	u32 event;
	vmci_event_cb callback;
	void *callback_data;
	struct list_head node;	/* on one of subscriber lists */
};

static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);

int __init vmci_event_init(void)
{
	int i;

	for (i = 0; i < VMCI_EVENT_MAX; i++)
		INIT_LIST_HEAD(&subscriber_array[i]);

	return VMCI_SUCCESS;
}

void vmci_event_exit(void)
{
	int e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur, *p2;
		list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {

			/*
			 * We should never get here because all events
			 * should have been unregistered before we try
			 * to unload the driver module.
			 */
			pr_warn("Unexpected event subscription left over at exit\n");
			list_del(&cur->node);
			kfree(cur);
		}
	}
}

/*
 * Find entry. Assumes subscriber_mutex is held.
 */
static struct vmci_subscription *event_find(u32 sub_id)
{
	int e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur;
		list_for_each_entry(cur, &subscriber_array[e], node) {
			if (cur->id == sub_id)
				return cur;
		}
	}
	return NULL;
}

/*
 * Actually delivers the events to the subscribers.
 * The callback function for each subscriber is invoked.
 */
static void event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *cur;
	struct list_head *subscriber_list;

	rcu_read_lock();
	subscriber_list = &subscriber_array[event_msg->event_data.event];
	list_for_each_entry_rcu(cur, subscriber_list, node) {
		cur->callback(cur->id, &event_msg->event_data,
			      cur->callback_data);
	}
	rcu_read_unlock();
}

/*
 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 * subscribers for the given event.
 */
int vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	if (msg->payload_size < sizeof(u32) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return VMCI_ERROR_INVALID_ARGS;

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return VMCI_ERROR_EVENT_UNKNOWN;

	event_deliver(event_msg);
	return VMCI_SUCCESS;
}
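
/*
 * Illustrative sketch only, not part of this driver: a minimal client
 * of this API.  All example_* names are hypothetical, and the use of
 * VMCI_EVENT_QP_PEER_ATTACH is just one plausible event choice.
 * Because event_deliver() above invokes callbacks under
 * rcu_read_lock(), the callback runs in atomic context and must not
 * sleep, block, or call vmci_event_unsubscribe() on itself.
 */
static void __maybe_unused example_peer_attach_cb(u32 sub_id,
						  const struct vmci_event_data *ed,
						  void *client_data)
{
	/* RCU read-side (atomic) context: no sleeping calls here. */
	pr_debug("event %u delivered to subscription %u\n",
		 ed->event, sub_id);
}

static int __maybe_unused example_subscribe(u32 *sub_id)
{
	/*
	 * vmci_event_subscribe() allocates with GFP_KERNEL and takes a
	 * mutex, so it must be called from process context.
	 */
	return vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
				    example_peer_attach_cb, NULL, sub_id);
}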
/*
 * vmci_event_subscribe() - Subscribe to a given event.
 * @event: The event to subscribe to.
 * @callback: The callback to invoke upon the event.
 * @callback_data: Data to pass to the callback.
 * @new_subscription_id: ID used to track the subscription. Used with
 * vmci_event_unsubscribe().
 *
 * Subscribes to the provided event. The callback specified will be
 * fired from an RCU read-side critical section and therefore must
 * not sleep.
 */
int vmci_event_subscribe(u32 event,
			 vmci_event_cb callback,
			 void *callback_data,
			 u32 *new_subscription_id)
{
	struct vmci_subscription *sub;
	int attempts;
	int retval;
	bool have_new_id = false;

	if (!new_subscription_id) {
		pr_devel("%s: Invalid subscription (NULL)\n", __func__);
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (!VMCI_EVENT_VALID(event) || !callback) {
		pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
			 __func__, event, callback, callback_data);
		return VMCI_ERROR_INVALID_ARGS;
	}

	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
	if (!sub)
		return VMCI_ERROR_NO_MEM;

	sub->id = VMCI_EVENT_MAX;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;
	INIT_LIST_HEAD(&sub->node);

	mutex_lock(&subscriber_mutex);

	/* Creation of a new subscription is always allowed. */
	for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
		static u32 subscription_id;
		/*
		 * We try to get an id a couple of times before
		 * claiming we are out of resources.
		 */

		/* Test for duplicate id. */
		if (!event_find(++subscription_id)) {
			sub->id = subscription_id;
			have_new_id = true;
			break;
		}
	}

	if (have_new_id) {
		list_add_rcu(&sub->node, &subscriber_array[event]);
		retval = VMCI_SUCCESS;
	} else {
		retval = VMCI_ERROR_NO_RESOURCES;
	}

	mutex_unlock(&subscriber_mutex);

	if (retval != VMCI_SUCCESS) {
		/* The subscription was never published; don't leak it. */
		kfree(sub);
		return retval;
	}

	*new_subscription_id = sub->id;
	return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);

/*
 * vmci_event_unsubscribe() - Unsubscribe from an event.
 * @sub_id: A subscription ID as provided by vmci_event_subscribe().
 *
 * Unsubscribes from the given event. Removes the subscription from
 * the subscriber list and frees it once no RCU reader can still be
 * traversing it.
 */
int vmci_event_unsubscribe(u32 sub_id)
{
	struct vmci_subscription *s;

	mutex_lock(&subscriber_mutex);
	s = event_find(sub_id);
	if (s)
		list_del_rcu(&s->node);
	mutex_unlock(&subscriber_mutex);

	if (!s)
		return VMCI_ERROR_NOT_FOUND;

	synchronize_rcu();
	kfree(s);

	return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
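
/*
 * Illustrative teardown sketch, not part of this driver (the
 * example_* names are hypothetical): because vmci_event_unsubscribe()
 * only returns after synchronize_rcu(), the callback is guaranteed
 * not to be running on any CPU once it returns, so state that was
 * handed in as callback_data can be freed immediately afterwards.
 */
static void __maybe_unused example_unsubscribe(u32 sub_id, void *state)
{
	if (vmci_event_unsubscribe(sub_id) == VMCI_SUCCESS)
		kfree(state);	/* no callback can still reference it */
}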