v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "vmci_driver.h"
#include "vmci_event.h"

#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10

struct vmci_subscription {
	u32 id;
	u32 event;
	vmci_event_cb callback;
	void *callback_data;
	struct list_head node;	/* on one of subscriber lists */
};

static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);

int __init vmci_event_init(void)
{
	int i;

	for (i = 0; i < VMCI_EVENT_MAX; i++)
		INIT_LIST_HEAD(&subscriber_array[i]);

	return VMCI_SUCCESS;
}

void vmci_event_exit(void)
{
	int e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur, *p2;
		list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {

			/*
			 * We should never get here because all events
			 * should have been unregistered before we try
			 * to unload the driver module.
			 */
			pr_warn("Unexpected free events occurring\n");
			list_del(&cur->node);
			kfree(cur);
		}
	}
}

/*
 * Find entry. Assumes subscriber_mutex is held.
 */
static struct vmci_subscription *event_find(u32 sub_id)
{
	int e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur;
		list_for_each_entry(cur, &subscriber_array[e], node) {
			if (cur->id == sub_id)
				return cur;
		}
	}
	return NULL;
}

/*
 * Actually delivers the events to the subscribers.
 * The callback function for each subscriber is invoked.
 */
static void event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *cur;
	struct list_head *subscriber_list;

	rcu_read_lock();
	subscriber_list = &subscriber_array[event_msg->event_data.event];
	list_for_each_entry_rcu(cur, subscriber_list, node) {
		cur->callback(cur->id, &event_msg->event_data,
			      cur->callback_data);
	}
	rcu_read_unlock();
}

/*
 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 * subscribers for the given event.
 */
int vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	if (msg->payload_size < sizeof(u32) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return VMCI_ERROR_INVALID_ARGS;

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return VMCI_ERROR_EVENT_UNKNOWN;

	event_deliver(event_msg);
	return VMCI_SUCCESS;
}

/*
 * vmci_event_subscribe() - Subscribe to a given event.
 * @event:      The event to subscribe to.
 * @callback:   The callback to invoke upon the event.
 * @callback_data:      Data to pass to the callback.
 * @new_subscription_id:        ID used to track the subscription.  Used with
 *              vmci_event_unsubscribe()
 *
 * Subscribes to the provided event. The callback specified will be
 * invoked from an RCU read-side critical section and therefore must
 * not sleep.
 */
int vmci_event_subscribe(u32 event,
			 vmci_event_cb callback,
			 void *callback_data,
			 u32 *new_subscription_id)
{
	struct vmci_subscription *sub;
	int attempts;
	int retval;
	bool have_new_id = false;

	if (!new_subscription_id) {
		pr_devel("%s: Invalid subscription (NULL)\n", __func__);
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (!VMCI_EVENT_VALID(event) || !callback) {
		pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
			 __func__, event, callback, callback_data);
		return VMCI_ERROR_INVALID_ARGS;
	}

	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
	if (!sub)
		return VMCI_ERROR_NO_MEM;

	sub->id = VMCI_EVENT_MAX;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;
	INIT_LIST_HEAD(&sub->node);

	mutex_lock(&subscriber_mutex);

	/* Creation of a new subscription is always allowed. */
	for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
		static u32 subscription_id;
		/*
		 * We try to get an id a couple of times before
		 * claiming we are out of resources.
		 */

		/* Test for duplicate id. */
		if (!event_find(++subscription_id)) {
			sub->id = subscription_id;
			have_new_id = true;
			break;
		}
	}

	if (have_new_id) {
		list_add_rcu(&sub->node, &subscriber_array[event]);
		retval = VMCI_SUCCESS;
	} else {
		retval = VMCI_ERROR_NO_RESOURCES;
	}

	mutex_unlock(&subscriber_mutex);

	*new_subscription_id = sub->id;
	return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);

/*
 * vmci_event_unsubscribe() - unsubscribe from an event.
 * @sub_id:     A subscription ID as provided by vmci_event_subscribe()
 *
 * Unsubscribes from the given event. Removes the subscription from
 * its subscriber list and frees it once no RCU readers can still be
 * traversing it.
 */
int vmci_event_unsubscribe(u32 sub_id)
{
	struct vmci_subscription *s;

	mutex_lock(&subscriber_mutex);
	s = event_find(sub_id);
	if (s)
		list_del_rcu(&s->node);
	mutex_unlock(&subscriber_mutex);

	if (!s)
		return VMCI_ERROR_NOT_FOUND;

	kvfree_rcu_mightsleep(s);

	return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
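
The subscribe/unsubscribe pair above is the entire client-facing surface of this file. What follows is a minimal usage sketch, not part of vmci_event.c: only vmci_event_subscribe(), vmci_event_unsubscribe(), the vmci_event_cb signature, and the VCMI status codes come from the headers included above; the example_* names, the choice of VMCI_EVENT_QP_RESUMED as the event, and the callback body are illustrative assumptions.

/*
 * Minimal usage sketch (hypothetical client code, not from the
 * file above).  The example_* identifiers are invented; the API
 * calls and callback signature are the ones defined above.
 */
#include <linux/printk.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

static u32 example_sub_id;	/* hypothetical subscription handle */

/* Runs under rcu_read_lock() in event_deliver(), so it must not sleep. */
static void example_event_cb(u32 sub_id, const struct vmci_event_data *ed,
			     void *client_data)
{
	pr_info("VMCI event %u on subscription %u\n", ed->event, sub_id);
}

static int example_register(void)
{
	/* VMCI_EVENT_QP_RESUMED is one of the valid VMCI event types. */
	return vmci_event_subscribe(VMCI_EVENT_QP_RESUMED, example_event_cb,
				    NULL, &example_sub_id);
}

static void example_unregister(void)
{
	vmci_event_unsubscribe(example_sub_id);
}

Note that vmci_event_subscribe() returns a VMCI status code (VMCI_SUCCESS on success), not a negative errno.
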
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "vmci_driver.h"
#include "vmci_event.h"

#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10

struct vmci_subscription {
	u32 id;
	u32 event;
	vmci_event_cb callback;
	void *callback_data;
	struct list_head node;	/* on one of subscriber lists */
};

static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);

int __init vmci_event_init(void)
{
	int i;

	for (i = 0; i < VMCI_EVENT_MAX; i++)
		INIT_LIST_HEAD(&subscriber_array[i]);

	return VMCI_SUCCESS;
}

void vmci_event_exit(void)
{
	int e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur, *p2;
		list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {

			/*
			 * We should never get here because all events
			 * should have been unregistered before we try
			 * to unload the driver module.
			 */
			pr_warn("Unexpected free events occurring\n");
			list_del(&cur->node);
			kfree(cur);
		}
	}
}

/*
 * Find entry. Assumes subscriber_mutex is held.
 */
static struct vmci_subscription *event_find(u32 sub_id)
{
	int e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur;
		list_for_each_entry(cur, &subscriber_array[e], node) {
			if (cur->id == sub_id)
				return cur;
		}
	}
	return NULL;
}

/*
 * Actually delivers the events to the subscribers.
 * The callback function for each subscriber is invoked.
 */
static void event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *cur;
	struct list_head *subscriber_list;
	u32 sanitized_event, max_vmci_event;

	rcu_read_lock();
	max_vmci_event = ARRAY_SIZE(subscriber_array);
	sanitized_event = array_index_nospec(event_msg->event_data.event, max_vmci_event);
	subscriber_list = &subscriber_array[sanitized_event];
	list_for_each_entry_rcu(cur, subscriber_list, node) {
		cur->callback(cur->id, &event_msg->event_data,
			      cur->callback_data);
	}
	rcu_read_unlock();
}

/*
 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 * subscribers for the given event.
 */
int vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	if (msg->payload_size < sizeof(u32) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return VMCI_ERROR_INVALID_ARGS;

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return VMCI_ERROR_EVENT_UNKNOWN;

	event_deliver(event_msg);
	return VMCI_SUCCESS;
}

/*
 * vmci_event_subscribe() - Subscribe to a given event.
 * @event:      The event to subscribe to.
 * @callback:   The callback to invoke upon the event.
 * @callback_data:      Data to pass to the callback.
 * @new_subscription_id:        ID used to track the subscription.  Used with
 *              vmci_event_unsubscribe()
 *
 * Subscribes to the provided event. The callback specified will be
 * invoked from an RCU read-side critical section and therefore must
 * not sleep.
 */
int vmci_event_subscribe(u32 event,
			 vmci_event_cb callback,
			 void *callback_data,
			 u32 *new_subscription_id)
{
	struct vmci_subscription *sub;
	int attempts;
	int retval;
	bool have_new_id = false;

	if (!new_subscription_id) {
		pr_devel("%s: Invalid subscription (NULL)\n", __func__);
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (!VMCI_EVENT_VALID(event) || !callback) {
		pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
			 __func__, event, callback, callback_data);
		return VMCI_ERROR_INVALID_ARGS;
	}

	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
	if (!sub)
		return VMCI_ERROR_NO_MEM;

	sub->id = VMCI_EVENT_MAX;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;
	INIT_LIST_HEAD(&sub->node);

	mutex_lock(&subscriber_mutex);

	/* Creation of a new subscription is always allowed. */
	for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
		static u32 subscription_id;
		/*
		 * We try to get an id a couple of times before
		 * claiming we are out of resources.
		 */

		/* Test for duplicate id. */
		if (!event_find(++subscription_id)) {
			sub->id = subscription_id;
			have_new_id = true;
			break;
		}
	}

	if (have_new_id) {
		list_add_rcu(&sub->node, &subscriber_array[event]);
		retval = VMCI_SUCCESS;
	} else {
		retval = VMCI_ERROR_NO_RESOURCES;
	}

	mutex_unlock(&subscriber_mutex);

	*new_subscription_id = sub->id;
	return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);

/*
 * vmci_event_unsubscribe() - unsubscribe from an event.
 * @sub_id:     A subscription ID as provided by vmci_event_subscribe()
 *
 * Unsubscribes from the given event. Removes the subscription from
 * its subscriber list and frees it once no RCU readers can still be
 * traversing it.
 */
int vmci_event_unsubscribe(u32 sub_id)
{
	struct vmci_subscription *s;

	mutex_lock(&subscriber_mutex);
	s = event_find(sub_id);
	if (s)
		list_del_rcu(&s->node);
	mutex_unlock(&subscriber_mutex);

	if (!s)
		return VMCI_ERROR_NOT_FOUND;

	kvfree_rcu_mightsleep(s);

	return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
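
The only functional difference between the two listings is in event_deliver(): v6.13.7 additionally includes <linux/nospec.h> and clamps the event index with array_index_nospec() before using it to index subscriber_array, so that a mispredicted VMCI_EVENT_VALID() check in vmci_event_dispatch() cannot turn into a speculative out-of-bounds array access (the Spectre-v1 pattern). Below is a minimal sketch of that general pattern; the table, its size, and the lookup function are invented for illustration, while array_index_nospec() itself is the real helper from <linux/nospec.h>.

/*
 * Sketch of the bounds-check-plus-array_index_nospec() pattern used
 * by the v6.13.7 event_deliver() above.  EXAMPLE_TABLE_SIZE and
 * example_* are hypothetical.
 */
#include <linux/errno.h>
#include <linux/nospec.h>

#define EXAMPLE_TABLE_SIZE 8		/* hypothetical table size */
static int example_table[EXAMPLE_TABLE_SIZE];

static int example_lookup(unsigned int idx)
{
	if (idx >= EXAMPLE_TABLE_SIZE)	/* architectural bounds check */
		return -EINVAL;
	/*
	 * Clamp idx so it cannot index out of bounds even while the
	 * CPU speculates past a mispredicted bounds check (Spectre v1).
	 */
	idx = array_index_nospec(idx, EXAMPLE_TABLE_SIZE);
	return example_table[idx];
}

The clamp is deliberately placed after the ordinary range check: the check handles the architectural case, and array_index_nospec() forces the index to a safe value on any speculative path that runs before the branch resolves.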