// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/completion.h>

#include "vmci_resource.h"
#include "vmci_driver.h"


#define VMCI_RESOURCE_HASH_BITS         7
#define VMCI_RESOURCE_HASH_BUCKETS      (1 << VMCI_RESOURCE_HASH_BITS)

struct vmci_hash_table {
	spinlock_t lock;
	struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
};

static struct vmci_hash_table vmci_resource_table = {
	.lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock),
};

static unsigned int vmci_resource_hash(struct vmci_handle handle)
{
	return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
}

/*
 * Gets a resource (if one exists) matching given handle from the hash table.
 */
static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
						  enum vmci_resource_type type)
{
	struct vmci_resource *r, *resource = NULL;
	unsigned int idx = vmci_resource_hash(handle);

	rcu_read_lock();
	hlist_for_each_entry_rcu(r,
				 &vmci_resource_table.entries[idx], node) {
		u32 cid = r->handle.context;
		u32 rid = r->handle.resource;

		if (r->type == type &&
		    rid == handle.resource &&
		    (cid == handle.context || cid == VMCI_INVALID_ID ||
		     handle.context == VMCI_INVALID_ID)) {
			resource = r;
			break;
		}
	}
	rcu_read_unlock();

	return resource;
}

/*
 * Find an unused resource ID and return it. The first
 * VMCI_RESERVED_RESOURCE_ID_MAX are reserved so we start from
 * its value + 1.
 * Returns VMCI resource id on success, VMCI_INVALID_ID on failure.
 */
static u32 vmci_resource_find_id(u32 context_id,
				 enum vmci_resource_type resource_type)
{
	static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
	u32 old_rid = resource_id;
	u32 current_rid;

	/*
	 * Generate a unique resource ID.  Keep on trying until we wrap around
	 * in the RID space.
	 */
	do {
		struct vmci_handle handle;

		current_rid = resource_id;
		resource_id++;
		if (unlikely(resource_id == VMCI_INVALID_ID)) {
			/* Skip the reserved rids. */
			resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
		}

		handle = vmci_make_handle(context_id, current_rid);
		if (!vmci_resource_lookup(handle, resource_type))
			return current_rid;
	} while (resource_id != old_rid);

	return VMCI_INVALID_ID;
}

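/*
 * Add a resource to the global hash table under the table lock.  If the
 * handle's resource id is VMCI_INVALID_ID, an unused id is allocated
 * with vmci_resource_find_id(); otherwise the given handle must not
 * already be registered for the same resource type.  The embedded kref
 * starts at one; that initial reference is dropped later by
 * vmci_resource_remove().
 */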
int vmci_resource_add(struct vmci_resource *resource,
		      enum vmci_resource_type resource_type,
		      struct vmci_handle handle)
{
	unsigned int idx;
	int result;

	spin_lock(&vmci_resource_table.lock);

	if (handle.resource == VMCI_INVALID_ID) {
		handle.resource = vmci_resource_find_id(handle.context,
			resource_type);
		if (handle.resource == VMCI_INVALID_ID) {
			result = VMCI_ERROR_NO_HANDLE;
			goto out;
		}
	} else if (vmci_resource_lookup(handle, resource_type)) {
		result = VMCI_ERROR_ALREADY_EXISTS;
		goto out;
	}

	resource->handle = handle;
	resource->type = resource_type;
	INIT_HLIST_NODE(&resource->node);
	kref_init(&resource->kref);
	init_completion(&resource->done);

	idx = vmci_resource_hash(resource->handle);
	hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);

	result = VMCI_SUCCESS;

out:
	spin_unlock(&vmci_resource_table.lock);
	return result;
}

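/*
 * Unlink a resource from the hash table and wait until it is no longer
 * in use.  The node is removed under the table lock, synchronize_rcu()
 * lets lookups still walking the bucket finish, the initial reference
 * taken in vmci_resource_add() is dropped, and then the completion
 * signalled by the kref release function is awaited.  Once this returns
 * the caller may free the object containing the resource.
 */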
void vmci_resource_remove(struct vmci_resource *resource)
{
	struct vmci_handle handle = resource->handle;
	unsigned int idx = vmci_resource_hash(handle);
	struct vmci_resource *r;

	/* Remove resource from hash table. */
	spin_lock(&vmci_resource_table.lock);

	hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
		if (vmci_handle_is_equal(r->handle, resource->handle)) {
			hlist_del_init_rcu(&r->node);
			break;
		}
	}

	spin_unlock(&vmci_resource_table.lock);
	synchronize_rcu();

	vmci_resource_put(resource);
	wait_for_completion(&resource->done);
}

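/*
 * Look up a resource of the given type by handle and return it with an
 * extra reference held, or NULL if no match is found.  The caller must
 * drop the reference with vmci_resource_put() when done.
 */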
struct vmci_resource *
vmci_resource_by_handle(struct vmci_handle resource_handle,
			enum vmci_resource_type resource_type)
{
	struct vmci_resource *r, *resource = NULL;

	rcu_read_lock();

	r = vmci_resource_lookup(resource_handle, resource_type);
	if (r &&
	    (resource_type == r->type ||
	     resource_type == VMCI_RESOURCE_TYPE_ANY)) {
		resource = vmci_resource_get(r);
	}

	rcu_read_unlock();

	return resource;
}

/*
 * Get a reference to given resource.
 */
struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
{
	kref_get(&resource->kref);

	return resource;
}

static void vmci_release_resource(struct kref *kref)
{
	struct vmci_resource *resource =
		container_of(kref, struct vmci_resource, kref);

	/* Verify the resource has been unlinked from hash table */
	WARN_ON(!hlist_unhashed(&resource->node));

	/* Signal that container of this resource can now be destroyed */
	complete(&resource->done);
}

/*
 * The resource's release function gets called only on the last
 * reference drop.  If it is the last reference, then we are sure that
 * nobody else can increment the count again (the resource is gone from
 * the hash table), so there's no need for locking here.
 */
int vmci_resource_put(struct vmci_resource *resource)
{
	/*
	 * We propagate the information back to caller in case it wants to know
	 * whether entry was freed.
	 */
	return kref_put(&resource->kref, vmci_release_resource) ?
		VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
}

struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
{
	return resource->handle;
}
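
/*
 * Usage sketch (hypothetical, for illustration only): a caller embeds a
 * struct vmci_resource in its own object, registers it with
 * vmci_resource_add(), and tears it down with vmci_resource_remove()
 * before freeing the container.  The container type, function names and
 * resource type below are invented for this sketch and assume
 * <linux/slab.h> for kzalloc()/kfree(); they are not part of this file.
 */
#if 0
struct my_doorbell {
	struct vmci_resource resource;	/* must outlive all lookups */
	/* ... driver-specific payload ... */
};

static int my_doorbell_create(u32 context_id, struct my_doorbell **out)
{
	struct my_doorbell *db;
	struct vmci_handle handle;
	int result;

	db = kzalloc(sizeof(*db), GFP_KERNEL);
	if (!db)
		return VMCI_ERROR_NO_MEM;

	/* VMCI_INVALID_ID asks vmci_resource_add() to pick a free id. */
	handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
	result = vmci_resource_add(&db->resource,
				   VMCI_RESOURCE_TYPE_DOORBELL, handle);
	if (result != VMCI_SUCCESS) {
		kfree(db);
		return result;
	}

	*out = db;
	return VMCI_SUCCESS;
}

static void my_doorbell_destroy(struct my_doorbell *db)
{
	/*
	 * Unlinks the resource, waits for RCU readers and for every
	 * reference taken via vmci_resource_by_handle()/vmci_resource_get()
	 * to be dropped with vmci_resource_put(); only then is it safe to
	 * free the containing object.
	 */
	vmci_resource_remove(&db->resource);
	kfree(db);
}
#endif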