cn_queue.c (Linux v3.1)
/*
 * 	cn_queue.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

static struct cn_callback_entry *
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
			      struct cb_id *id,
			      void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq;

	cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
	if (!cbq) {
		printk(KERN_ERR "Failed to create new callback queue.\n");
		return NULL;
	}

	atomic_set(&cbq->refcnt, 1);

	atomic_inc(&dev->refcnt);
	cbq->pdev = dev;

	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
	cbq->callback = callback;
	return cbq;
}

void cn_queue_release_callback(struct cn_callback_entry *cbq)
{
	if (!atomic_dec_and_test(&cbq->refcnt))
		return;

	atomic_dec(&cbq->pdev->refcnt);
	kfree(cbq);
}

int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
	return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
			  struct cb_id *id,
			  void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq, *__cbq;
	int found = 0;

	cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
	if (!cbq)
		return -ENOMEM;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, id)) {
			found = 1;
			break;
		}
	}
	if (!found)
		list_add_tail(&cbq->callback_entry, &dev->queue_list);
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_release_callback(cbq);
		return -EINVAL;
	}

	cbq->seq = 0;
	cbq->group = cbq->id.id.idx;

	return 0;
}

void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
	struct cn_callback_entry *cbq, *n;
	int found = 0;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&cbq->id.id, id)) {
			list_del(&cbq->callback_entry);
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&dev->queue_lock);

	if (found)
		cn_queue_release_callback(cbq);
}

struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
{
	struct cn_queue_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	atomic_set(&dev->refcnt, 0);
	INIT_LIST_HEAD(&dev->queue_list);
	spin_lock_init(&dev->queue_lock);

	dev->nls = nls;

	return dev;
}

void cn_queue_free_dev(struct cn_queue_dev *dev)
{
	struct cn_callback_entry *cbq, *n;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
		list_del(&cbq->callback_entry);
	spin_unlock_bh(&dev->queue_lock);

	while (atomic_read(&dev->refcnt)) {
		printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
		       dev->name, atomic_read(&dev->refcnt));
		msleep(1000);
	}

	kfree(dev);
	dev = NULL;
}
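
Drivers do not call these queue helpers directly; they go through cn_add_callback() and cn_del_callback() in drivers/connector/connector.c, which wrap cn_queue_add_callback() and cn_queue_del_callback() above. The following is a minimal sketch of a kernel module registering such a callback. The module name, function names, and the cb_id values (CN_NETLINK_USERS + 3, 0x1) are hypothetical placeholders chosen for illustration, not anything defined by cn_queue.c.

#include <linux/module.h>
#include <linux/connector.h>

/* Hypothetical connector identifier; a real user must pick an unused idx/val pair. */
static struct cb_id example_id = { .idx = CN_NETLINK_USERS + 3, .val = 0x1 };

static void example_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	/* Runs for every connector message whose cb_id matches example_id. */
	pr_info("connector message: seq=%u ack=%u len=%u\n",
		msg->seq, msg->ack, msg->len);
}

static int __init example_init(void)
{
	/* Ends up in cn_queue_add_callback(); fails with -EINVAL if the id is taken. */
	return cn_add_callback(&example_id, "example", example_callback);
}

static void __exit example_exit(void)
{
	/* Ends up in cn_queue_del_callback(), dropping the entry's reference. */
	cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");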
cn_queue.c (Linux v3.15)
/*
 *	cn_queue.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

static struct cn_callback_entry *
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
			      struct cb_id *id,
			      void (*callback)(struct cn_msg *,
					       struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq;

	cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
	if (!cbq) {
		pr_err("Failed to create new callback queue.\n");
		return NULL;
	}

	atomic_set(&cbq->refcnt, 1);

	atomic_inc(&dev->refcnt);
	cbq->pdev = dev;

	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
	cbq->callback = callback;
	return cbq;
}

void cn_queue_release_callback(struct cn_callback_entry *cbq)
{
	if (!atomic_dec_and_test(&cbq->refcnt))
		return;

	atomic_dec(&cbq->pdev->refcnt);
	kfree(cbq);
}

int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
	return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
			  struct cb_id *id,
			  void (*callback)(struct cn_msg *,
					   struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq, *__cbq;
	int found = 0;

	cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
	if (!cbq)
		return -ENOMEM;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, id)) {
			found = 1;
			break;
		}
	}
	if (!found)
		list_add_tail(&cbq->callback_entry, &dev->queue_list);
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_release_callback(cbq);
		return -EINVAL;
	}

	cbq->seq = 0;
	cbq->group = cbq->id.id.idx;

	return 0;
}

void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
	struct cn_callback_entry *cbq, *n;
	int found = 0;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&cbq->id.id, id)) {
			list_del(&cbq->callback_entry);
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&dev->queue_lock);

	if (found)
		cn_queue_release_callback(cbq);
}

struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
{
	struct cn_queue_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	atomic_set(&dev->refcnt, 0);
	INIT_LIST_HEAD(&dev->queue_list);
	spin_lock_init(&dev->queue_lock);

	dev->nls = nls;

	return dev;
}

void cn_queue_free_dev(struct cn_queue_dev *dev)
{
	struct cn_callback_entry *cbq, *n;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
		list_del(&cbq->callback_entry);
	spin_unlock_bh(&dev->queue_lock);

	while (atomic_read(&dev->refcnt)) {
		pr_info("Waiting for %s to become free: refcnt=%d.\n",
		       dev->name, atomic_read(&dev->refcnt));
		msleep(1000);
	}

	kfree(dev);
	dev = NULL;
}
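
On the user-space side, the callbacks queued by cn_queue_add_callback() are reached by sending a cn_msg to the kernel over a NETLINK_CONNECTOR socket, in the spirit of the ucon.c example shipped with the connector documentation. The sketch below is a rough, hypothetical sender with minimal error handling; the idx/val pair must match whatever cb_id was registered on the kernel side (the values here are placeholders matching the module sketch above).

#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[NLMSG_SPACE(sizeof(struct cn_msg))];
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct cn_msg *msg;
	struct sockaddr_nl addr;
	int s;

	s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	if (s < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_pid = getpid();
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	memset(buf, 0, sizeof(buf));
	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct cn_msg));
	nlh->nlmsg_type = NLMSG_DONE;
	nlh->nlmsg_pid = getpid();

	msg = (struct cn_msg *)NLMSG_DATA(nlh);
	msg->id.idx = CN_NETLINK_USERS + 3;	/* placeholder: must match the registered cb_id */
	msg->id.val = 0x1;			/* placeholder */
	msg->len = 0;				/* no payload for this sketch */

	/* The kernel's connector receive path looks this cb_id up in the list
	 * maintained by cn_queue_add_callback() and, on a match, invokes the
	 * registered callback. */
	if (send(s, nlh, nlh->nlmsg_len, 0) < 0)
		return 1;

	close(s);
	return 0;
}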