Linux Audio

Check our new training course

Loading...
v6.2
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  4 */
  5
  6/*
  7 * This driver supports an interface for DCA clients and providers to meet.
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/notifier.h>
 12#include <linux/device.h>
 13#include <linux/dca.h>
 14#include <linux/slab.h>
 15#include <linux/module.h>
 16
#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

/* Serializes access to dca_domains and to every domain's provider list. */
static DEFINE_RAW_SPINLOCK(dca_lock);

/* One dca_domain per PCI root complex that has at least one provider. */
static LIST_HEAD(dca_domains);

/* Clients subscribe here for DCA_PROVIDER_ADD/REMOVE events. */
static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

/* Latched when an unsupported mix of root complexes is detected. */
static int dca_providers_blocked;
 30
 31static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
 32{
 33	struct pci_dev *pdev = to_pci_dev(dev);
 34	struct pci_bus *bus = pdev->bus;
 35
 36	while (bus->parent)
 37		bus = bus->parent;
 38
 39	return bus;
 40}
 41
 42static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
 43{
 44	struct dca_domain *domain;
 45
 46	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
 47	if (!domain)
 48		return NULL;
 49
 50	INIT_LIST_HEAD(&domain->dca_providers);
 51	domain->pci_rc = rc;
 52
 53	return domain;
 54}
 55
/*
 * Unlink @domain from dca_domains and free it.
 * Caller must hold dca_lock.
 */
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}
 61
 62static int dca_provider_ioat_ver_3_0(struct device *dev)
 63{
 64	struct pci_dev *pdev = to_pci_dev(dev);
 65
 66	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
 67		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
 68		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
 69		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
 70		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
 71		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
 72		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
 73		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
 74		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
 75}
 76
/*
 * Tear down every provider in the (single) remaining DCA domain.
 *
 * Called when a mix of supported and unsupported root complexes has been
 * detected (dca_providers_blocked set).  Clients are notified first; the
 * providers are then moved to a private list under dca_lock so the
 * possibly-sleeping sysfs teardown can run after the lock is dropped.
 */
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* nothing registered: bail before touching an empty list */
	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* sysfs removal may sleep, so it runs outside the raw spinlock */
	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}
111
112static struct dca_domain *dca_find_domain(struct pci_bus *rc)
113{
114	struct dca_domain *domain;
115
116	list_for_each_entry(domain, &dca_domains, node)
117		if (domain->pci_rc == rc)
118			return domain;
119
120	return NULL;
121}
122
/*
 * Look up the domain for @dev's root complex.  Caller must hold dca_lock.
 *
 * Does not allocate: when no domain exists it returns NULL and, if @dev
 * is an I/OAT 3.0 provider while another root complex already owns a
 * domain (a mix DCA cannot support), latches dca_providers_blocked so
 * further registration is refused.
 */
static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
			dca_providers_blocked = 1;
	}

	return domain;
}
138
139static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
140{
141	struct dca_provider *dca;
142	struct pci_bus *rc;
143	struct dca_domain *domain;
144
145	if (dev) {
146		rc = dca_pci_rc_from_dev(dev);
147		domain = dca_find_domain(rc);
148		if (!domain)
149			return NULL;
150	} else {
151		if (!list_empty(&dca_domains))
152			domain = list_first_entry(&dca_domains,
153						  struct dca_domain,
154						  node);
155		else
156			return NULL;
157	}
158
159	list_for_each_entry(dca, &domain->dca_providers, node)
160		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
161			return dca;
162
163	return NULL;
164}
165
/**
 * dca_add_requester - add a dca client to the list
 * @dev: the device that wants dca service
 *
 * Returns 0 on success, -EFAULT for a NULL @dev, -EEXIST if @dev is
 * already registered, -ENODEV if no domain/provider accepts it, or the
 * error from the provider's add_requester() / sysfs registration.
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* first provider in the domain that accepts @dev wins the slot */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs may sleep: done outside the lock, rolled back on failure */
	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* recheck: the provider may have gone away while unlocked */
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
220
/**
 * dca_remove_requester - remove a dca client from the list
 * @dev: the device that wants dca service
 *
 * Returns 0 on success, -EFAULT for a NULL @dev, -ENODEV when no
 * provider manages @dev, or a negative value from remove_requester().
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs teardown may sleep; done after dropping the lock */
	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);
251
/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev: the device that wants dca service (NULL for the old api)
 * @cpu: the cpuid as returned by get_cpu()
 *
 * NOTE: the u8 return type truncates the -ENODEV error to a byte, so a
 * caller cannot distinguish that failure from a valid tag value.
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}
275
/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev: the device that wants dca service
 * @cpu: the cpuid as returned by get_cpu()
 *
 * NOTE: error returns (-EFAULT here, -ENODEV from the helper) are
 * truncated by the u8 return type.
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);
290
291/**
292 * dca_get_tag - return the dca tag for the given cpu (old api)
293 * @cpu - the cpuid as returned by get_cpu()
294 */
295u8 dca_get_tag(int cpu)
296{
297	struct device *dev = NULL;
298
299	return dca_common_get_tag(dev, cpu);
300}
301EXPORT_SYMBOL_GPL(dca_get_tag);
302
303/**
304 * alloc_dca_provider - get data struct for describing a dca provider
305 * @ops - pointer to struct of dca operation function pointers
306 * @priv_size - size of extra mem to be added for provider's needs
307 */
308struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
309					int priv_size)
310{
311	struct dca_provider *dca;
312	int alloc_size;
313
314	alloc_size = (sizeof(*dca) + priv_size);
315	dca = kzalloc(alloc_size, GFP_KERNEL);
316	if (!dca)
317		return NULL;
318	dca->ops = ops;
319
320	return dca;
321}
322EXPORT_SYMBOL_GPL(alloc_dca_provider);
323
/**
 * free_dca_provider - release the dca provider data struct
 * @dca: struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
334
/**
 * register_dca_provider - register a dca provider
 * @dca: struct created by alloc_dca_provider()
 * @dev: device providing dca services
 *
 * Returns 0 on success, -ENODEV when providers are blocked or no domain
 * can be found/allocated, or the sysfs registration error.
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* sysfs registration may sleep; done before taking the lock */
	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		/* dca_get_domain() may have just latched the blocked flag */
		if (dca_providers_blocked) {
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		/* allocate outside the raw spinlock, then recheck below */
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	/* non-NULL only if we lost the race and newdomain went unused */
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
392
/**
 * unregister_dca_provider - remove a dca provider
 * @dca: struct created by alloc_dca_provider()
 * @dev: device that was providing dca services
 *
 * Notifies clients, unlinks @dca from its domain and frees the domain
 * when it becomes empty.  Bails out early if every domain was already
 * torn down by unregister_dca_providers().
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* everything may already be gone (blocked-provider teardown) */
	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	/* sysfs teardown may sleep; done after dropping the lock */
	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
425
/**
 * dca_register_notify - register a client's notifier callback
 * @nb: notifier block invoked on DCA_PROVIDER_ADD/REMOVE events
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);
434
/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb: notifier block previously passed to dca_register_notify()
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
443
/* Module init: announce the service and create the dca sysfs class. */
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}
449
/* Module exit: tear down the dca sysfs class. */
static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}
454
455arch_initcall(dca_init);
456module_exit(dca_exit);
457
v3.1
 
  1/*
  2 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms of the GNU General Public License as published by the Free
  6 * Software Foundation; either version 2 of the License, or (at your option)
  7 * any later version.
  8 *
  9 * This program is distributed in the hope that it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program; if not, write to the Free Software Foundation, Inc., 59
 16 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 17 *
 18 * The full GNU General Public License is included in this distribution in the
 19 * file called COPYING.
 20 */
 21
 22/*
 23 * This driver supports an interface for DCA clients and providers to meet.
 24 */
 25
 26#include <linux/kernel.h>
 27#include <linux/notifier.h>
 28#include <linux/device.h>
 29#include <linux/dca.h>
 30#include <linux/slab.h>
 
 31
#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

/* Serializes access to dca_domains and to every domain's provider list. */
static DEFINE_SPINLOCK(dca_lock);

/* One dca_domain per PCI root complex that has at least one provider. */
static LIST_HEAD(dca_domains);

/* Clients subscribe here for DCA_PROVIDER_ADD/REMOVE events. */
static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

/* Latched when an unsupported mix of root complexes is detected. */
static int dca_providers_blocked;
 45
 46static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
 47{
 48	struct pci_dev *pdev = to_pci_dev(dev);
 49	struct pci_bus *bus = pdev->bus;
 50
 51	while (bus->parent)
 52		bus = bus->parent;
 53
 54	return bus;
 55}
 56
/*
 * Allocate and initialize a DCA domain keyed by PCI root complex @rc.
 * GFP_NOWAIT because dca_get_domain() calls this with dca_lock held.
 * Returns NULL on allocation failure.
 */
static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}
 70
/*
 * Unlink @domain from dca_domains and free it.
 * Caller must hold dca_lock.
 */
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}
 76
 77static int dca_provider_ioat_ver_3_0(struct device *dev)
 78{
 79	struct pci_dev *pdev = to_pci_dev(dev);
 80
 81	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
 82		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
 83		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
 84		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
 85		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
 86		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
 87		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
 88		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
 89		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
 90}
 91
/*
 * Tear down every provider in the (single) remaining DCA domain.
 *
 * Called when a mix of supported and unsupported root complexes has been
 * detected (dca_providers_blocked set).  Providers are moved to a private
 * list under dca_lock so the possibly-sleeping sysfs teardown can run
 * after the lock is dropped.
 */
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	spin_lock_irqsave(&dca_lock, flags);

	/* nothing registered: bail before touching an empty list */
	if (list_empty(&dca_domains)) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	spin_unlock_irqrestore(&dca_lock, flags);

	/* sysfs removal may sleep, so it runs outside the spinlock */
	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}
126
127static struct dca_domain *dca_find_domain(struct pci_bus *rc)
128{
129	struct dca_domain *domain;
130
131	list_for_each_entry(domain, &dca_domains, node)
132		if (domain->pci_rc == rc)
133			return domain;
134
135	return NULL;
136}
137
/*
 * Find, or allocate, the domain for @dev's root complex.
 * Caller must hold dca_lock (hence GFP_NOWAIT in dca_allocate_domain()).
 *
 * If @dev is an I/OAT 3.0 provider while another root complex already
 * owns a domain (a mix DCA cannot support), latches
 * dca_providers_blocked instead of allocating and returns NULL.
 */
static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
			dca_providers_blocked = 1;
		} else {
			domain = dca_allocate_domain(rc);
			if (domain)
				list_add(&domain->node, &dca_domains);
		}
	}

	return domain;
}
158
159static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
160{
161	struct dca_provider *dca;
162	struct pci_bus *rc;
163	struct dca_domain *domain;
164
165	if (dev) {
166		rc = dca_pci_rc_from_dev(dev);
167		domain = dca_find_domain(rc);
168		if (!domain)
169			return NULL;
170	} else {
171		if (!list_empty(&dca_domains))
172			domain = list_first_entry(&dca_domains,
173						  struct dca_domain,
174						  node);
175		else
176			return NULL;
177	}
178
179	list_for_each_entry(dca, &domain->dca_providers, node)
180		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
181			return dca;
182
183	return NULL;
184}
185
/**
 * dca_add_requester - add a dca client to the list
 * @dev: the device that wants dca service
 *
 * Returns 0 on success, -EFAULT for a NULL @dev, -EEXIST if @dev is
 * already registered, -ENODEV if no domain/provider accepts it, or the
 * error from the provider's add_requester() / sysfs registration.
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* first provider in the domain that accepts @dev wins the slot */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs may sleep: done outside the lock, rolled back on failure */
	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		spin_lock_irqsave(&dca_lock, flags);
		/* recheck: the provider may have gone away while unlocked */
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
240
/**
 * dca_remove_requester - remove a dca client from the list
 * @dev: the device that wants dca service
 *
 * Returns 0 on success, -EFAULT for a NULL @dev, -ENODEV when no
 * provider manages @dev, or a negative value from remove_requester().
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	/* sysfs teardown may sleep; done after dropping the lock */
	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);
271
/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev: the device that wants dca service (NULL for the old api)
 * @cpu: the cpuid as returned by get_cpu()
 *
 * NOTE: the u8 return type truncates the -ENODEV error to a byte, so a
 * caller cannot distinguish that failure from a valid tag value.
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}
295
/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev: the device that wants dca service
 * @cpu: the cpuid as returned by get_cpu()
 *
 * NOTE: error returns (-EFAULT here, -ENODEV from the helper) are
 * truncated by the u8 return type.
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);
310
311/**
312 * dca_get_tag - return the dca tag for the given cpu (old api)
313 * @cpu - the cpuid as returned by get_cpu()
314 */
315u8 dca_get_tag(int cpu)
316{
317	struct device *dev = NULL;
318
319	return dca_common_get_tag(dev, cpu);
320}
321EXPORT_SYMBOL_GPL(dca_get_tag);
322
/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops: pointer to struct of dca operation function pointers
 * @priv_size: size of extra mem to be added for provider's needs
 *
 * Returns a zeroed provider with @priv_size trailing bytes reserved for
 * the caller's private data, or NULL on allocation failure.
 */
struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	/* provider struct plus the caller's private area in one allocation */
	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);
342
/**
 * free_dca_provider - release the dca provider data struct
 * @dca: struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
353
/**
 * register_dca_provider - register a dca provider
 * @dca: struct created by alloc_dca_provider()
 * @dev: device providing dca services
 *
 * Returns 0 on success, -ENODEV when providers are blocked or no domain
 * can be found/allocated, or the sysfs registration error.
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain;

	spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&dca_lock, flags);

	/* sysfs registration may sleep; done before taking the lock */
	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		/* dca_get_domain() may have just latched the blocked flag */
		if (dca_providers_blocked) {
			spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
		} else {
			spin_unlock_irqrestore(&dca_lock, flags);
		}
		return -ENODEV;
	}
	list_add(&dca->node, &domain->dca_providers);
	spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
396
397/**
398 * unregister_dca_provider - remove a dca provider
399 * @dca - struct created by alloc_dca_provider()
400 */
401void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
402{
403	unsigned long flags;
404	struct pci_bus *pci_rc;
405	struct dca_domain *domain;
406
407	blocking_notifier_call_chain(&dca_provider_chain,
408				     DCA_PROVIDER_REMOVE, NULL);
409
410	spin_lock_irqsave(&dca_lock, flags);
 
 
 
 
 
411
412	list_del(&dca->node);
413
414	pci_rc = dca_pci_rc_from_dev(dev);
415	domain = dca_find_domain(pci_rc);
416	if (list_empty(&domain->dca_providers))
417		dca_free_domain(domain);
418
419	spin_unlock_irqrestore(&dca_lock, flags);
420
421	dca_sysfs_remove_provider(dca);
422}
423EXPORT_SYMBOL_GPL(unregister_dca_provider);
424
/**
 * dca_register_notify - register a client's notifier callback
 * @nb: notifier block invoked on DCA_PROVIDER_ADD/REMOVE events
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);
433
/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb: notifier block previously passed to dca_register_notify()
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
442
/* Module init: announce the service and create the dca sysfs class. */
static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}
448
/* Module exit: tear down the dca sysfs class. */
static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}
453
454arch_initcall(dca_init);
455module_exit(dca_exit);
456