v6.13.7
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_DESCRIPTION("Intel Direct Cache Access (DCA) service module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;

static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_bus *bus = pdev->bus;

        while (bus->parent)
                bus = bus->parent;

        return bus;
}

static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
        struct dca_domain *domain;

        domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
        if (!domain)
                return NULL;

        INIT_LIST_HEAD(&domain->dca_providers);
        domain->pci_rc = rc;

        return domain;
}

static void dca_free_domain(struct dca_domain *domain)
{
        list_del(&domain->node);
        kfree(domain);
}

static int dca_provider_ioat_ver_3_0(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
                ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

static void unregister_dca_providers(void)
{
        struct dca_provider *dca, *_dca;
        struct list_head unregistered_providers;
        struct dca_domain *domain;
        unsigned long flags;

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_REMOVE, NULL);

        INIT_LIST_HEAD(&unregistered_providers);

        raw_spin_lock_irqsave(&dca_lock, flags);

        if (list_empty(&dca_domains)) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return;
        }

        /* at this point only one domain in the list is expected */
        domain = list_first_entry(&dca_domains, struct dca_domain, node);

        list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
                list_move(&dca->node, &unregistered_providers);

        dca_free_domain(domain);

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
                dca_sysfs_remove_provider(dca);
                list_del(&dca->node);
        }
}

static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
        struct dca_domain *domain;

        list_for_each_entry(domain, &dca_domains, node)
                if (domain->pci_rc == rc)
                        return domain;

        return NULL;
}

static struct dca_domain *dca_get_domain(struct device *dev)
{
        struct pci_bus *rc;
        struct dca_domain *domain;

        rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(rc);

        if (!domain) {
                if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
                        dca_providers_blocked = 1;
        }

        return domain;
}

static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
        struct dca_provider *dca;
        struct pci_bus *rc;
        struct dca_domain *domain;

        if (dev) {
                rc = dca_pci_rc_from_dev(dev);
                domain = dca_find_domain(rc);
                if (!domain)
                        return NULL;
        } else {
                if (!list_empty(&dca_domains))
                        domain = list_first_entry(&dca_domains,
                                                  struct dca_domain,
                                                  node);
                else
                        return NULL;
        }

        list_for_each_entry(dca, &domain->dca_providers, node)
                if ((!dev) || (dca->ops->dev_managed(dca, dev)))
                        return dca;

        return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
        struct dca_provider *dca;
        int err, slot = -ENODEV;
        unsigned long flags;
        struct pci_bus *pci_rc;
        struct dca_domain *domain;

        if (!dev)
                return -EFAULT;

        raw_spin_lock_irqsave(&dca_lock, flags);

        /* check if the requester has not been added already */
        dca = dca_find_provider_by_dev(dev);
        if (dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -EEXIST;
        }

        pci_rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(pci_rc);
        if (!domain) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }

        list_for_each_entry(dca, &domain->dca_providers, node) {
                slot = dca->ops->add_requester(dca, dev);
                if (slot >= 0)
                        break;
        }

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        if (slot < 0)
                return slot;

        err = dca_sysfs_add_req(dca, dev, slot);
        if (err) {
                raw_spin_lock_irqsave(&dca_lock, flags);
                if (dca == dca_find_provider_by_dev(dev))
                        dca->ops->remove_requester(dca, dev);
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
        struct dca_provider *dca;
        int slot;
        unsigned long flags;

        if (!dev)
                return -EFAULT;

        raw_spin_lock_irqsave(&dca_lock, flags);
        dca = dca_find_provider_by_dev(dev);
        if (!dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        slot = dca->ops->remove_requester(dca, dev);
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        if (slot < 0)
                return slot;

        dca_sysfs_remove_req(dca, slot);

        return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
        struct dca_provider *dca;
        u8 tag;
        unsigned long flags;

        raw_spin_lock_irqsave(&dca_lock, flags);

        dca = dca_find_provider_by_dev(dev);
        if (!dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        tag = dca->ops->get_tag(dca, dev, cpu);

        raw_spin_unlock_irqrestore(&dca_lock, flags);
        return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
        if (!dev)
                return -EFAULT;

        return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
        return dca_common_get_tag(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
                                        int priv_size)
{
        struct dca_provider *dca;
        int alloc_size;

        alloc_size = (sizeof(*dca) + priv_size);
        dca = kzalloc(alloc_size, GFP_KERNEL);
        if (!dca)
                return NULL;
        dca->ops = ops;

        return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
void free_dca_provider(struct dca_provider *dca)
{
        kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
        int err;
        unsigned long flags;
        struct dca_domain *domain, *newdomain = NULL;

        raw_spin_lock_irqsave(&dca_lock, flags);
        if (dca_providers_blocked) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        err = dca_sysfs_add_provider(dca, dev);
        if (err)
                return err;

        raw_spin_lock_irqsave(&dca_lock, flags);
        domain = dca_get_domain(dev);
        if (!domain) {
                struct pci_bus *rc;

                if (dca_providers_blocked) {
                        raw_spin_unlock_irqrestore(&dca_lock, flags);
                        dca_sysfs_remove_provider(dca);
                        unregister_dca_providers();
                        return -ENODEV;
                }

                raw_spin_unlock_irqrestore(&dca_lock, flags);
                rc = dca_pci_rc_from_dev(dev);
                newdomain = dca_allocate_domain(rc);
                if (!newdomain)
                        return -ENODEV;
                raw_spin_lock_irqsave(&dca_lock, flags);
                /* Recheck, we might have raced after dropping the lock */
                domain = dca_get_domain(dev);
                if (!domain) {
                        domain = newdomain;
                        newdomain = NULL;
                        list_add(&domain->node, &dca_domains);
                }
        }
        list_add(&dca->node, &domain->dca_providers);
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_ADD, NULL);
        kfree(newdomain);
        return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
        unsigned long flags;
        struct pci_bus *pci_rc;
        struct dca_domain *domain;

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_REMOVE, NULL);

        raw_spin_lock_irqsave(&dca_lock, flags);

        if (list_empty(&dca_domains)) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return;
        }

        list_del(&dca->node);

        pci_rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(pci_rc);
        if (list_empty(&domain->dca_providers))
                dca_free_domain(domain);

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);

/**
 * dca_register_notify - register a client's notifier callback
 */
void dca_register_notify(struct notifier_block *nb)
{
        blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 */
void dca_unregister_notify(struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);

static int __init dca_init(void)
{
        pr_info("dca service started, version %s\n", DCA_VERSION);
        return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
        dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);
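
For context, the exported requester-side calls above (dca_add_requester(), dca3_get_tag(), dca_remove_requester()) are what a networking or DMA driver would use. Below is a minimal client-side sketch, not part of dca-core.c: the function names my_driver_enable_dca()/my_driver_disable_dca() and the surrounding driver are hypothetical, and error handling is abbreviated.

/*
 * Hypothetical client-side usage sketch (assumes a PCI device "pdev" whose
 * root complex is covered by a registered DCA provider).
 */
#include <linux/dca.h>
#include <linux/pci.h>
#include <linux/smp.h>

static int my_driver_enable_dca(struct pci_dev *pdev)
{
        int err;
        u8 tag;

        /* returns 0 on success, -ENODEV if no provider manages this device */
        err = dca_add_requester(&pdev->dev);
        if (err)
                return err;

        /* per-CPU tag; get_cpu()/put_cpu() bracket the lookup as the
         * kernel-doc above suggests */
        tag = dca3_get_tag(&pdev->dev, get_cpu());
        put_cpu();

        /* ... program the tag into the device's DCA control registers ... */

        return 0;
}

static void my_driver_disable_dca(struct pci_dev *pdev)
{
        dca_remove_requester(&pdev->dev);
}

A provider (such as ioatdma) takes the other path: alloc_dca_provider() with its dca_ops table, then register_dca_provider()/unregister_dca_provider() around the device's lifetime.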