Loading...
1/*
2 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59
16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING.
20 */
21
22/*
23 * This driver supports an interface for DCA clients and providers to meet.
24 */
25
26#include <linux/kernel.h>
27#include <linux/notifier.h>
28#include <linux/device.h>
29#include <linux/dca.h>
30#include <linux/slab.h>
31
32#define DCA_VERSION "1.12.1"
33
34MODULE_VERSION(DCA_VERSION);
35MODULE_LICENSE("GPL");
36MODULE_AUTHOR("Intel Corporation");
37
38static DEFINE_SPINLOCK(dca_lock);
39
40static LIST_HEAD(dca_domains);
41
42static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
43
44static int dca_providers_blocked;
45
46static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
47{
48 struct pci_dev *pdev = to_pci_dev(dev);
49 struct pci_bus *bus = pdev->bus;
50
51 while (bus->parent)
52 bus = bus->parent;
53
54 return bus;
55}
56
57static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
58{
59 struct dca_domain *domain;
60
61 domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
62 if (!domain)
63 return NULL;
64
65 INIT_LIST_HEAD(&domain->dca_providers);
66 domain->pci_rc = rc;
67
68 return domain;
69}
70
71static void dca_free_domain(struct dca_domain *domain)
72{
73 list_del(&domain->node);
74 kfree(domain);
75}
76
77static int dca_provider_ioat_ver_3_0(struct device *dev)
78{
79 struct pci_dev *pdev = to_pci_dev(dev);
80
81 return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
82 ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
83 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
84 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
85 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
86 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
87 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
88 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
89 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
90}
91
92static void unregister_dca_providers(void)
93{
94 struct dca_provider *dca, *_dca;
95 struct list_head unregistered_providers;
96 struct dca_domain *domain;
97 unsigned long flags;
98
99 blocking_notifier_call_chain(&dca_provider_chain,
100 DCA_PROVIDER_REMOVE, NULL);
101
102 INIT_LIST_HEAD(&unregistered_providers);
103
104 spin_lock_irqsave(&dca_lock, flags);
105
106 if (list_empty(&dca_domains)) {
107 spin_unlock_irqrestore(&dca_lock, flags);
108 return;
109 }
110
111 /* at this point only one domain in the list is expected */
112 domain = list_first_entry(&dca_domains, struct dca_domain, node);
113
114 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
115 list_move(&dca->node, &unregistered_providers);
116
117 dca_free_domain(domain);
118
119 spin_unlock_irqrestore(&dca_lock, flags);
120
121 list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
122 dca_sysfs_remove_provider(dca);
123 list_del(&dca->node);
124 }
125}
126
127static struct dca_domain *dca_find_domain(struct pci_bus *rc)
128{
129 struct dca_domain *domain;
130
131 list_for_each_entry(domain, &dca_domains, node)
132 if (domain->pci_rc == rc)
133 return domain;
134
135 return NULL;
136}
137
138static struct dca_domain *dca_get_domain(struct device *dev)
139{
140 struct pci_bus *rc;
141 struct dca_domain *domain;
142
143 rc = dca_pci_rc_from_dev(dev);
144 domain = dca_find_domain(rc);
145
146 if (!domain) {
147 if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
148 dca_providers_blocked = 1;
149 } else {
150 domain = dca_allocate_domain(rc);
151 if (domain)
152 list_add(&domain->node, &dca_domains);
153 }
154 }
155
156 return domain;
157}
158
159static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
160{
161 struct dca_provider *dca;
162 struct pci_bus *rc;
163 struct dca_domain *domain;
164
165 if (dev) {
166 rc = dca_pci_rc_from_dev(dev);
167 domain = dca_find_domain(rc);
168 if (!domain)
169 return NULL;
170 } else {
171 if (!list_empty(&dca_domains))
172 domain = list_first_entry(&dca_domains,
173 struct dca_domain,
174 node);
175 else
176 return NULL;
177 }
178
179 list_for_each_entry(dca, &domain->dca_providers, node)
180 if ((!dev) || (dca->ops->dev_managed(dca, dev)))
181 return dca;
182
183 return NULL;
184}
185
186/**
187 * dca_add_requester - add a dca client to the list
188 * @dev - the device that wants dca service
189 */
190int dca_add_requester(struct device *dev)
191{
192 struct dca_provider *dca;
193 int err, slot = -ENODEV;
194 unsigned long flags;
195 struct pci_bus *pci_rc;
196 struct dca_domain *domain;
197
198 if (!dev)
199 return -EFAULT;
200
201 spin_lock_irqsave(&dca_lock, flags);
202
203 /* check if the requester has not been added already */
204 dca = dca_find_provider_by_dev(dev);
205 if (dca) {
206 spin_unlock_irqrestore(&dca_lock, flags);
207 return -EEXIST;
208 }
209
210 pci_rc = dca_pci_rc_from_dev(dev);
211 domain = dca_find_domain(pci_rc);
212 if (!domain) {
213 spin_unlock_irqrestore(&dca_lock, flags);
214 return -ENODEV;
215 }
216
217 list_for_each_entry(dca, &domain->dca_providers, node) {
218 slot = dca->ops->add_requester(dca, dev);
219 if (slot >= 0)
220 break;
221 }
222
223 spin_unlock_irqrestore(&dca_lock, flags);
224
225 if (slot < 0)
226 return slot;
227
228 err = dca_sysfs_add_req(dca, dev, slot);
229 if (err) {
230 spin_lock_irqsave(&dca_lock, flags);
231 if (dca == dca_find_provider_by_dev(dev))
232 dca->ops->remove_requester(dca, dev);
233 spin_unlock_irqrestore(&dca_lock, flags);
234 return err;
235 }
236
237 return 0;
238}
239EXPORT_SYMBOL_GPL(dca_add_requester);
240
241/**
242 * dca_remove_requester - remove a dca client from the list
243 * @dev - the device that wants dca service
244 */
245int dca_remove_requester(struct device *dev)
246{
247 struct dca_provider *dca;
248 int slot;
249 unsigned long flags;
250
251 if (!dev)
252 return -EFAULT;
253
254 spin_lock_irqsave(&dca_lock, flags);
255 dca = dca_find_provider_by_dev(dev);
256 if (!dca) {
257 spin_unlock_irqrestore(&dca_lock, flags);
258 return -ENODEV;
259 }
260 slot = dca->ops->remove_requester(dca, dev);
261 spin_unlock_irqrestore(&dca_lock, flags);
262
263 if (slot < 0)
264 return slot;
265
266 dca_sysfs_remove_req(dca, slot);
267
268 return 0;
269}
270EXPORT_SYMBOL_GPL(dca_remove_requester);
271
272/**
273 * dca_common_get_tag - return the dca tag (serves both new and old api)
274 * @dev - the device that wants dca service
275 * @cpu - the cpuid as returned by get_cpu()
276 */
277u8 dca_common_get_tag(struct device *dev, int cpu)
278{
279 struct dca_provider *dca;
280 u8 tag;
281 unsigned long flags;
282
283 spin_lock_irqsave(&dca_lock, flags);
284
285 dca = dca_find_provider_by_dev(dev);
286 if (!dca) {
287 spin_unlock_irqrestore(&dca_lock, flags);
288 return -ENODEV;
289 }
290 tag = dca->ops->get_tag(dca, dev, cpu);
291
292 spin_unlock_irqrestore(&dca_lock, flags);
293 return tag;
294}
295
296/**
297 * dca3_get_tag - return the dca tag to the requester device
298 * for the given cpu (new api)
299 * @dev - the device that wants dca service
300 * @cpu - the cpuid as returned by get_cpu()
301 */
302u8 dca3_get_tag(struct device *dev, int cpu)
303{
304 if (!dev)
305 return -EFAULT;
306
307 return dca_common_get_tag(dev, cpu);
308}
309EXPORT_SYMBOL_GPL(dca3_get_tag);
310
311/**
312 * dca_get_tag - return the dca tag for the given cpu (old api)
313 * @cpu - the cpuid as returned by get_cpu()
314 */
315u8 dca_get_tag(int cpu)
316{
317 struct device *dev = NULL;
318
319 return dca_common_get_tag(dev, cpu);
320}
321EXPORT_SYMBOL_GPL(dca_get_tag);
322
323/**
324 * alloc_dca_provider - get data struct for describing a dca provider
325 * @ops - pointer to struct of dca operation function pointers
326 * @priv_size - size of extra mem to be added for provider's needs
327 */
328struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
329{
330 struct dca_provider *dca;
331 int alloc_size;
332
333 alloc_size = (sizeof(*dca) + priv_size);
334 dca = kzalloc(alloc_size, GFP_KERNEL);
335 if (!dca)
336 return NULL;
337 dca->ops = ops;
338
339 return dca;
340}
341EXPORT_SYMBOL_GPL(alloc_dca_provider);
342
/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
353
354/**
355 * register_dca_provider - register a dca provider
356 * @dca - struct created by alloc_dca_provider()
357 * @dev - device providing dca services
358 */
359int register_dca_provider(struct dca_provider *dca, struct device *dev)
360{
361 int err;
362 unsigned long flags;
363 struct dca_domain *domain;
364
365 spin_lock_irqsave(&dca_lock, flags);
366 if (dca_providers_blocked) {
367 spin_unlock_irqrestore(&dca_lock, flags);
368 return -ENODEV;
369 }
370 spin_unlock_irqrestore(&dca_lock, flags);
371
372 err = dca_sysfs_add_provider(dca, dev);
373 if (err)
374 return err;
375
376 spin_lock_irqsave(&dca_lock, flags);
377 domain = dca_get_domain(dev);
378 if (!domain) {
379 if (dca_providers_blocked) {
380 spin_unlock_irqrestore(&dca_lock, flags);
381 dca_sysfs_remove_provider(dca);
382 unregister_dca_providers();
383 } else {
384 spin_unlock_irqrestore(&dca_lock, flags);
385 }
386 return -ENODEV;
387 }
388 list_add(&dca->node, &domain->dca_providers);
389 spin_unlock_irqrestore(&dca_lock, flags);
390
391 blocking_notifier_call_chain(&dca_provider_chain,
392 DCA_PROVIDER_ADD, NULL);
393 return 0;
394}
395EXPORT_SYMBOL_GPL(register_dca_provider);
396
397/**
398 * unregister_dca_provider - remove a dca provider
399 * @dca - struct created by alloc_dca_provider()
400 */
401void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
402{
403 unsigned long flags;
404 struct pci_bus *pci_rc;
405 struct dca_domain *domain;
406
407 blocking_notifier_call_chain(&dca_provider_chain,
408 DCA_PROVIDER_REMOVE, NULL);
409
410 spin_lock_irqsave(&dca_lock, flags);
411
412 list_del(&dca->node);
413
414 pci_rc = dca_pci_rc_from_dev(dev);
415 domain = dca_find_domain(pci_rc);
416 if (list_empty(&domain->dca_providers))
417 dca_free_domain(domain);
418
419 spin_unlock_irqrestore(&dca_lock, flags);
420
421 dca_sysfs_remove_provider(dca);
422}
423EXPORT_SYMBOL_GPL(unregister_dca_provider);
424
425/**
426 * dca_register_notify - register a client's notifier callback
427 */
428void dca_register_notify(struct notifier_block *nb)
429{
430 blocking_notifier_chain_register(&dca_provider_chain, nb);
431}
432EXPORT_SYMBOL_GPL(dca_register_notify);
433
434/**
435 * dca_unregister_notify - remove a client's notifier callback
436 */
437void dca_unregister_notify(struct notifier_block *nb)
438{
439 blocking_notifier_chain_unregister(&dca_provider_chain, nb);
440}
441EXPORT_SYMBOL_GPL(dca_unregister_notify);
442
443static int __init dca_init(void)
444{
445 pr_info("dca service started, version %s\n", DCA_VERSION);
446 return dca_sysfs_init();
447}
448
449static void __exit dca_exit(void)
450{
451 dca_sysfs_exit();
452}
453
454arch_initcall(dca_init);
455module_exit(dca_exit);
456
1/*
2 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59
16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING.
20 */
21
22/*
23 * This driver supports an interface for DCA clients and providers to meet.
24 */
25
26#include <linux/kernel.h>
27#include <linux/notifier.h>
28#include <linux/device.h>
29#include <linux/dca.h>
30#include <linux/slab.h>
31#include <linux/module.h>
32
33#define DCA_VERSION "1.12.1"
34
35MODULE_VERSION(DCA_VERSION);
36MODULE_LICENSE("GPL");
37MODULE_AUTHOR("Intel Corporation");
38
39static DEFINE_RAW_SPINLOCK(dca_lock);
40
41static LIST_HEAD(dca_domains);
42
43static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
44
45static int dca_providers_blocked;
46
47static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
48{
49 struct pci_dev *pdev = to_pci_dev(dev);
50 struct pci_bus *bus = pdev->bus;
51
52 while (bus->parent)
53 bus = bus->parent;
54
55 return bus;
56}
57
58static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
59{
60 struct dca_domain *domain;
61
62 domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
63 if (!domain)
64 return NULL;
65
66 INIT_LIST_HEAD(&domain->dca_providers);
67 domain->pci_rc = rc;
68
69 return domain;
70}
71
72static void dca_free_domain(struct dca_domain *domain)
73{
74 list_del(&domain->node);
75 kfree(domain);
76}
77
78static int dca_provider_ioat_ver_3_0(struct device *dev)
79{
80 struct pci_dev *pdev = to_pci_dev(dev);
81
82 return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
83 ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
84 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
85 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
86 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
87 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
88 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
89 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
90 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
91}
92
93static void unregister_dca_providers(void)
94{
95 struct dca_provider *dca, *_dca;
96 struct list_head unregistered_providers;
97 struct dca_domain *domain;
98 unsigned long flags;
99
100 blocking_notifier_call_chain(&dca_provider_chain,
101 DCA_PROVIDER_REMOVE, NULL);
102
103 INIT_LIST_HEAD(&unregistered_providers);
104
105 raw_spin_lock_irqsave(&dca_lock, flags);
106
107 if (list_empty(&dca_domains)) {
108 raw_spin_unlock_irqrestore(&dca_lock, flags);
109 return;
110 }
111
112 /* at this point only one domain in the list is expected */
113 domain = list_first_entry(&dca_domains, struct dca_domain, node);
114
115 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
116 list_move(&dca->node, &unregistered_providers);
117
118 dca_free_domain(domain);
119
120 raw_spin_unlock_irqrestore(&dca_lock, flags);
121
122 list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
123 dca_sysfs_remove_provider(dca);
124 list_del(&dca->node);
125 }
126}
127
128static struct dca_domain *dca_find_domain(struct pci_bus *rc)
129{
130 struct dca_domain *domain;
131
132 list_for_each_entry(domain, &dca_domains, node)
133 if (domain->pci_rc == rc)
134 return domain;
135
136 return NULL;
137}
138
139static struct dca_domain *dca_get_domain(struct device *dev)
140{
141 struct pci_bus *rc;
142 struct dca_domain *domain;
143
144 rc = dca_pci_rc_from_dev(dev);
145 domain = dca_find_domain(rc);
146
147 if (!domain) {
148 if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
149 dca_providers_blocked = 1;
150 }
151
152 return domain;
153}
154
155static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
156{
157 struct dca_provider *dca;
158 struct pci_bus *rc;
159 struct dca_domain *domain;
160
161 if (dev) {
162 rc = dca_pci_rc_from_dev(dev);
163 domain = dca_find_domain(rc);
164 if (!domain)
165 return NULL;
166 } else {
167 if (!list_empty(&dca_domains))
168 domain = list_first_entry(&dca_domains,
169 struct dca_domain,
170 node);
171 else
172 return NULL;
173 }
174
175 list_for_each_entry(dca, &domain->dca_providers, node)
176 if ((!dev) || (dca->ops->dev_managed(dca, dev)))
177 return dca;
178
179 return NULL;
180}
181
182/**
183 * dca_add_requester - add a dca client to the list
184 * @dev - the device that wants dca service
185 */
186int dca_add_requester(struct device *dev)
187{
188 struct dca_provider *dca;
189 int err, slot = -ENODEV;
190 unsigned long flags;
191 struct pci_bus *pci_rc;
192 struct dca_domain *domain;
193
194 if (!dev)
195 return -EFAULT;
196
197 raw_spin_lock_irqsave(&dca_lock, flags);
198
199 /* check if the requester has not been added already */
200 dca = dca_find_provider_by_dev(dev);
201 if (dca) {
202 raw_spin_unlock_irqrestore(&dca_lock, flags);
203 return -EEXIST;
204 }
205
206 pci_rc = dca_pci_rc_from_dev(dev);
207 domain = dca_find_domain(pci_rc);
208 if (!domain) {
209 raw_spin_unlock_irqrestore(&dca_lock, flags);
210 return -ENODEV;
211 }
212
213 list_for_each_entry(dca, &domain->dca_providers, node) {
214 slot = dca->ops->add_requester(dca, dev);
215 if (slot >= 0)
216 break;
217 }
218
219 raw_spin_unlock_irqrestore(&dca_lock, flags);
220
221 if (slot < 0)
222 return slot;
223
224 err = dca_sysfs_add_req(dca, dev, slot);
225 if (err) {
226 raw_spin_lock_irqsave(&dca_lock, flags);
227 if (dca == dca_find_provider_by_dev(dev))
228 dca->ops->remove_requester(dca, dev);
229 raw_spin_unlock_irqrestore(&dca_lock, flags);
230 return err;
231 }
232
233 return 0;
234}
235EXPORT_SYMBOL_GPL(dca_add_requester);
236
237/**
238 * dca_remove_requester - remove a dca client from the list
239 * @dev - the device that wants dca service
240 */
241int dca_remove_requester(struct device *dev)
242{
243 struct dca_provider *dca;
244 int slot;
245 unsigned long flags;
246
247 if (!dev)
248 return -EFAULT;
249
250 raw_spin_lock_irqsave(&dca_lock, flags);
251 dca = dca_find_provider_by_dev(dev);
252 if (!dca) {
253 raw_spin_unlock_irqrestore(&dca_lock, flags);
254 return -ENODEV;
255 }
256 slot = dca->ops->remove_requester(dca, dev);
257 raw_spin_unlock_irqrestore(&dca_lock, flags);
258
259 if (slot < 0)
260 return slot;
261
262 dca_sysfs_remove_req(dca, slot);
263
264 return 0;
265}
266EXPORT_SYMBOL_GPL(dca_remove_requester);
267
268/**
269 * dca_common_get_tag - return the dca tag (serves both new and old api)
270 * @dev - the device that wants dca service
271 * @cpu - the cpuid as returned by get_cpu()
272 */
273u8 dca_common_get_tag(struct device *dev, int cpu)
274{
275 struct dca_provider *dca;
276 u8 tag;
277 unsigned long flags;
278
279 raw_spin_lock_irqsave(&dca_lock, flags);
280
281 dca = dca_find_provider_by_dev(dev);
282 if (!dca) {
283 raw_spin_unlock_irqrestore(&dca_lock, flags);
284 return -ENODEV;
285 }
286 tag = dca->ops->get_tag(dca, dev, cpu);
287
288 raw_spin_unlock_irqrestore(&dca_lock, flags);
289 return tag;
290}
291
292/**
293 * dca3_get_tag - return the dca tag to the requester device
294 * for the given cpu (new api)
295 * @dev - the device that wants dca service
296 * @cpu - the cpuid as returned by get_cpu()
297 */
298u8 dca3_get_tag(struct device *dev, int cpu)
299{
300 if (!dev)
301 return -EFAULT;
302
303 return dca_common_get_tag(dev, cpu);
304}
305EXPORT_SYMBOL_GPL(dca3_get_tag);
306
307/**
308 * dca_get_tag - return the dca tag for the given cpu (old api)
309 * @cpu - the cpuid as returned by get_cpu()
310 */
311u8 dca_get_tag(int cpu)
312{
313 struct device *dev = NULL;
314
315 return dca_common_get_tag(dev, cpu);
316}
317EXPORT_SYMBOL_GPL(dca_get_tag);
318
319/**
320 * alloc_dca_provider - get data struct for describing a dca provider
321 * @ops - pointer to struct of dca operation function pointers
322 * @priv_size - size of extra mem to be added for provider's needs
323 */
324struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
325 int priv_size)
326{
327 struct dca_provider *dca;
328 int alloc_size;
329
330 alloc_size = (sizeof(*dca) + priv_size);
331 dca = kzalloc(alloc_size, GFP_KERNEL);
332 if (!dca)
333 return NULL;
334 dca->ops = ops;
335
336 return dca;
337}
338EXPORT_SYMBOL_GPL(alloc_dca_provider);
339
/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);
350
351/**
352 * register_dca_provider - register a dca provider
353 * @dca - struct created by alloc_dca_provider()
354 * @dev - device providing dca services
355 */
356int register_dca_provider(struct dca_provider *dca, struct device *dev)
357{
358 int err;
359 unsigned long flags;
360 struct dca_domain *domain, *newdomain = NULL;
361
362 raw_spin_lock_irqsave(&dca_lock, flags);
363 if (dca_providers_blocked) {
364 raw_spin_unlock_irqrestore(&dca_lock, flags);
365 return -ENODEV;
366 }
367 raw_spin_unlock_irqrestore(&dca_lock, flags);
368
369 err = dca_sysfs_add_provider(dca, dev);
370 if (err)
371 return err;
372
373 raw_spin_lock_irqsave(&dca_lock, flags);
374 domain = dca_get_domain(dev);
375 if (!domain) {
376 struct pci_bus *rc;
377
378 if (dca_providers_blocked) {
379 raw_spin_unlock_irqrestore(&dca_lock, flags);
380 dca_sysfs_remove_provider(dca);
381 unregister_dca_providers();
382 return -ENODEV;
383 }
384
385 raw_spin_unlock_irqrestore(&dca_lock, flags);
386 rc = dca_pci_rc_from_dev(dev);
387 newdomain = dca_allocate_domain(rc);
388 if (!newdomain)
389 return -ENODEV;
390 raw_spin_lock_irqsave(&dca_lock, flags);
391 /* Recheck, we might have raced after dropping the lock */
392 domain = dca_get_domain(dev);
393 if (!domain) {
394 domain = newdomain;
395 newdomain = NULL;
396 list_add(&domain->node, &dca_domains);
397 }
398 }
399 list_add(&dca->node, &domain->dca_providers);
400 raw_spin_unlock_irqrestore(&dca_lock, flags);
401
402 blocking_notifier_call_chain(&dca_provider_chain,
403 DCA_PROVIDER_ADD, NULL);
404 kfree(newdomain);
405 return 0;
406}
407EXPORT_SYMBOL_GPL(register_dca_provider);
408
409/**
410 * unregister_dca_provider - remove a dca provider
411 * @dca - struct created by alloc_dca_provider()
412 */
413void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
414{
415 unsigned long flags;
416 struct pci_bus *pci_rc;
417 struct dca_domain *domain;
418
419 blocking_notifier_call_chain(&dca_provider_chain,
420 DCA_PROVIDER_REMOVE, NULL);
421
422 raw_spin_lock_irqsave(&dca_lock, flags);
423
424 if (list_empty(&dca_domains)) {
425 raw_spin_unlock_irqrestore(&dca_lock, flags);
426 return;
427 }
428
429 list_del(&dca->node);
430
431 pci_rc = dca_pci_rc_from_dev(dev);
432 domain = dca_find_domain(pci_rc);
433 if (list_empty(&domain->dca_providers))
434 dca_free_domain(domain);
435
436 raw_spin_unlock_irqrestore(&dca_lock, flags);
437
438 dca_sysfs_remove_provider(dca);
439}
440EXPORT_SYMBOL_GPL(unregister_dca_provider);
441
442/**
443 * dca_register_notify - register a client's notifier callback
444 */
445void dca_register_notify(struct notifier_block *nb)
446{
447 blocking_notifier_chain_register(&dca_provider_chain, nb);
448}
449EXPORT_SYMBOL_GPL(dca_register_notify);
450
451/**
452 * dca_unregister_notify - remove a client's notifier callback
453 */
454void dca_unregister_notify(struct notifier_block *nb)
455{
456 blocking_notifier_chain_unregister(&dca_provider_chain, nb);
457}
458EXPORT_SYMBOL_GPL(dca_unregister_notify);
459
460static int __init dca_init(void)
461{
462 pr_info("dca service started, version %s\n", DCA_VERSION);
463 return dca_sysfs_init();
464}
465
466static void __exit dca_exit(void)
467{
468 dca_sysfs_exit();
469}
470
471arch_initcall(dca_init);
472module_exit(dca_exit);
473