// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_DESCRIPTION("Intel Direct Cache Access (DCA) service module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

static int dca_providers_blocked;

static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_bus *bus = pdev->bus;

        while (bus->parent)
                bus = bus->parent;

        return bus;
}

static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
        struct dca_domain *domain;

        domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
        if (!domain)
                return NULL;

        INIT_LIST_HEAD(&domain->dca_providers);
        domain->pci_rc = rc;

        return domain;
}

static void dca_free_domain(struct dca_domain *domain)
{
        list_del(&domain->node);
        kfree(domain);
}

static int dca_provider_ioat_ver_3_0(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
                ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
                 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

static void unregister_dca_providers(void)
{
        struct dca_provider *dca, *_dca;
        struct list_head unregistered_providers;
        struct dca_domain *domain;
        unsigned long flags;

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_REMOVE, NULL);

        INIT_LIST_HEAD(&unregistered_providers);

        raw_spin_lock_irqsave(&dca_lock, flags);

        if (list_empty(&dca_domains)) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return;
        }

        /* at this point only one domain in the list is expected */
        domain = list_first_entry(&dca_domains, struct dca_domain, node);

        list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
                list_move(&dca->node, &unregistered_providers);

        dca_free_domain(domain);

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
                dca_sysfs_remove_provider(dca);
                list_del(&dca->node);
        }
}

static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
        struct dca_domain *domain;

        list_for_each_entry(domain, &dca_domains, node)
                if (domain->pci_rc == rc)
                        return domain;

        return NULL;
}

static struct dca_domain *dca_get_domain(struct device *dev)
{
        struct pci_bus *rc;
        struct dca_domain *domain;

        rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(rc);

        if (!domain) {
                if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
                        dca_providers_blocked = 1;
        }

        return domain;
}

static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
        struct dca_provider *dca;
        struct pci_bus *rc;
        struct dca_domain *domain;

        if (dev) {
                rc = dca_pci_rc_from_dev(dev);
                domain = dca_find_domain(rc);
                if (!domain)
                        return NULL;
        } else {
                if (!list_empty(&dca_domains))
                        domain = list_first_entry(&dca_domains,
                                                  struct dca_domain,
                                                  node);
                else
                        return NULL;
        }

        list_for_each_entry(dca, &domain->dca_providers, node)
                if ((!dev) || (dca->ops->dev_managed(dca, dev)))
                        return dca;

        return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
        struct dca_provider *dca;
        int err, slot = -ENODEV;
        unsigned long flags;
        struct pci_bus *pci_rc;
        struct dca_domain *domain;

        if (!dev)
                return -EFAULT;

        raw_spin_lock_irqsave(&dca_lock, flags);

        /* check if the requester has not been added already */
        dca = dca_find_provider_by_dev(dev);
        if (dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -EEXIST;
        }

        pci_rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(pci_rc);
        if (!domain) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }

        list_for_each_entry(dca, &domain->dca_providers, node) {
                slot = dca->ops->add_requester(dca, dev);
                if (slot >= 0)
                        break;
        }

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        if (slot < 0)
                return slot;

        err = dca_sysfs_add_req(dca, dev, slot);
        if (err) {
                raw_spin_lock_irqsave(&dca_lock, flags);
                if (dca == dca_find_provider_by_dev(dev))
                        dca->ops->remove_requester(dca, dev);
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
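
/*
 * Illustrative sketch only (not taken from an in-tree driver): a hypothetical
 * client would typically bracket its use of DCA tags with
 * dca_add_requester()/dca_remove_requester() and fetch a per-CPU tag with
 * dca3_get_tag(); here 'pdev' stands for the client's own struct pci_dev:
 *
 *      if (dca_add_requester(&pdev->dev) == 0) {
 *              int cpu = get_cpu();
 *              u8 tag = dca3_get_tag(&pdev->dev, cpu);
 *
 *              put_cpu();
 *              ...program 'tag' into the device's DCA control registers...
 *      }
 *      ...
 *      dca_remove_requester(&pdev->dev);
 */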

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
        struct dca_provider *dca;
        int slot;
        unsigned long flags;

        if (!dev)
                return -EFAULT;

        raw_spin_lock_irqsave(&dca_lock, flags);
        dca = dca_find_provider_by_dev(dev);
        if (!dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        slot = dca->ops->remove_requester(dca, dev);
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        if (slot < 0)
                return slot;

        dca_sysfs_remove_req(dca, slot);

        return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
        struct dca_provider *dca;
        u8 tag;
        unsigned long flags;

        raw_spin_lock_irqsave(&dca_lock, flags);

        dca = dca_find_provider_by_dev(dev);
        if (!dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        tag = dca->ops->get_tag(dca, dev, cpu);

        raw_spin_unlock_irqrestore(&dca_lock, flags);
        return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 * for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
        if (!dev)
                return -EFAULT;

        return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
        return dca_common_get_tag(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
                                        int priv_size)
{
        struct dca_provider *dca;
        int alloc_size;

        alloc_size = (sizeof(*dca) + priv_size);
        dca = kzalloc(alloc_size, GFP_KERNEL);
        if (!dca)
                return NULL;
        dca->ops = ops;

        return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
        kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
        int err;
        unsigned long flags;
        struct dca_domain *domain, *newdomain = NULL;

        raw_spin_lock_irqsave(&dca_lock, flags);
        if (dca_providers_blocked) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        err = dca_sysfs_add_provider(dca, dev);
        if (err)
                return err;

        raw_spin_lock_irqsave(&dca_lock, flags);
        domain = dca_get_domain(dev);
        if (!domain) {
                struct pci_bus *rc;

                if (dca_providers_blocked) {
                        raw_spin_unlock_irqrestore(&dca_lock, flags);
                        dca_sysfs_remove_provider(dca);
                        unregister_dca_providers();
                        return -ENODEV;
                }

                raw_spin_unlock_irqrestore(&dca_lock, flags);
                rc = dca_pci_rc_from_dev(dev);
                newdomain = dca_allocate_domain(rc);
                if (!newdomain)
                        return -ENODEV;
                raw_spin_lock_irqsave(&dca_lock, flags);
                /* Recheck, we might have raced after dropping the lock */
                domain = dca_get_domain(dev);
                if (!domain) {
                        domain = newdomain;
                        newdomain = NULL;
                        list_add(&domain->node, &dca_domains);
                }
        }
        list_add(&dca->node, &domain->dca_providers);
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_ADD, NULL);
        kfree(newdomain);
        return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
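
/*
 * Illustrative sketch only: a hypothetical provider driver, with its own
 * operations table 'my_dca_ops' and private state 'struct my_priv' (made-up
 * names), would drive the alloc/register/unregister/free lifecycle roughly as:
 *
 *      struct dca_provider *dca;
 *      int err;
 *
 *      dca = alloc_dca_provider(&my_dca_ops, sizeof(struct my_priv));
 *      if (!dca)
 *              return -ENOMEM;
 *      err = register_dca_provider(dca, &pdev->dev);
 *      if (err) {
 *              free_dca_provider(dca);
 *              return err;
 *      }
 *      ...
 *      unregister_dca_provider(dca, &pdev->dev);
 *      free_dca_provider(dca);
 */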

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
        unsigned long flags;
        struct pci_bus *pci_rc;
        struct dca_domain *domain;

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_REMOVE, NULL);

        raw_spin_lock_irqsave(&dca_lock, flags);

        if (list_empty(&dca_domains)) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return;
        }

        list_del(&dca->node);

        pci_rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(pci_rc);
        if (list_empty(&domain->dca_providers))
                dca_free_domain(domain);

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);

/**
 * dca_register_notify - register a client's notifier callback
 * @nb - notifier block for the client's callback
 */
void dca_register_notify(struct notifier_block *nb)
{
        blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb - notifier block previously passed to dca_register_notify()
 */
void dca_unregister_notify(struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
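
/*
 * Illustrative sketch only: a hypothetical client usually watches for
 * providers coming and going on the notifier chain and (re-)adds itself on
 * DCA_PROVIDER_ADD. 'my_dca_notify', 'my_dca_nb' and 'my_pdev' are made-up
 * names for this example:
 *
 *      static int my_dca_notify(struct notifier_block *nb,
 *                               unsigned long event, void *data)
 *      {
 *              if (event == DCA_PROVIDER_ADD)
 *                      dca_add_requester(&my_pdev->dev);
 *              else if (event == DCA_PROVIDER_REMOVE)
 *                      dca_remove_requester(&my_pdev->dev);
 *              return 0;
 *      }
 *
 *      static struct notifier_block my_dca_nb = {
 *              .notifier_call = my_dca_notify,
 *      };
 *
 *      dca_register_notify(&my_dca_nb);
 *      ...
 *      dca_unregister_notify(&my_dca_nb);
 */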

static int __init dca_init(void)
{
        pr_info("dca service started, version %s\n", DCA_VERSION);
        return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
        dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);