1/*
2 * padata.c - generic interface to process data streams in parallel
3 *
4 * See Documentation/padata.txt for API documentation.
5 *
6 * Copyright (C) 2008, 2009 secunet Security Networks AG
7 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23#include <linux/export.h>
24#include <linux/cpumask.h>
25#include <linux/err.h>
26#include <linux/cpu.h>
27#include <linux/padata.h>
28#include <linux/mutex.h>
29#include <linux/sched.h>
30#include <linux/slab.h>
31#include <linux/sysfs.h>
32#include <linux/rcupdate.h>
33#include <linux/module.h>
34
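/*
 * MAX_OBJ_NUM bounds the number of objects that a single parallel_data
 * instance will accept at a time: padata_do_parallel() below returns
 * -EBUSY once pd->refcnt has reached this value, so submitters get
 * simple backpressure instead of unbounded queueing.
 */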
35#define MAX_OBJ_NUM 1000
36
37static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
38{
39 int cpu, target_cpu;
40
41 target_cpu = cpumask_first(pd->cpumask.pcpu);
42 for (cpu = 0; cpu < cpu_index; cpu++)
43 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);
44
45 return target_cpu;
46}
47
48static int padata_cpu_hash(struct parallel_data *pd)
49{
50 unsigned int seq_nr;
51 int cpu_index;
52
53 /*
54	 * Hash the sequence numbers to the cpus by taking
55	 * seq_nr modulo the number of cpus in use.
56 */
57
58 seq_nr = atomic_inc_return(&pd->seq_nr);
59 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
60
61 return padata_index_to_cpu(pd, cpu_index);
62}
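/*
 * Worked example (illustrative only, not part of the code): with
 * cpumask.pcpu = {1, 4, 7} the weight is 3, so sequence numbers
 * 0, 1, 2, 3, 4, ... hash to cpu_index 0, 1, 2, 0, 1, ..., which
 * padata_index_to_cpu() maps to cpus 1, 4, 7, 1, 4, ...
 * Objects are therefore spread round-robin over the parallel cpumask,
 * and padata_get_next() later uses the same modulo arithmetic on
 * pd->processed to find the queue that must hold the next object in
 * submission order.
 */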
63
64static void padata_parallel_worker(struct work_struct *parallel_work)
65{
66 struct padata_parallel_queue *pqueue;
67 LIST_HEAD(local_list);
68
69 local_bh_disable();
70 pqueue = container_of(parallel_work,
71 struct padata_parallel_queue, work);
72
73 spin_lock(&pqueue->parallel.lock);
74 list_replace_init(&pqueue->parallel.list, &local_list);
75 spin_unlock(&pqueue->parallel.lock);
76
77 while (!list_empty(&local_list)) {
78 struct padata_priv *padata;
79
80 padata = list_entry(local_list.next,
81 struct padata_priv, list);
82
83 list_del_init(&padata->list);
84
85 padata->parallel(padata);
86 }
87
88 local_bh_enable();
89}
90
91/**
92 * padata_do_parallel - padata parallelization function
93 *
94 * @pinst: padata instance
95 * @padata: object to be parallelized
96 * @cb_cpu: cpu the serialization callback function will run on,
97 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
98 *
99 * The parallelization callback function will run with BHs off.
100 * Note: Every object which is parallelized by padata_do_parallel
101 * must be seen by padata_do_serial.
102 */
103int padata_do_parallel(struct padata_instance *pinst,
104 struct padata_priv *padata, int cb_cpu)
105{
106 int target_cpu, err;
107 struct padata_parallel_queue *queue;
108 struct parallel_data *pd;
109
110 rcu_read_lock_bh();
111
112 pd = rcu_dereference_bh(pinst->pd);
113
114 err = -EINVAL;
115 if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
116 goto out;
117
118 if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
119 goto out;
120
121 err = -EBUSY;
122 if ((pinst->flags & PADATA_RESET))
123 goto out;
124
125 if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
126 goto out;
127
128 err = 0;
129 atomic_inc(&pd->refcnt);
130 padata->pd = pd;
131 padata->cb_cpu = cb_cpu;
132
133 target_cpu = padata_cpu_hash(pd);
134 queue = per_cpu_ptr(pd->pqueue, target_cpu);
135
136 spin_lock(&queue->parallel.lock);
137 list_add_tail(&padata->list, &queue->parallel.list);
138 spin_unlock(&queue->parallel.lock);
139
140 queue_work_on(target_cpu, pinst->wq, &queue->work);
141
142out:
143 rcu_read_unlock_bh();
144
145 return err;
146}
147EXPORT_SYMBOL(padata_do_parallel);
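/*
 * Illustrative sketch of a submitter (not part of padata itself; the
 * request structure and function names are hypothetical). A user embeds
 * struct padata_priv in its own object, fills in the parallel and serial
 * callbacks and then submits the object:
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						struct my_request, padata);
 *
 *		do_cpu_intensive_work(req);	// runs with BHs off
 *		padata_do_serial(padata);	// hand back for reordering
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(pinst, &req->padata, cb_cpu);
 *
 * On -EBUSY the instance is resetting or already holds MAX_OBJ_NUM
 * objects, so the caller may retry or process the request synchronously.
 */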
148
149/*
150 * padata_get_next - Get the next object that needs serialization.
151 *
152 * Return values are:
153 *
154 * A pointer to the control struct of the next object that needs
155 * serialization, if present in one of the percpu reorder queues.
156 *
157 * NULL, if all percpu reorder queues are empty.
158 *
159 * -EINPROGRESS, if the next object that needs serialization will
160 * be parallel processed by another cpu and is not yet present in
161 * the cpu's reorder queue.
162 *
163 * -ENODATA, if this cpu has to do the parallel processing for
164 * the next object.
165 */
166static struct padata_priv *padata_get_next(struct parallel_data *pd)
167{
168 int cpu, num_cpus;
169 unsigned int next_nr, next_index;
170 struct padata_parallel_queue *next_queue;
171 struct padata_priv *padata;
172 struct padata_list *reorder;
173
174 num_cpus = cpumask_weight(pd->cpumask.pcpu);
175
176 /*
177 * Calculate the percpu reorder queue and the sequence
178 * number of the next object.
179 */
180 next_nr = pd->processed;
181 next_index = next_nr % num_cpus;
182 cpu = padata_index_to_cpu(pd, next_index);
183 next_queue = per_cpu_ptr(pd->pqueue, cpu);
184
185 padata = NULL;
186
187 reorder = &next_queue->reorder;
188
189 spin_lock(&reorder->lock);
190 if (!list_empty(&reorder->list)) {
191 padata = list_entry(reorder->list.next,
192 struct padata_priv, list);
193
194 list_del_init(&padata->list);
195 atomic_dec(&pd->reorder_objects);
196
197 pd->processed++;
198
199 spin_unlock(&reorder->lock);
200 goto out;
201 }
202 spin_unlock(&reorder->lock);
203
204 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
205 padata = ERR_PTR(-ENODATA);
206 goto out;
207 }
208
209 padata = ERR_PTR(-EINPROGRESS);
210out:
211 return padata;
212}
213
214static void padata_reorder(struct parallel_data *pd)
215{
216 int cb_cpu;
217 struct padata_priv *padata;
218 struct padata_serial_queue *squeue;
219 struct padata_instance *pinst = pd->pinst;
220
221 /*
222	 * We need to ensure that only one cpu can work on dequeueing of
223	 * the reorder queue at a time. Calculating in which percpu reorder
224	 * queue the next object will arrive takes some time. A spinlock
225	 * would be highly contended. Also it is not clear in which order
226	 * the objects arrive at the reorder queues. So a cpu could wait to
227	 * get the lock just to notice that there is nothing to do at the
228	 * moment. Therefore we use a trylock and let the holder of the lock
229	 * care for all the objects enqueued during the hold time of the lock.
230 */
231 if (!spin_trylock_bh(&pd->lock))
232 return;
233
234 while (1) {
235 padata = padata_get_next(pd);
236
237 /*
238 * All reorder queues are empty, or the next object that needs
239 * serialization is parallel processed by another cpu and is
240		 * still on its way to the cpu's reorder queue, nothing to
241 * do for now.
242 */
243 if (!padata || PTR_ERR(padata) == -EINPROGRESS)
244 break;
245
246 /*
247 * This cpu has to do the parallel processing of the next
248 * object. It's waiting in the cpu's parallelization queue,
249 * so exit immediately.
250 */
251 if (PTR_ERR(padata) == -ENODATA) {
252 del_timer(&pd->timer);
253 spin_unlock_bh(&pd->lock);
254 return;
255 }
256
257 cb_cpu = padata->cb_cpu;
258 squeue = per_cpu_ptr(pd->squeue, cb_cpu);
259
260 spin_lock(&squeue->serial.lock);
261 list_add_tail(&padata->list, &squeue->serial.list);
262 spin_unlock(&squeue->serial.lock);
263
264 queue_work_on(cb_cpu, pinst->wq, &squeue->work);
265 }
266
267 spin_unlock_bh(&pd->lock);
268
269 /*
270	 * The next object that needs serialization might have arrived at
271	 * the reorder queues in the meantime; we will be called again
272 * from the timer function if no one else cares for it.
273 */
274 if (atomic_read(&pd->reorder_objects)
275 && !(pinst->flags & PADATA_RESET))
276 mod_timer(&pd->timer, jiffies + HZ);
277 else
278 del_timer(&pd->timer);
279
280 return;
281}
282
283static void padata_reorder_timer(unsigned long arg)
284{
285 struct parallel_data *pd = (struct parallel_data *)arg;
286
287 padata_reorder(pd);
288}
289
290static void padata_serial_worker(struct work_struct *serial_work)
291{
292 struct padata_serial_queue *squeue;
293 struct parallel_data *pd;
294 LIST_HEAD(local_list);
295
296 local_bh_disable();
297 squeue = container_of(serial_work, struct padata_serial_queue, work);
298 pd = squeue->pd;
299
300 spin_lock(&squeue->serial.lock);
301 list_replace_init(&squeue->serial.list, &local_list);
302 spin_unlock(&squeue->serial.lock);
303
304 while (!list_empty(&local_list)) {
305 struct padata_priv *padata;
306
307 padata = list_entry(local_list.next,
308 struct padata_priv, list);
309
310 list_del_init(&padata->list);
311
312 padata->serial(padata);
313 atomic_dec(&pd->refcnt);
314 }
315 local_bh_enable();
316}
317
318/**
319 * padata_do_serial - padata serialization function
320 *
321 * @padata: object to be serialized.
322 *
323 * padata_do_serial must be called for every parallelized object.
324 * The serialization callback function will run with BHs off.
325 */
326void padata_do_serial(struct padata_priv *padata)
327{
328 int cpu;
329 struct padata_parallel_queue *pqueue;
330 struct parallel_data *pd;
331
332 pd = padata->pd;
333
334 cpu = get_cpu();
335 pqueue = per_cpu_ptr(pd->pqueue, cpu);
336
337 spin_lock(&pqueue->reorder.lock);
338 atomic_inc(&pd->reorder_objects);
339 list_add_tail(&padata->list, &pqueue->reorder.list);
340 spin_unlock(&pqueue->reorder.lock);
341
342 put_cpu();
343
344 padata_reorder(pd);
345}
346EXPORT_SYMBOL(padata_do_serial);
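/*
 * Sketch of the matching serialization callback (illustrative only;
 * my_serial and my_request are hypothetical names). It is invoked from
 * padata_serial_worker() on the cb_cpu requested at submission time, in
 * the original submission order and with BHs disabled:
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						struct my_request, padata);
 *
 *		complete_request_in_order(req);
 *	}
 */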
347
348static int padata_setup_cpumasks(struct parallel_data *pd,
349 const struct cpumask *pcpumask,
350 const struct cpumask *cbcpumask)
351{
352 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
353 return -ENOMEM;
354
355 cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
356 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
357		free_cpumask_var(pd->cpumask.pcpu);
358 return -ENOMEM;
359 }
360
361 cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
362 return 0;
363}
364
365static void __padata_list_init(struct padata_list *pd_list)
366{
367 INIT_LIST_HEAD(&pd_list->list);
368 spin_lock_init(&pd_list->lock);
369}
370
371/* Initialize all percpu queues used by serial workers */
372static void padata_init_squeues(struct parallel_data *pd)
373{
374 int cpu;
375 struct padata_serial_queue *squeue;
376
377 for_each_cpu(cpu, pd->cpumask.cbcpu) {
378 squeue = per_cpu_ptr(pd->squeue, cpu);
379 squeue->pd = pd;
380 __padata_list_init(&squeue->serial);
381 INIT_WORK(&squeue->work, padata_serial_worker);
382 }
383}
384
385/* Initialize all percpu queues used by parallel workers */
386static void padata_init_pqueues(struct parallel_data *pd)
387{
388 int cpu_index, cpu;
389 struct padata_parallel_queue *pqueue;
390
391 cpu_index = 0;
392 for_each_cpu(cpu, pd->cpumask.pcpu) {
393 pqueue = per_cpu_ptr(pd->pqueue, cpu);
394 pqueue->pd = pd;
395 pqueue->cpu_index = cpu_index;
396 cpu_index++;
397
398 __padata_list_init(&pqueue->reorder);
399 __padata_list_init(&pqueue->parallel);
400 INIT_WORK(&pqueue->work, padata_parallel_worker);
401 atomic_set(&pqueue->num_obj, 0);
402 }
403}
404
405/* Allocate and initialize the internal cpumask-dependent resources. */
406static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
407 const struct cpumask *pcpumask,
408 const struct cpumask *cbcpumask)
409{
410 struct parallel_data *pd;
411
412 pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
413 if (!pd)
414 goto err;
415
416 pd->pqueue = alloc_percpu(struct padata_parallel_queue);
417 if (!pd->pqueue)
418 goto err_free_pd;
419
420 pd->squeue = alloc_percpu(struct padata_serial_queue);
421 if (!pd->squeue)
422 goto err_free_pqueue;
423 if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
424 goto err_free_squeue;
425
426 padata_init_pqueues(pd);
427 padata_init_squeues(pd);
428 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
429 atomic_set(&pd->seq_nr, -1);
430 atomic_set(&pd->reorder_objects, 0);
431 atomic_set(&pd->refcnt, 0);
432 pd->pinst = pinst;
433 spin_lock_init(&pd->lock);
434
435 return pd;
436
437err_free_squeue:
438 free_percpu(pd->squeue);
439err_free_pqueue:
440 free_percpu(pd->pqueue);
441err_free_pd:
442 kfree(pd);
443err:
444 return NULL;
445}
446
447static void padata_free_pd(struct parallel_data *pd)
448{
449 free_cpumask_var(pd->cpumask.pcpu);
450 free_cpumask_var(pd->cpumask.cbcpu);
451 free_percpu(pd->pqueue);
452 free_percpu(pd->squeue);
453 kfree(pd);
454}
455
456/* Flush all objects out of the padata queues. */
457static void padata_flush_queues(struct parallel_data *pd)
458{
459 int cpu;
460 struct padata_parallel_queue *pqueue;
461 struct padata_serial_queue *squeue;
462
463 for_each_cpu(cpu, pd->cpumask.pcpu) {
464 pqueue = per_cpu_ptr(pd->pqueue, cpu);
465 flush_work(&pqueue->work);
466 }
467
468 del_timer_sync(&pd->timer);
469
470 if (atomic_read(&pd->reorder_objects))
471 padata_reorder(pd);
472
473 for_each_cpu(cpu, pd->cpumask.cbcpu) {
474 squeue = per_cpu_ptr(pd->squeue, cpu);
475 flush_work(&squeue->work);
476 }
477
478 BUG_ON(atomic_read(&pd->refcnt) != 0);
479}
480
481static void __padata_start(struct padata_instance *pinst)
482{
483 pinst->flags |= PADATA_INIT;
484}
485
486static void __padata_stop(struct padata_instance *pinst)
487{
488 if (!(pinst->flags & PADATA_INIT))
489 return;
490
491 pinst->flags &= ~PADATA_INIT;
492
493 synchronize_rcu();
494
495 get_online_cpus();
496 padata_flush_queues(pinst->pd);
497 put_online_cpus();
498}
499
500/* Replace the internal control structure with a new one. */
501static void padata_replace(struct padata_instance *pinst,
502 struct parallel_data *pd_new)
503{
504 struct parallel_data *pd_old = pinst->pd;
505 int notification_mask = 0;
506
507 pinst->flags |= PADATA_RESET;
508
509 rcu_assign_pointer(pinst->pd, pd_new);
510
511 synchronize_rcu();
512
513 if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
514 notification_mask |= PADATA_CPU_PARALLEL;
515 if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
516 notification_mask |= PADATA_CPU_SERIAL;
517
518 padata_flush_queues(pd_old);
519 padata_free_pd(pd_old);
520
521 if (notification_mask)
522 blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
523 notification_mask,
524 &pd_new->cpumask);
525
526 pinst->flags &= ~PADATA_RESET;
527}
528
529/**
530 * padata_register_cpumask_notifier - Registers a notifier that will be called
531 * if either pcpu or cbcpu or both cpumasks change.
532 *
533 * @pinst: A pointer to the padata instance
534 * @nblock: A pointer to notifier block.
535 */
536int padata_register_cpumask_notifier(struct padata_instance *pinst,
537 struct notifier_block *nblock)
538{
539 return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
540 nblock);
541}
542EXPORT_SYMBOL(padata_register_cpumask_notifier);
543
544/**
545 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
546 * registered earlier using padata_register_cpumask_notifier
547 *
548 * @pinst: A pointer to the padata instance.
549 * @nblock: A pointer to the notifier block.
550 */
551int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
552 struct notifier_block *nblock)
553{
554 return blocking_notifier_chain_unregister(
555 &pinst->cpumask_change_notifier,
556 nblock);
557}
558EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
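/*
 * Illustrative sketch of a cpumask change notifier (names are
 * hypothetical). The callback runs from the blocking notifier chain;
 * @val carries the notification mask (PADATA_CPU_PARALLEL and/or
 * PADATA_CPU_SERIAL) and @data points to the new struct padata_cpumask:
 *
 *	static int my_cpumask_change(struct notifier_block *self,
 *				     unsigned long val, void *data)
 *	{
 *		struct padata_cpumask *new_masks = data;
 *
 *		if (val & PADATA_CPU_PARALLEL)
 *			react_to_parallel_mask(new_masks->pcpu);
 *		if (val & PADATA_CPU_SERIAL)
 *			react_to_serial_mask(new_masks->cbcpu);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nb);
 */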
559
560
561/* If cpumask contains no active cpu, we mark the instance as invalid. */
562static bool padata_validate_cpumask(struct padata_instance *pinst,
563 const struct cpumask *cpumask)
564{
565 if (!cpumask_intersects(cpumask, cpu_online_mask)) {
566 pinst->flags |= PADATA_INVALID;
567 return false;
568 }
569
570 pinst->flags &= ~PADATA_INVALID;
571 return true;
572}
573
574static int __padata_set_cpumasks(struct padata_instance *pinst,
575 cpumask_var_t pcpumask,
576 cpumask_var_t cbcpumask)
577{
578 int valid;
579 struct parallel_data *pd;
580
581 valid = padata_validate_cpumask(pinst, pcpumask);
582 if (!valid) {
583 __padata_stop(pinst);
584 goto out_replace;
585 }
586
587 valid = padata_validate_cpumask(pinst, cbcpumask);
588 if (!valid)
589 __padata_stop(pinst);
590
591out_replace:
592 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
593 if (!pd)
594 return -ENOMEM;
595
596 cpumask_copy(pinst->cpumask.pcpu, pcpumask);
597 cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
598
599 padata_replace(pinst, pd);
600
601 if (valid)
602 __padata_start(pinst);
603
604 return 0;
605}
606
607/**
608 * padata_set_cpumask - Set the cpumask specified by @cpumask_type to the
609 *                      value of @cpumask.
610 *
611 * @pinst: padata instance
612 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
613 *                serial or parallel cpumask respectively
614 * @cpumask: the cpumask to use
615 */
616int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
617 cpumask_var_t cpumask)
618{
619 struct cpumask *serial_mask, *parallel_mask;
620 int err = -EINVAL;
621
622 mutex_lock(&pinst->lock);
623 get_online_cpus();
624
625 switch (cpumask_type) {
626 case PADATA_CPU_PARALLEL:
627 serial_mask = pinst->cpumask.cbcpu;
628 parallel_mask = cpumask;
629 break;
630 case PADATA_CPU_SERIAL:
631 parallel_mask = pinst->cpumask.pcpu;
632 serial_mask = cpumask;
633 break;
634 default:
635 goto out;
636 }
637
638 err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
639
640out:
641 put_online_cpus();
642 mutex_unlock(&pinst->lock);
643
644 return err;
645}
646EXPORT_SYMBOL(padata_set_cpumask);
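/*
 * Illustrative use (sketch only, error handling trimmed): restrict the
 * parallel workers to cpus 0 and 1.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 *
 * padata copies the mask into the instance, so the caller may free it
 * right after the call returns.
 */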
647
648/**
649 * padata_start - start the parallel processing
650 *
651 * @pinst: padata instance to start
652 */
653int padata_start(struct padata_instance *pinst)
654{
655 int err = 0;
656
657 mutex_lock(&pinst->lock);
658
659 if (pinst->flags & PADATA_INVALID)
660 err = -EINVAL;
661
662 __padata_start(pinst);
663
664 mutex_unlock(&pinst->lock);
665
666 return err;
667}
668EXPORT_SYMBOL(padata_start);
669
670/**
671 * padata_stop - stop the parallel processing
672 *
673 * @pinst: padata instance to stop
674 */
675void padata_stop(struct padata_instance *pinst)
676{
677 mutex_lock(&pinst->lock);
678 __padata_stop(pinst);
679 mutex_unlock(&pinst->lock);
680}
681EXPORT_SYMBOL(padata_stop);
682
683#ifdef CONFIG_HOTPLUG_CPU
684
685static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
686{
687 struct parallel_data *pd;
688
689 if (cpumask_test_cpu(cpu, cpu_online_mask)) {
690 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
691 pinst->cpumask.cbcpu);
692 if (!pd)
693 return -ENOMEM;
694
695 padata_replace(pinst, pd);
696
697 if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
698 padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
699 __padata_start(pinst);
700 }
701
702 return 0;
703}
704
705static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
706{
707 struct parallel_data *pd = NULL;
708
709 if (cpumask_test_cpu(cpu, cpu_online_mask)) {
710
711 if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
712 !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
713 __padata_stop(pinst);
714
715 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
716 pinst->cpumask.cbcpu);
717 if (!pd)
718 return -ENOMEM;
719
720 padata_replace(pinst, pd);
721
722 cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
723 cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
724 }
725
726 return 0;
727}
728
729/**
730 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
731 *                     padata cpumasks.
732 *
733 * @pinst: padata instance
734 * @cpu: cpu to remove
735 * @mask: bitmask specifying from which cpumask @cpu should be removed
736 * The @mask may be any combination of the following flags:
737 * PADATA_CPU_SERIAL - serial cpumask
738 * PADATA_CPU_PARALLEL - parallel cpumask
739 */
740int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
741{
742 int err;
743
744 if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
745 return -EINVAL;
746
747 mutex_lock(&pinst->lock);
748
749 get_online_cpus();
750 if (mask & PADATA_CPU_SERIAL)
751 cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
752 if (mask & PADATA_CPU_PARALLEL)
753 cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);
754
755 err = __padata_remove_cpu(pinst, cpu);
756 put_online_cpus();
757
758 mutex_unlock(&pinst->lock);
759
760 return err;
761}
762EXPORT_SYMBOL(padata_remove_cpu);
763
764static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
765{
766 return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
767 cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
768}
769
770static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
771{
772 struct padata_instance *pinst;
773 int ret;
774
775 pinst = hlist_entry_safe(node, struct padata_instance, node);
776 if (!pinst_has_cpu(pinst, cpu))
777 return 0;
778
779 mutex_lock(&pinst->lock);
780 ret = __padata_add_cpu(pinst, cpu);
781 mutex_unlock(&pinst->lock);
782 return ret;
783}
784
785static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
786{
787 struct padata_instance *pinst;
788 int ret;
789
790 pinst = hlist_entry_safe(node, struct padata_instance, node);
791 if (!pinst_has_cpu(pinst, cpu))
792 return 0;
793
794 mutex_lock(&pinst->lock);
795 ret = __padata_remove_cpu(pinst, cpu);
796 mutex_unlock(&pinst->lock);
797 return ret;
798}
799
800static enum cpuhp_state hp_online;
801#endif
802
803static void __padata_free(struct padata_instance *pinst)
804{
805#ifdef CONFIG_HOTPLUG_CPU
806 cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
807#endif
808
809 padata_stop(pinst);
810 padata_free_pd(pinst->pd);
811 free_cpumask_var(pinst->cpumask.pcpu);
812 free_cpumask_var(pinst->cpumask.cbcpu);
813 kfree(pinst);
814}
815
816#define kobj2pinst(_kobj) \
817 container_of(_kobj, struct padata_instance, kobj)
818#define attr2pentry(_attr) \
819 container_of(_attr, struct padata_sysfs_entry, attr)
820
821static void padata_sysfs_release(struct kobject *kobj)
822{
823 struct padata_instance *pinst = kobj2pinst(kobj);
824 __padata_free(pinst);
825}
826
827struct padata_sysfs_entry {
828 struct attribute attr;
829 ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
830 ssize_t (*store)(struct padata_instance *, struct attribute *,
831 const char *, size_t);
832};
833
834static ssize_t show_cpumask(struct padata_instance *pinst,
835 struct attribute *attr, char *buf)
836{
837 struct cpumask *cpumask;
838 ssize_t len;
839
840 mutex_lock(&pinst->lock);
841 if (!strcmp(attr->name, "serial_cpumask"))
842 cpumask = pinst->cpumask.cbcpu;
843 else
844 cpumask = pinst->cpumask.pcpu;
845
846 len = snprintf(buf, PAGE_SIZE, "%*pb\n",
847 nr_cpu_ids, cpumask_bits(cpumask));
848 mutex_unlock(&pinst->lock);
849 return len < PAGE_SIZE ? len : -EINVAL;
850}
851
852static ssize_t store_cpumask(struct padata_instance *pinst,
853 struct attribute *attr,
854 const char *buf, size_t count)
855{
856 cpumask_var_t new_cpumask;
857 ssize_t ret;
858 int mask_type;
859
860 if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
861 return -ENOMEM;
862
863 ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
864 nr_cpumask_bits);
865 if (ret < 0)
866 goto out;
867
868 mask_type = !strcmp(attr->name, "serial_cpumask") ?
869 PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
870 ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
871 if (!ret)
872 ret = count;
873
874out:
875 free_cpumask_var(new_cpumask);
876 return ret;
877}
878
879#define PADATA_ATTR_RW(_name, _show_name, _store_name) \
880 static struct padata_sysfs_entry _name##_attr = \
881 __ATTR(_name, 0644, _show_name, _store_name)
882#define PADATA_ATTR_RO(_name, _show_name) \
883 static struct padata_sysfs_entry _name##_attr = \
884 __ATTR(_name, 0400, _show_name, NULL)
885
886PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
887PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
888
889/*
890 * Padata sysfs provides the following objects:
891 * serial_cpumask [RW] - cpumask for serial workers
892 * parallel_cpumask [RW] - cpumask for parallel workers
893 */
894static struct attribute *padata_default_attrs[] = {
895 &serial_cpumask_attr.attr,
896	&parallel_cpumask_attr.attr,
897 NULL,
898};
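/*
 * Both attributes accept and print cpumasks in the usual bitmap format
 * (e.g. writing "3" selects cpus 0 and 1). The exact sysfs path depends
 * on where the padata user adds pinst->kobj; padata only initializes the
 * kobject here (illustrative note, see show_cpumask()/store_cpumask()).
 */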
899
900static ssize_t padata_sysfs_show(struct kobject *kobj,
901 struct attribute *attr, char *buf)
902{
903 struct padata_instance *pinst;
904 struct padata_sysfs_entry *pentry;
905 ssize_t ret = -EIO;
906
907 pinst = kobj2pinst(kobj);
908 pentry = attr2pentry(attr);
909 if (pentry->show)
910 ret = pentry->show(pinst, attr, buf);
911
912 return ret;
913}
914
915static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
916 const char *buf, size_t count)
917{
918 struct padata_instance *pinst;
919 struct padata_sysfs_entry *pentry;
920 ssize_t ret = -EIO;
921
922 pinst = kobj2pinst(kobj);
923 pentry = attr2pentry(attr);
924	if (pentry->store)
925 ret = pentry->store(pinst, attr, buf, count);
926
927 return ret;
928}
929
930static const struct sysfs_ops padata_sysfs_ops = {
931 .show = padata_sysfs_show,
932 .store = padata_sysfs_store,
933};
934
935static struct kobj_type padata_attr_type = {
936 .sysfs_ops = &padata_sysfs_ops,
937 .default_attrs = padata_default_attrs,
938 .release = padata_sysfs_release,
939};
940
941/**
942 * padata_alloc_possible - Allocate and initialize padata instance.
943 * Use the cpu_possible_mask for serial and
944 * parallel workers.
945 *
946 * @wq: workqueue to use for the allocated padata instance
947 */
948struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
949{
950 return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
951}
952EXPORT_SYMBOL(padata_alloc_possible);
953
954/**
955 * padata_alloc - allocate and initialize a padata instance and specify
956 * cpumasks for serial and parallel workers.
957 *
958 * @wq: workqueue to use for the allocated padata instance
959 * @pcpumask: cpumask that will be used for padata parallelization
960 * @cbcpumask: cpumask that will be used for padata serialization
961 */
962struct padata_instance *padata_alloc(struct workqueue_struct *wq,
963 const struct cpumask *pcpumask,
964 const struct cpumask *cbcpumask)
965{
966 struct padata_instance *pinst;
967 struct parallel_data *pd = NULL;
968
969 pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
970 if (!pinst)
971 goto err;
972
973 get_online_cpus();
974 if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
975 goto err_free_inst;
976 if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
977 free_cpumask_var(pinst->cpumask.pcpu);
978 goto err_free_inst;
979 }
980 if (!padata_validate_cpumask(pinst, pcpumask) ||
981 !padata_validate_cpumask(pinst, cbcpumask))
982 goto err_free_masks;
983
984 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
985 if (!pd)
986 goto err_free_masks;
987
988 rcu_assign_pointer(pinst->pd, pd);
989
990 pinst->wq = wq;
991
992 cpumask_copy(pinst->cpumask.pcpu, pcpumask);
993 cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
994
995 pinst->flags = 0;
996
997 put_online_cpus();
998
999 BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
1000 kobject_init(&pinst->kobj, &padata_attr_type);
1001 mutex_init(&pinst->lock);
1002
1003#ifdef CONFIG_HOTPLUG_CPU
1004 cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
1005#endif
1006 return pinst;
1007
1008err_free_masks:
1009 free_cpumask_var(pinst->cpumask.pcpu);
1010 free_cpumask_var(pinst->cpumask.cbcpu);
1011err_free_inst:
1012 kfree(pinst);
1013 put_online_cpus();
1014err:
1015 return NULL;
1016}
1017
1018/**
1019 * padata_free - free a padata instance
1020 *
1021 * @pinst: padata instance to free
1022 */
1023void padata_free(struct padata_instance *pinst)
1024{
1025 kobject_put(&pinst->kobj);
1026}
1027EXPORT_SYMBOL(padata_free);
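/*
 * Typical instance lifecycle (sketch only, error handling trimmed;
 * "my_wq" is a hypothetical workqueue created by the padata user):
 *
 *	pinst = padata_alloc_possible(my_wq);
 *	padata_start(pinst);
 *	...
 *	padata_do_parallel() / padata_do_serial() as work arrives
 *	...
 *	padata_stop(pinst);
 *	padata_free(pinst);
 *
 * padata_free() only drops the kobject reference; the instance is
 * actually released from padata_sysfs_release() once all sysfs users
 * are gone.
 */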
1028
1029#ifdef CONFIG_HOTPLUG_CPU
1030
1031static __init int padata_driver_init(void)
1032{
1033 int ret;
1034
1035 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
1036 padata_cpu_online,
1037 padata_cpu_prep_down);
1038 if (ret < 0)
1039 return ret;
1040 hp_online = ret;
1041 return 0;
1042}
1043module_init(padata_driver_init);
1044
1045static __exit void padata_driver_exit(void)
1046{
1047 cpuhp_remove_multi_state(hp_online);
1048}
1049module_exit(padata_driver_exit);
1050#endif