Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * padata.c - generic interface to process data streams in parallel
4 *
5 * See Documentation/core-api/padata.rst for more information.
6 *
7 * Copyright (C) 2008, 2009 secunet Security Networks AG
8 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
9 *
10 * Copyright (c) 2020 Oracle and/or its affiliates.
11 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
12 */
13
14#include <linux/completion.h>
15#include <linux/export.h>
16#include <linux/cpumask.h>
17#include <linux/err.h>
18#include <linux/cpu.h>
19#include <linux/padata.h>
20#include <linux/mutex.h>
21#include <linux/sched.h>
22#include <linux/slab.h>
23#include <linux/sysfs.h>
24#include <linux/rcupdate.h>
25
26#define PADATA_WORK_ONSTACK 1 /* Work's memory is on stack */
27
28struct padata_work {
29 struct work_struct pw_work;
30 struct list_head pw_list; /* padata_free_works linkage */
31 void *pw_data;
32};
33
34static DEFINE_SPINLOCK(padata_works_lock);
35static struct padata_work *padata_works;
36static LIST_HEAD(padata_free_works);
37
38struct padata_mt_job_state {
39 spinlock_t lock;
40 struct completion completion;
41 struct padata_mt_job *job;
42 int nworks;
43 int nworks_fini;
44 unsigned long chunk_size;
45};
46
47static void padata_free_pd(struct parallel_data *pd);
48static void __init padata_mt_helper(struct work_struct *work);
49
50static inline void padata_get_pd(struct parallel_data *pd)
51{
52 refcount_inc(&pd->refcnt);
53}
54
55static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
56{
57 if (refcount_sub_and_test(cnt, &pd->refcnt))
58 padata_free_pd(pd);
59}
60
61static inline void padata_put_pd(struct parallel_data *pd)
62{
63 padata_put_pd_cnt(pd, 1);
64}
65
66static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
67{
68 int cpu, target_cpu;
69
70 target_cpu = cpumask_first(pd->cpumask.pcpu);
71 for (cpu = 0; cpu < cpu_index; cpu++)
72 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);
73
74 return target_cpu;
75}
76
77static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
78{
79 /*
80 * Hash the sequence numbers to the cpus by taking
81 * seq_nr mod. number of cpus in use.
82 */
83 int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
84
85 return padata_index_to_cpu(pd, cpu_index);
86}
87
88static struct padata_work *padata_work_alloc(void)
89{
90 struct padata_work *pw;
91
92 lockdep_assert_held(&padata_works_lock);
93
94 if (list_empty(&padata_free_works))
95 return NULL; /* No more work items allowed to be queued. */
96
97 pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
98 list_del(&pw->pw_list);
99 return pw;
100}
101
102/*
103 * This function is marked __ref because this function may be optimized in such
104 * a way that it directly refers to work_fn's address, which causes modpost to
105 * complain when work_fn is marked __init. This scenario was observed with clang
106 * LTO, where padata_work_init() was optimized to refer directly to
107 * padata_mt_helper() because the calls to padata_work_init() with other work_fn
108 * values were eliminated or inlined.
109 */
110static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
111 void *data, int flags)
112{
113 if (flags & PADATA_WORK_ONSTACK)
114 INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
115 else
116 INIT_WORK(&pw->pw_work, work_fn);
117 pw->pw_data = data;
118}
119
120static int __init padata_work_alloc_mt(int nworks, void *data,
121 struct list_head *head)
122{
123 int i;
124
125 spin_lock_bh(&padata_works_lock);
126 /* Start at 1 because the current task participates in the job. */
127 for (i = 1; i < nworks; ++i) {
128 struct padata_work *pw = padata_work_alloc();
129
130 if (!pw)
131 break;
132 padata_work_init(pw, padata_mt_helper, data, 0);
133 list_add(&pw->pw_list, head);
134 }
135 spin_unlock_bh(&padata_works_lock);
136
137 return i;
138}
139
140static void padata_work_free(struct padata_work *pw)
141{
142 lockdep_assert_held(&padata_works_lock);
143 list_add(&pw->pw_list, &padata_free_works);
144}
145
146static void __init padata_works_free(struct list_head *works)
147{
148 struct padata_work *cur, *next;
149
150 if (list_empty(works))
151 return;
152
153 spin_lock_bh(&padata_works_lock);
154 list_for_each_entry_safe(cur, next, works, pw_list) {
155 list_del(&cur->pw_list);
156 padata_work_free(cur);
157 }
158 spin_unlock_bh(&padata_works_lock);
159}
160
161static void padata_parallel_worker(struct work_struct *parallel_work)
162{
163 struct padata_work *pw = container_of(parallel_work, struct padata_work,
164 pw_work);
165 struct padata_priv *padata = pw->pw_data;
166
167 local_bh_disable();
168 padata->parallel(padata);
169 spin_lock(&padata_works_lock);
170 padata_work_free(pw);
171 spin_unlock(&padata_works_lock);
172 local_bh_enable();
173}
174
175/**
176 * padata_do_parallel - padata parallelization function
177 *
178 * @ps: padatashell
179 * @padata: object to be parallelized
180 * @cb_cpu: pointer to the CPU that the serialization callback function should
181 * run on. If it's not in the serial cpumask of @pinst
182 * (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
183 * none found, returns -EINVAL.
184 *
185 * The parallelization callback function will run with BHs off.
186 * Note: Every object which is parallelized by padata_do_parallel
187 * must be seen by padata_do_serial.
188 *
189 * Return: 0 on success or else negative error code.
190 */
191int padata_do_parallel(struct padata_shell *ps,
192 struct padata_priv *padata, int *cb_cpu)
193{
194 struct padata_instance *pinst = ps->pinst;
195 int i, cpu, cpu_index, err;
196 struct parallel_data *pd;
197 struct padata_work *pw;
198
199 rcu_read_lock_bh();
200
201 pd = rcu_dereference_bh(ps->pd);
202
203 err = -EINVAL;
204 if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
205 goto out;
206
207 if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
208 if (cpumask_empty(pd->cpumask.cbcpu))
209 goto out;
210
211 /* Select an alternate fallback CPU and notify the caller. */
212 cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);
213
214 cpu = cpumask_first(pd->cpumask.cbcpu);
215 for (i = 0; i < cpu_index; i++)
216 cpu = cpumask_next(cpu, pd->cpumask.cbcpu);
217
218 *cb_cpu = cpu;
219 }
220
221 err = -EBUSY;
222 if ((pinst->flags & PADATA_RESET))
223 goto out;
224
225 padata_get_pd(pd);
226 padata->pd = pd;
227 padata->cb_cpu = *cb_cpu;
228
229 spin_lock(&padata_works_lock);
230 padata->seq_nr = ++pd->seq_nr;
231 pw = padata_work_alloc();
232 spin_unlock(&padata_works_lock);
233
234 if (!pw) {
235 /* Maximum works limit exceeded, run in the current task. */
236 padata->parallel(padata);
237 }
238
239 rcu_read_unlock_bh();
240
241 if (pw) {
242 padata_work_init(pw, padata_parallel_worker, padata, 0);
243 queue_work(pinst->parallel_wq, &pw->pw_work);
244 }
245
246 return 0;
247out:
248 rcu_read_unlock_bh();
249
250 return err;
251}
252EXPORT_SYMBOL(padata_do_parallel);
253
254/*
255 * padata_find_next - Find the next object that needs serialization.
256 *
257 * Return:
258 * * A pointer to the control struct of the next object that needs
259 * serialization, if present in one of the percpu reorder queues.
260 * * NULL, if the next object that needs serialization will
261 * be parallel processed by another cpu and is not yet present in
262 * the cpu's reorder queue.
263 */
264static struct padata_priv *padata_find_next(struct parallel_data *pd,
265 bool remove_object)
266{
267 struct padata_priv *padata;
268 struct padata_list *reorder;
269 int cpu = pd->cpu;
270
271 reorder = per_cpu_ptr(pd->reorder_list, cpu);
272
273 spin_lock(&reorder->lock);
274 if (list_empty(&reorder->list)) {
275 spin_unlock(&reorder->lock);
276 return NULL;
277 }
278
279 padata = list_entry(reorder->list.next, struct padata_priv, list);
280
281 /*
282 * Checks the rare case where two or more parallel jobs have hashed to
283 * the same CPU and one of the later ones finishes first.
284 */
285 if (padata->seq_nr != pd->processed) {
286 spin_unlock(&reorder->lock);
287 return NULL;
288 }
289
290 if (remove_object) {
291 list_del_init(&padata->list);
292 ++pd->processed;
293 pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
294 }
295
296 spin_unlock(&reorder->lock);
297 return padata;
298}
299
300static void padata_reorder(struct parallel_data *pd)
301{
302 struct padata_instance *pinst = pd->ps->pinst;
303 int cb_cpu;
304 struct padata_priv *padata;
305 struct padata_serial_queue *squeue;
306 struct padata_list *reorder;
307
308 /*
309 * We need to ensure that only one cpu can work on dequeueing of
310 * the reorder queue the time. Calculating in which percpu reorder
311 * queue the next object will arrive takes some time. A spinlock
312 * would be highly contended. Also it is not clear in which order
313 * the objects arrive to the reorder queues. So a cpu could wait to
314 * get the lock just to notice that there is nothing to do at the
315 * moment. Therefore we use a trylock and let the holder of the lock
316 * care for all the objects enqueued during the holdtime of the lock.
317 */
318 if (!spin_trylock_bh(&pd->lock))
319 return;
320
321 while (1) {
322 padata = padata_find_next(pd, true);
323
324 /*
325 * If the next object that needs serialization is parallel
326 * processed by another cpu and is still on it's way to the
327 * cpu's reorder queue, nothing to do for now.
328 */
329 if (!padata)
330 break;
331
332 cb_cpu = padata->cb_cpu;
333 squeue = per_cpu_ptr(pd->squeue, cb_cpu);
334
335 spin_lock(&squeue->serial.lock);
336 list_add_tail(&padata->list, &squeue->serial.list);
337 spin_unlock(&squeue->serial.lock);
338
339 queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
340 }
341
342 spin_unlock_bh(&pd->lock);
343
344 /*
345 * The next object that needs serialization might have arrived to
346 * the reorder queues in the meantime.
347 *
348 * Ensure reorder queue is read after pd->lock is dropped so we see
349 * new objects from another task in padata_do_serial. Pairs with
350 * smp_mb in padata_do_serial.
351 */
352 smp_mb();
353
354 reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
355 if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
356 /*
357 * Other context(eg. the padata_serial_worker) can finish the request.
358 * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
359 */
360 padata_get_pd(pd);
361 queue_work(pinst->serial_wq, &pd->reorder_work);
362 }
363}
364
365static void invoke_padata_reorder(struct work_struct *work)
366{
367 struct parallel_data *pd;
368
369 local_bh_disable();
370 pd = container_of(work, struct parallel_data, reorder_work);
371 padata_reorder(pd);
372 local_bh_enable();
373 /* Pairs with putting the reorder_work in the serial_wq */
374 padata_put_pd(pd);
375}
376
377static void padata_serial_worker(struct work_struct *serial_work)
378{
379 struct padata_serial_queue *squeue;
380 struct parallel_data *pd;
381 LIST_HEAD(local_list);
382 int cnt;
383
384 local_bh_disable();
385 squeue = container_of(serial_work, struct padata_serial_queue, work);
386 pd = squeue->pd;
387
388 spin_lock(&squeue->serial.lock);
389 list_replace_init(&squeue->serial.list, &local_list);
390 spin_unlock(&squeue->serial.lock);
391
392 cnt = 0;
393
394 while (!list_empty(&local_list)) {
395 struct padata_priv *padata;
396
397 padata = list_entry(local_list.next,
398 struct padata_priv, list);
399
400 list_del_init(&padata->list);
401
402 padata->serial(padata);
403 cnt++;
404 }
405 local_bh_enable();
406
407 padata_put_pd_cnt(pd, cnt);
408}
409
410/**
411 * padata_do_serial - padata serialization function
412 *
413 * @padata: object to be serialized.
414 *
415 * padata_do_serial must be called for every parallelized object.
416 * The serialization callback function will run with BHs off.
417 */
418void padata_do_serial(struct padata_priv *padata)
419{
420 struct parallel_data *pd = padata->pd;
421 int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
422 struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
423 struct padata_priv *cur;
424 struct list_head *pos;
425
426 spin_lock(&reorder->lock);
427 /* Sort in ascending order of sequence number. */
428 list_for_each_prev(pos, &reorder->list) {
429 cur = list_entry(pos, struct padata_priv, list);
430 /* Compare by difference to consider integer wrap around */
431 if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
432 break;
433 }
434 list_add(&padata->list, pos);
435 spin_unlock(&reorder->lock);
436
437 /*
438 * Ensure the addition to the reorder list is ordered correctly
439 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
440 * in padata_reorder.
441 */
442 smp_mb();
443
444 padata_reorder(pd);
445}
446EXPORT_SYMBOL(padata_do_serial);
447
448static int padata_setup_cpumasks(struct padata_instance *pinst)
449{
450 struct workqueue_attrs *attrs;
451 int err;
452
453 attrs = alloc_workqueue_attrs();
454 if (!attrs)
455 return -ENOMEM;
456
457 /* Restrict parallel_wq workers to pd->cpumask.pcpu. */
458 cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
459 err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
460 free_workqueue_attrs(attrs);
461
462 return err;
463}
464
465static void __init padata_mt_helper(struct work_struct *w)
466{
467 struct padata_work *pw = container_of(w, struct padata_work, pw_work);
468 struct padata_mt_job_state *ps = pw->pw_data;
469 struct padata_mt_job *job = ps->job;
470 bool done;
471
472 spin_lock(&ps->lock);
473
474 while (job->size > 0) {
475 unsigned long start, size, end;
476
477 start = job->start;
478 /* So end is chunk size aligned if enough work remains. */
479 size = roundup(start + 1, ps->chunk_size) - start;
480 size = min(size, job->size);
481 end = start + size;
482
483 job->start = end;
484 job->size -= size;
485
486 spin_unlock(&ps->lock);
487 job->thread_fn(start, end, job->fn_arg);
488 spin_lock(&ps->lock);
489 }
490
491 ++ps->nworks_fini;
492 done = (ps->nworks_fini == ps->nworks);
493 spin_unlock(&ps->lock);
494
495 if (done)
496 complete(&ps->completion);
497}
498
499/**
500 * padata_do_multithreaded - run a multithreaded job
501 * @job: Description of the job.
502 *
503 * See the definition of struct padata_mt_job for more details.
504 */
505void __init padata_do_multithreaded(struct padata_mt_job *job)
506{
507 /* In case threads finish at different times. */
508 static const unsigned long load_balance_factor = 4;
509 struct padata_work my_work, *pw;
510 struct padata_mt_job_state ps;
511 LIST_HEAD(works);
512 int nworks, nid;
513 static atomic_t last_used_nid __initdata;
514
515 if (job->size == 0)
516 return;
517
518 /* Ensure at least one thread when size < min_chunk. */
519 nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
520 nworks = min(nworks, job->max_threads);
521
522 if (nworks == 1) {
523 /* Single thread, no coordination needed, cut to the chase. */
524 job->thread_fn(job->start, job->start + job->size, job->fn_arg);
525 return;
526 }
527
528 spin_lock_init(&ps.lock);
529 init_completion(&ps.completion);
530 ps.job = job;
531 ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
532 ps.nworks_fini = 0;
533
534 /*
535 * Chunk size is the amount of work a helper does per call to the
536 * thread function. Load balance large jobs between threads by
537 * increasing the number of chunks, guarantee at least the minimum
538 * chunk size from the caller, and honor the caller's alignment.
539 * Ensure chunk_size is at least 1 to prevent divide-by-0
540 * panic in padata_mt_helper().
541 */
542 ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
543 ps.chunk_size = max(ps.chunk_size, job->min_chunk);
544 ps.chunk_size = max(ps.chunk_size, 1ul);
545 ps.chunk_size = roundup(ps.chunk_size, job->align);
546
547 list_for_each_entry(pw, &works, pw_list)
548 if (job->numa_aware) {
549 int old_node = atomic_read(&last_used_nid);
550
551 do {
552 nid = next_node_in(old_node, node_states[N_CPU]);
553 } while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid));
554 queue_work_node(nid, system_unbound_wq, &pw->pw_work);
555 } else {
556 queue_work(system_unbound_wq, &pw->pw_work);
557 }
558
559 /* Use the current thread, which saves starting a workqueue worker. */
560 padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
561 padata_mt_helper(&my_work.pw_work);
562
563 /* Wait for all the helpers to finish. */
564 wait_for_completion(&ps.completion);
565
566 destroy_work_on_stack(&my_work.pw_work);
567 padata_works_free(&works);
568}
569
570static void __padata_list_init(struct padata_list *pd_list)
571{
572 INIT_LIST_HEAD(&pd_list->list);
573 spin_lock_init(&pd_list->lock);
574}
575
576/* Initialize all percpu queues used by serial workers */
577static void padata_init_squeues(struct parallel_data *pd)
578{
579 int cpu;
580 struct padata_serial_queue *squeue;
581
582 for_each_cpu(cpu, pd->cpumask.cbcpu) {
583 squeue = per_cpu_ptr(pd->squeue, cpu);
584 squeue->pd = pd;
585 __padata_list_init(&squeue->serial);
586 INIT_WORK(&squeue->work, padata_serial_worker);
587 }
588}
589
590/* Initialize per-CPU reorder lists */
591static void padata_init_reorder_list(struct parallel_data *pd)
592{
593 int cpu;
594 struct padata_list *list;
595
596 for_each_cpu(cpu, pd->cpumask.pcpu) {
597 list = per_cpu_ptr(pd->reorder_list, cpu);
598 __padata_list_init(list);
599 }
600}
601
602/* Allocate and initialize the internal cpumask dependend resources. */
603static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
604{
605 struct padata_instance *pinst = ps->pinst;
606 struct parallel_data *pd;
607
608 pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
609 if (!pd)
610 goto err;
611
612 pd->reorder_list = alloc_percpu(struct padata_list);
613 if (!pd->reorder_list)
614 goto err_free_pd;
615
616 pd->squeue = alloc_percpu(struct padata_serial_queue);
617 if (!pd->squeue)
618 goto err_free_reorder_list;
619
620 pd->ps = ps;
621
622 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
623 goto err_free_squeue;
624 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
625 goto err_free_pcpu;
626
627 cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
628 cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);
629
630 padata_init_reorder_list(pd);
631 padata_init_squeues(pd);
632 pd->seq_nr = -1;
633 refcount_set(&pd->refcnt, 1);
634 spin_lock_init(&pd->lock);
635 pd->cpu = cpumask_first(pd->cpumask.pcpu);
636 INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
637
638 return pd;
639
640err_free_pcpu:
641 free_cpumask_var(pd->cpumask.pcpu);
642err_free_squeue:
643 free_percpu(pd->squeue);
644err_free_reorder_list:
645 free_percpu(pd->reorder_list);
646err_free_pd:
647 kfree(pd);
648err:
649 return NULL;
650}
651
652static void padata_free_pd(struct parallel_data *pd)
653{
654 free_cpumask_var(pd->cpumask.pcpu);
655 free_cpumask_var(pd->cpumask.cbcpu);
656 free_percpu(pd->reorder_list);
657 free_percpu(pd->squeue);
658 kfree(pd);
659}
660
661static void __padata_start(struct padata_instance *pinst)
662{
663 pinst->flags |= PADATA_INIT;
664}
665
666static void __padata_stop(struct padata_instance *pinst)
667{
668 if (!(pinst->flags & PADATA_INIT))
669 return;
670
671 pinst->flags &= ~PADATA_INIT;
672
673 synchronize_rcu();
674}
675
676/* Replace the internal control structure with a new one. */
677static int padata_replace_one(struct padata_shell *ps)
678{
679 struct parallel_data *pd_new;
680
681 pd_new = padata_alloc_pd(ps);
682 if (!pd_new)
683 return -ENOMEM;
684
685 ps->opd = rcu_dereference_protected(ps->pd, 1);
686 rcu_assign_pointer(ps->pd, pd_new);
687
688 return 0;
689}
690
691static int padata_replace(struct padata_instance *pinst)
692{
693 struct padata_shell *ps;
694 int err = 0;
695
696 pinst->flags |= PADATA_RESET;
697
698 list_for_each_entry(ps, &pinst->pslist, list) {
699 err = padata_replace_one(ps);
700 if (err)
701 break;
702 }
703
704 synchronize_rcu();
705
706 list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
707 padata_put_pd(ps->opd);
708
709 pinst->flags &= ~PADATA_RESET;
710
711 return err;
712}
713
714/* If cpumask contains no active cpu, we mark the instance as invalid. */
715static bool padata_validate_cpumask(struct padata_instance *pinst,
716 const struct cpumask *cpumask)
717{
718 if (!cpumask_intersects(cpumask, cpu_online_mask)) {
719 pinst->flags |= PADATA_INVALID;
720 return false;
721 }
722
723 pinst->flags &= ~PADATA_INVALID;
724 return true;
725}
726
727static int __padata_set_cpumasks(struct padata_instance *pinst,
728 cpumask_var_t pcpumask,
729 cpumask_var_t cbcpumask)
730{
731 int valid;
732 int err;
733
734 valid = padata_validate_cpumask(pinst, pcpumask);
735 if (!valid) {
736 __padata_stop(pinst);
737 goto out_replace;
738 }
739
740 valid = padata_validate_cpumask(pinst, cbcpumask);
741 if (!valid)
742 __padata_stop(pinst);
743
744out_replace:
745 cpumask_copy(pinst->cpumask.pcpu, pcpumask);
746 cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
747
748 err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);
749
750 if (valid)
751 __padata_start(pinst);
752
753 return err;
754}
755
756/**
757 * padata_set_cpumask - Sets specified by @cpumask_type cpumask to the value
758 * equivalent to @cpumask.
759 * @pinst: padata instance
760 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
761 * to parallel and serial cpumasks respectively.
762 * @cpumask: the cpumask to use
763 *
764 * Return: 0 on success or negative error code
765 */
766int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
767 cpumask_var_t cpumask)
768{
769 struct cpumask *serial_mask, *parallel_mask;
770 int err = -EINVAL;
771
772 cpus_read_lock();
773 mutex_lock(&pinst->lock);
774
775 switch (cpumask_type) {
776 case PADATA_CPU_PARALLEL:
777 serial_mask = pinst->cpumask.cbcpu;
778 parallel_mask = cpumask;
779 break;
780 case PADATA_CPU_SERIAL:
781 parallel_mask = pinst->cpumask.pcpu;
782 serial_mask = cpumask;
783 break;
784 default:
785 goto out;
786 }
787
788 err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
789
790out:
791 mutex_unlock(&pinst->lock);
792 cpus_read_unlock();
793
794 return err;
795}
796EXPORT_SYMBOL(padata_set_cpumask);
797
798#ifdef CONFIG_HOTPLUG_CPU
799
800static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
801{
802 int err = 0;
803
804 if (cpumask_test_cpu(cpu, cpu_online_mask)) {
805 err = padata_replace(pinst);
806
807 if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
808 padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
809 __padata_start(pinst);
810 }
811
812 return err;
813}
814
815static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
816{
817 int err = 0;
818
819 if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
820 if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
821 !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
822 __padata_stop(pinst);
823
824 err = padata_replace(pinst);
825 }
826
827 return err;
828}
829
830static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
831{
832 return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
833 cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
834}
835
836static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
837{
838 struct padata_instance *pinst;
839 int ret;
840
841 pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
842 if (!pinst_has_cpu(pinst, cpu))
843 return 0;
844
845 mutex_lock(&pinst->lock);
846 ret = __padata_add_cpu(pinst, cpu);
847 mutex_unlock(&pinst->lock);
848 return ret;
849}
850
851static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
852{
853 struct padata_instance *pinst;
854 int ret;
855
856 pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
857 if (!pinst_has_cpu(pinst, cpu))
858 return 0;
859
860 mutex_lock(&pinst->lock);
861 ret = __padata_remove_cpu(pinst, cpu);
862 mutex_unlock(&pinst->lock);
863 return ret;
864}
865
866static enum cpuhp_state hp_online;
867#endif
868
869static void __padata_free(struct padata_instance *pinst)
870{
871#ifdef CONFIG_HOTPLUG_CPU
872 cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
873 &pinst->cpu_dead_node);
874 cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
875#endif
876
877 WARN_ON(!list_empty(&pinst->pslist));
878
879 free_cpumask_var(pinst->cpumask.pcpu);
880 free_cpumask_var(pinst->cpumask.cbcpu);
881 destroy_workqueue(pinst->serial_wq);
882 destroy_workqueue(pinst->parallel_wq);
883 kfree(pinst);
884}
885
886#define kobj2pinst(_kobj) \
887 container_of(_kobj, struct padata_instance, kobj)
888#define attr2pentry(_attr) \
889 container_of(_attr, struct padata_sysfs_entry, attr)
890
891static void padata_sysfs_release(struct kobject *kobj)
892{
893 struct padata_instance *pinst = kobj2pinst(kobj);
894 __padata_free(pinst);
895}
896
897struct padata_sysfs_entry {
898 struct attribute attr;
899 ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
900 ssize_t (*store)(struct padata_instance *, struct attribute *,
901 const char *, size_t);
902};
903
904static ssize_t show_cpumask(struct padata_instance *pinst,
905 struct attribute *attr, char *buf)
906{
907 struct cpumask *cpumask;
908 ssize_t len;
909
910 mutex_lock(&pinst->lock);
911 if (!strcmp(attr->name, "serial_cpumask"))
912 cpumask = pinst->cpumask.cbcpu;
913 else
914 cpumask = pinst->cpumask.pcpu;
915
916 len = snprintf(buf, PAGE_SIZE, "%*pb\n",
917 nr_cpu_ids, cpumask_bits(cpumask));
918 mutex_unlock(&pinst->lock);
919 return len < PAGE_SIZE ? len : -EINVAL;
920}
921
922static ssize_t store_cpumask(struct padata_instance *pinst,
923 struct attribute *attr,
924 const char *buf, size_t count)
925{
926 cpumask_var_t new_cpumask;
927 ssize_t ret;
928 int mask_type;
929
930 if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
931 return -ENOMEM;
932
933 ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
934 nr_cpumask_bits);
935 if (ret < 0)
936 goto out;
937
938 mask_type = !strcmp(attr->name, "serial_cpumask") ?
939 PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
940 ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
941 if (!ret)
942 ret = count;
943
944out:
945 free_cpumask_var(new_cpumask);
946 return ret;
947}
948
949#define PADATA_ATTR_RW(_name, _show_name, _store_name) \
950 static struct padata_sysfs_entry _name##_attr = \
951 __ATTR(_name, 0644, _show_name, _store_name)
952#define PADATA_ATTR_RO(_name, _show_name) \
953 static struct padata_sysfs_entry _name##_attr = \
954 __ATTR(_name, 0400, _show_name, NULL)
955
956PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
957PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
958
959/*
960 * Padata sysfs provides the following objects:
961 * serial_cpumask [RW] - cpumask for serial workers
962 * parallel_cpumask [RW] - cpumask for parallel workers
963 */
964static struct attribute *padata_default_attrs[] = {
965 &serial_cpumask_attr.attr,
966 ¶llel_cpumask_attr.attr,
967 NULL,
968};
969ATTRIBUTE_GROUPS(padata_default);
970
971static ssize_t padata_sysfs_show(struct kobject *kobj,
972 struct attribute *attr, char *buf)
973{
974 struct padata_instance *pinst;
975 struct padata_sysfs_entry *pentry;
976 ssize_t ret = -EIO;
977
978 pinst = kobj2pinst(kobj);
979 pentry = attr2pentry(attr);
980 if (pentry->show)
981 ret = pentry->show(pinst, attr, buf);
982
983 return ret;
984}
985
986static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
987 const char *buf, size_t count)
988{
989 struct padata_instance *pinst;
990 struct padata_sysfs_entry *pentry;
991 ssize_t ret = -EIO;
992
993 pinst = kobj2pinst(kobj);
994 pentry = attr2pentry(attr);
995 if (pentry->store)
996 ret = pentry->store(pinst, attr, buf, count);
997
998 return ret;
999}
1000
1001static const struct sysfs_ops padata_sysfs_ops = {
1002 .show = padata_sysfs_show,
1003 .store = padata_sysfs_store,
1004};
1005
1006static const struct kobj_type padata_attr_type = {
1007 .sysfs_ops = &padata_sysfs_ops,
1008 .default_groups = padata_default_groups,
1009 .release = padata_sysfs_release,
1010};
1011
1012/**
1013 * padata_alloc - allocate and initialize a padata instance
1014 * @name: used to identify the instance
1015 *
1016 * Return: new instance on success, NULL on error
1017 */
1018struct padata_instance *padata_alloc(const char *name)
1019{
1020 struct padata_instance *pinst;
1021
1022 pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
1023 if (!pinst)
1024 goto err;
1025
1026 pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
1027 name);
1028 if (!pinst->parallel_wq)
1029 goto err_free_inst;
1030
1031 cpus_read_lock();
1032
1033 pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
1034 WQ_CPU_INTENSIVE, 1, name);
1035 if (!pinst->serial_wq)
1036 goto err_put_cpus;
1037
1038 if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
1039 goto err_free_serial_wq;
1040 if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
1041 free_cpumask_var(pinst->cpumask.pcpu);
1042 goto err_free_serial_wq;
1043 }
1044
1045 INIT_LIST_HEAD(&pinst->pslist);
1046
1047 cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
1048 cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);
1049
1050 if (padata_setup_cpumasks(pinst))
1051 goto err_free_masks;
1052
1053 __padata_start(pinst);
1054
1055 kobject_init(&pinst->kobj, &padata_attr_type);
1056 mutex_init(&pinst->lock);
1057
1058#ifdef CONFIG_HOTPLUG_CPU
1059 cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
1060 &pinst->cpu_online_node);
1061 cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
1062 &pinst->cpu_dead_node);
1063#endif
1064
1065 cpus_read_unlock();
1066
1067 return pinst;
1068
1069err_free_masks:
1070 free_cpumask_var(pinst->cpumask.pcpu);
1071 free_cpumask_var(pinst->cpumask.cbcpu);
1072err_free_serial_wq:
1073 destroy_workqueue(pinst->serial_wq);
1074err_put_cpus:
1075 cpus_read_unlock();
1076 destroy_workqueue(pinst->parallel_wq);
1077err_free_inst:
1078 kfree(pinst);
1079err:
1080 return NULL;
1081}
1082EXPORT_SYMBOL(padata_alloc);
1083
1084/**
1085 * padata_free - free a padata instance
1086 *
1087 * @pinst: padata instance to free
1088 */
1089void padata_free(struct padata_instance *pinst)
1090{
1091 kobject_put(&pinst->kobj);
1092}
1093EXPORT_SYMBOL(padata_free);
1094
1095/**
1096 * padata_alloc_shell - Allocate and initialize padata shell.
1097 *
1098 * @pinst: Parent padata_instance object.
1099 *
1100 * Return: new shell on success, NULL on error
1101 */
1102struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
1103{
1104 struct parallel_data *pd;
1105 struct padata_shell *ps;
1106
1107 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1108 if (!ps)
1109 goto out;
1110
1111 ps->pinst = pinst;
1112
1113 cpus_read_lock();
1114 pd = padata_alloc_pd(ps);
1115 cpus_read_unlock();
1116
1117 if (!pd)
1118 goto out_free_ps;
1119
1120 mutex_lock(&pinst->lock);
1121 RCU_INIT_POINTER(ps->pd, pd);
1122 list_add(&ps->list, &pinst->pslist);
1123 mutex_unlock(&pinst->lock);
1124
1125 return ps;
1126
1127out_free_ps:
1128 kfree(ps);
1129out:
1130 return NULL;
1131}
1132EXPORT_SYMBOL(padata_alloc_shell);
1133
1134/**
1135 * padata_free_shell - free a padata shell
1136 *
1137 * @ps: padata shell to free
1138 */
1139void padata_free_shell(struct padata_shell *ps)
1140{
1141 struct parallel_data *pd;
1142
1143 if (!ps)
1144 return;
1145
1146 /*
1147 * Wait for all _do_serial calls to finish to avoid touching
1148 * freed pd's and ps's.
1149 */
1150 synchronize_rcu();
1151
1152 mutex_lock(&ps->pinst->lock);
1153 list_del(&ps->list);
1154 pd = rcu_dereference_protected(ps->pd, 1);
1155 padata_put_pd(pd);
1156 mutex_unlock(&ps->pinst->lock);
1157
1158 kfree(ps);
1159}
1160EXPORT_SYMBOL(padata_free_shell);
1161
1162void __init padata_init(void)
1163{
1164 unsigned int i, possible_cpus;
1165#ifdef CONFIG_HOTPLUG_CPU
1166 int ret;
1167
1168 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
1169 padata_cpu_online, NULL);
1170 if (ret < 0)
1171 goto err;
1172 hp_online = ret;
1173
1174 ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
1175 NULL, padata_cpu_dead);
1176 if (ret < 0)
1177 goto remove_online_state;
1178#endif
1179
1180 possible_cpus = num_possible_cpus();
1181 padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
1182 GFP_KERNEL);
1183 if (!padata_works)
1184 goto remove_dead_state;
1185
1186 for (i = 0; i < possible_cpus; ++i)
1187 list_add(&padata_works[i].pw_list, &padata_free_works);
1188
1189 return;
1190
1191remove_dead_state:
1192#ifdef CONFIG_HOTPLUG_CPU
1193 cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
1194remove_online_state:
1195 cpuhp_remove_multi_state(hp_online);
1196err:
1197#endif
1198 pr_warn("padata: initialization failed\n");
1199}
1/*
2 * padata.c - generic interface to process data streams in parallel
3 *
4 * See Documentation/padata.txt for an api documentation.
5 *
6 * Copyright (C) 2008, 2009 secunet Security Networks AG
7 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23#include <linux/export.h>
24#include <linux/cpumask.h>
25#include <linux/err.h>
26#include <linux/cpu.h>
27#include <linux/padata.h>
28#include <linux/mutex.h>
29#include <linux/sched.h>
30#include <linux/slab.h>
31#include <linux/sysfs.h>
32#include <linux/rcupdate.h>
33
34#define MAX_OBJ_NUM 1000
35
36static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
37{
38 int cpu, target_cpu;
39
40 target_cpu = cpumask_first(pd->cpumask.pcpu);
41 for (cpu = 0; cpu < cpu_index; cpu++)
42 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);
43
44 return target_cpu;
45}
46
47static int padata_cpu_hash(struct parallel_data *pd)
48{
49 unsigned int seq_nr;
50 int cpu_index;
51
52 /*
53 * Hash the sequence numbers to the cpus by taking
54 * seq_nr mod. number of cpus in use.
55 */
56
57 seq_nr = atomic_inc_return(&pd->seq_nr);
58 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
59
60 return padata_index_to_cpu(pd, cpu_index);
61}
62
63static void padata_parallel_worker(struct work_struct *parallel_work)
64{
65 struct padata_parallel_queue *pqueue;
66 struct parallel_data *pd;
67 struct padata_instance *pinst;
68 LIST_HEAD(local_list);
69
70 local_bh_disable();
71 pqueue = container_of(parallel_work,
72 struct padata_parallel_queue, work);
73 pd = pqueue->pd;
74 pinst = pd->pinst;
75
76 spin_lock(&pqueue->parallel.lock);
77 list_replace_init(&pqueue->parallel.list, &local_list);
78 spin_unlock(&pqueue->parallel.lock);
79
80 while (!list_empty(&local_list)) {
81 struct padata_priv *padata;
82
83 padata = list_entry(local_list.next,
84 struct padata_priv, list);
85
86 list_del_init(&padata->list);
87
88 padata->parallel(padata);
89 }
90
91 local_bh_enable();
92}
93
94/**
95 * padata_do_parallel - padata parallelization function
96 *
97 * @pinst: padata instance
98 * @padata: object to be parallelized
99 * @cb_cpu: cpu the serialization callback function will run on,
100 * must be in the serial cpumask of padata(i.e. cpumask.cbcpu).
101 *
102 * The parallelization callback function will run with BHs off.
103 * Note: Every object which is parallelized by padata_do_parallel
104 * must be seen by padata_do_serial.
105 */
106int padata_do_parallel(struct padata_instance *pinst,
107 struct padata_priv *padata, int cb_cpu)
108{
109 int target_cpu, err;
110 struct padata_parallel_queue *queue;
111 struct parallel_data *pd;
112
113 rcu_read_lock_bh();
114
115 pd = rcu_dereference_bh(pinst->pd);
116
117 err = -EINVAL;
118 if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
119 goto out;
120
121 if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
122 goto out;
123
124 err = -EBUSY;
125 if ((pinst->flags & PADATA_RESET))
126 goto out;
127
128 if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
129 goto out;
130
131 err = 0;
132 atomic_inc(&pd->refcnt);
133 padata->pd = pd;
134 padata->cb_cpu = cb_cpu;
135
136 target_cpu = padata_cpu_hash(pd);
137 queue = per_cpu_ptr(pd->pqueue, target_cpu);
138
139 spin_lock(&queue->parallel.lock);
140 list_add_tail(&padata->list, &queue->parallel.list);
141 spin_unlock(&queue->parallel.lock);
142
143 queue_work_on(target_cpu, pinst->wq, &queue->work);
144
145out:
146 rcu_read_unlock_bh();
147
148 return err;
149}
150EXPORT_SYMBOL(padata_do_parallel);
151
152/*
153 * padata_get_next - Get the next object that needs serialization.
154 *
155 * Return values are:
156 *
157 * A pointer to the control struct of the next object that needs
158 * serialization, if present in one of the percpu reorder queues.
159 *
160 * NULL, if all percpu reorder queues are empty.
161 *
162 * -EINPROGRESS, if the next object that needs serialization will
163 * be parallel processed by another cpu and is not yet present in
164 * the cpu's reorder queue.
165 *
166 * -ENODATA, if this cpu has to do the parallel processing for
167 * the next object.
168 */
169static struct padata_priv *padata_get_next(struct parallel_data *pd)
170{
171 int cpu, num_cpus;
172 unsigned int next_nr, next_index;
173 struct padata_parallel_queue *next_queue;
174 struct padata_priv *padata;
175 struct padata_list *reorder;
176
177 num_cpus = cpumask_weight(pd->cpumask.pcpu);
178
179 /*
180 * Calculate the percpu reorder queue and the sequence
181 * number of the next object.
182 */
183 next_nr = pd->processed;
184 next_index = next_nr % num_cpus;
185 cpu = padata_index_to_cpu(pd, next_index);
186 next_queue = per_cpu_ptr(pd->pqueue, cpu);
187
188 padata = NULL;
189
190 reorder = &next_queue->reorder;
191
192 if (!list_empty(&reorder->list)) {
193 padata = list_entry(reorder->list.next,
194 struct padata_priv, list);
195
196 spin_lock(&reorder->lock);
197 list_del_init(&padata->list);
198 atomic_dec(&pd->reorder_objects);
199 spin_unlock(&reorder->lock);
200
201 pd->processed++;
202
203 goto out;
204 }
205
206 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
207 padata = ERR_PTR(-ENODATA);
208 goto out;
209 }
210
211 padata = ERR_PTR(-EINPROGRESS);
212out:
213 return padata;
214}
215
216static void padata_reorder(struct parallel_data *pd)
217{
218 int cb_cpu;
219 struct padata_priv *padata;
220 struct padata_serial_queue *squeue;
221 struct padata_instance *pinst = pd->pinst;
222
223 /*
224 * We need to ensure that only one cpu can work on dequeueing of
225 * the reorder queue the time. Calculating in which percpu reorder
226 * queue the next object will arrive takes some time. A spinlock
227 * would be highly contended. Also it is not clear in which order
228 * the objects arrive to the reorder queues. So a cpu could wait to
229 * get the lock just to notice that there is nothing to do at the
230 * moment. Therefore we use a trylock and let the holder of the lock
231 * care for all the objects enqueued during the holdtime of the lock.
232 */
233 if (!spin_trylock_bh(&pd->lock))
234 return;
235
236 while (1) {
237 padata = padata_get_next(pd);
238
239 /*
240 * All reorder queues are empty, or the next object that needs
241 * serialization is parallel processed by another cpu and is
242 * still on it's way to the cpu's reorder queue, nothing to
243 * do for now.
244 */
245 if (!padata || PTR_ERR(padata) == -EINPROGRESS)
246 break;
247
248 /*
249 * This cpu has to do the parallel processing of the next
250 * object. It's waiting in the cpu's parallelization queue,
251 * so exit immediately.
252 */
253 if (PTR_ERR(padata) == -ENODATA) {
254 del_timer(&pd->timer);
255 spin_unlock_bh(&pd->lock);
256 return;
257 }
258
259 cb_cpu = padata->cb_cpu;
260 squeue = per_cpu_ptr(pd->squeue, cb_cpu);
261
262 spin_lock(&squeue->serial.lock);
263 list_add_tail(&padata->list, &squeue->serial.list);
264 spin_unlock(&squeue->serial.lock);
265
266 queue_work_on(cb_cpu, pinst->wq, &squeue->work);
267 }
268
269 spin_unlock_bh(&pd->lock);
270
271 /*
272 * The next object that needs serialization might have arrived to
273 * the reorder queues in the meantime, we will be called again
274 * from the timer function if no one else cares for it.
275 */
276 if (atomic_read(&pd->reorder_objects)
277 && !(pinst->flags & PADATA_RESET))
278 mod_timer(&pd->timer, jiffies + HZ);
279 else
280 del_timer(&pd->timer);
281
282 return;
283}
284
285static void padata_reorder_timer(unsigned long arg)
286{
287 struct parallel_data *pd = (struct parallel_data *)arg;
288
289 padata_reorder(pd);
290}
291
292static void padata_serial_worker(struct work_struct *serial_work)
293{
294 struct padata_serial_queue *squeue;
295 struct parallel_data *pd;
296 LIST_HEAD(local_list);
297
298 local_bh_disable();
299 squeue = container_of(serial_work, struct padata_serial_queue, work);
300 pd = squeue->pd;
301
302 spin_lock(&squeue->serial.lock);
303 list_replace_init(&squeue->serial.list, &local_list);
304 spin_unlock(&squeue->serial.lock);
305
306 while (!list_empty(&local_list)) {
307 struct padata_priv *padata;
308
309 padata = list_entry(local_list.next,
310 struct padata_priv, list);
311
312 list_del_init(&padata->list);
313
314 padata->serial(padata);
315 atomic_dec(&pd->refcnt);
316 }
317 local_bh_enable();
318}
319
320/**
321 * padata_do_serial - padata serialization function
322 *
323 * @padata: object to be serialized.
324 *
325 * padata_do_serial must be called for every parallelized object.
326 * The serialization callback function will run with BHs off.
327 */
328void padata_do_serial(struct padata_priv *padata)
329{
330 int cpu;
331 struct padata_parallel_queue *pqueue;
332 struct parallel_data *pd;
333
334 pd = padata->pd;
335
336 cpu = get_cpu();
337 pqueue = per_cpu_ptr(pd->pqueue, cpu);
338
339 spin_lock(&pqueue->reorder.lock);
340 atomic_inc(&pd->reorder_objects);
341 list_add_tail(&padata->list, &pqueue->reorder.list);
342 spin_unlock(&pqueue->reorder.lock);
343
344 put_cpu();
345
346 padata_reorder(pd);
347}
348EXPORT_SYMBOL(padata_do_serial);
349
350static int padata_setup_cpumasks(struct parallel_data *pd,
351 const struct cpumask *pcpumask,
352 const struct cpumask *cbcpumask)
353{
354 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
355 return -ENOMEM;
356
357 cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
358 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
359 free_cpumask_var(pd->cpumask.cbcpu);
360 return -ENOMEM;
361 }
362
363 cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
364 return 0;
365}
366
367static void __padata_list_init(struct padata_list *pd_list)
368{
369 INIT_LIST_HEAD(&pd_list->list);
370 spin_lock_init(&pd_list->lock);
371}
372
373/* Initialize all percpu queues used by serial workers */
374static void padata_init_squeues(struct parallel_data *pd)
375{
376 int cpu;
377 struct padata_serial_queue *squeue;
378
379 for_each_cpu(cpu, pd->cpumask.cbcpu) {
380 squeue = per_cpu_ptr(pd->squeue, cpu);
381 squeue->pd = pd;
382 __padata_list_init(&squeue->serial);
383 INIT_WORK(&squeue->work, padata_serial_worker);
384 }
385}
386
387/* Initialize all percpu queues used by parallel workers */
388static void padata_init_pqueues(struct parallel_data *pd)
389{
390 int cpu_index, cpu;
391 struct padata_parallel_queue *pqueue;
392
393 cpu_index = 0;
394 for_each_cpu(cpu, pd->cpumask.pcpu) {
395 pqueue = per_cpu_ptr(pd->pqueue, cpu);
396 pqueue->pd = pd;
397 pqueue->cpu_index = cpu_index;
398 cpu_index++;
399
400 __padata_list_init(&pqueue->reorder);
401 __padata_list_init(&pqueue->parallel);
402 INIT_WORK(&pqueue->work, padata_parallel_worker);
403 atomic_set(&pqueue->num_obj, 0);
404 }
405}
406
407/* Allocate and initialize the internal cpumask dependend resources. */
408static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
409 const struct cpumask *pcpumask,
410 const struct cpumask *cbcpumask)
411{
412 struct parallel_data *pd;
413
414 pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
415 if (!pd)
416 goto err;
417
418 pd->pqueue = alloc_percpu(struct padata_parallel_queue);
419 if (!pd->pqueue)
420 goto err_free_pd;
421
422 pd->squeue = alloc_percpu(struct padata_serial_queue);
423 if (!pd->squeue)
424 goto err_free_pqueue;
425 if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
426 goto err_free_squeue;
427
428 padata_init_pqueues(pd);
429 padata_init_squeues(pd);
430 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
431 atomic_set(&pd->seq_nr, -1);
432 atomic_set(&pd->reorder_objects, 0);
433 atomic_set(&pd->refcnt, 0);
434 pd->pinst = pinst;
435 spin_lock_init(&pd->lock);
436
437 return pd;
438
439err_free_squeue:
440 free_percpu(pd->squeue);
441err_free_pqueue:
442 free_percpu(pd->pqueue);
443err_free_pd:
444 kfree(pd);
445err:
446 return NULL;
447}
448
449static void padata_free_pd(struct parallel_data *pd)
450{
451 free_cpumask_var(pd->cpumask.pcpu);
452 free_cpumask_var(pd->cpumask.cbcpu);
453 free_percpu(pd->pqueue);
454 free_percpu(pd->squeue);
455 kfree(pd);
456}
457
458/* Flush all objects out of the padata queues. */
459static void padata_flush_queues(struct parallel_data *pd)
460{
461 int cpu;
462 struct padata_parallel_queue *pqueue;
463 struct padata_serial_queue *squeue;
464
465 for_each_cpu(cpu, pd->cpumask.pcpu) {
466 pqueue = per_cpu_ptr(pd->pqueue, cpu);
467 flush_work(&pqueue->work);
468 }
469
470 del_timer_sync(&pd->timer);
471
472 if (atomic_read(&pd->reorder_objects))
473 padata_reorder(pd);
474
475 for_each_cpu(cpu, pd->cpumask.cbcpu) {
476 squeue = per_cpu_ptr(pd->squeue, cpu);
477 flush_work(&squeue->work);
478 }
479
480 BUG_ON(atomic_read(&pd->refcnt) != 0);
481}
482
483static void __padata_start(struct padata_instance *pinst)
484{
485 pinst->flags |= PADATA_INIT;
486}
487
488static void __padata_stop(struct padata_instance *pinst)
489{
490 if (!(pinst->flags & PADATA_INIT))
491 return;
492
493 pinst->flags &= ~PADATA_INIT;
494
495 synchronize_rcu();
496
497 get_online_cpus();
498 padata_flush_queues(pinst->pd);
499 put_online_cpus();
500}
501
502/* Replace the internal control structure with a new one. */
503static void padata_replace(struct padata_instance *pinst,
504 struct parallel_data *pd_new)
505{
506 struct parallel_data *pd_old = pinst->pd;
507 int notification_mask = 0;
508
509 pinst->flags |= PADATA_RESET;
510
511 rcu_assign_pointer(pinst->pd, pd_new);
512
513 synchronize_rcu();
514
515 if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
516 notification_mask |= PADATA_CPU_PARALLEL;
517 if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
518 notification_mask |= PADATA_CPU_SERIAL;
519
520 padata_flush_queues(pd_old);
521 padata_free_pd(pd_old);
522
523 if (notification_mask)
524 blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
525 notification_mask,
526 &pd_new->cpumask);
527
528 pinst->flags &= ~PADATA_RESET;
529}
530
531/**
532 * padata_register_cpumask_notifier - Registers a notifier that will be called
533 * if either pcpu or cbcpu or both cpumasks change.
534 *
535 * @pinst: A poineter to padata instance
536 * @nblock: A pointer to notifier block.
537 */
538int padata_register_cpumask_notifier(struct padata_instance *pinst,
539 struct notifier_block *nblock)
540{
541 return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
542 nblock);
543}
544EXPORT_SYMBOL(padata_register_cpumask_notifier);
545
546/**
547 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
548 * registered earlier using padata_register_cpumask_notifier
549 *
550 * @pinst: A pointer to data instance.
551 * @nlock: A pointer to notifier block.
552 */
553int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
554 struct notifier_block *nblock)
555{
556 return blocking_notifier_chain_unregister(
557 &pinst->cpumask_change_notifier,
558 nblock);
559}
560EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
561
562
563/* If cpumask contains no active cpu, we mark the instance as invalid. */
564static bool padata_validate_cpumask(struct padata_instance *pinst,
565 const struct cpumask *cpumask)
566{
567 if (!cpumask_intersects(cpumask, cpu_online_mask)) {
568 pinst->flags |= PADATA_INVALID;
569 return false;
570 }
571
572 pinst->flags &= ~PADATA_INVALID;
573 return true;
574}
575
576static int __padata_set_cpumasks(struct padata_instance *pinst,
577 cpumask_var_t pcpumask,
578 cpumask_var_t cbcpumask)
579{
580 int valid;
581 struct parallel_data *pd;
582
583 valid = padata_validate_cpumask(pinst, pcpumask);
584 if (!valid) {
585 __padata_stop(pinst);
586 goto out_replace;
587 }
588
589 valid = padata_validate_cpumask(pinst, cbcpumask);
590 if (!valid)
591 __padata_stop(pinst);
592
593out_replace:
594 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
595 if (!pd)
596 return -ENOMEM;
597
598 cpumask_copy(pinst->cpumask.pcpu, pcpumask);
599 cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
600
601 padata_replace(pinst, pd);
602
603 if (valid)
604 __padata_start(pinst);
605
606 return 0;
607}
608
609/**
610 * padata_set_cpumasks - Set both parallel and serial cpumasks. The first
611 * one is used by parallel workers and the second one
612 * by the wokers doing serialization.
613 *
614 * @pinst: padata instance
615 * @pcpumask: the cpumask to use for parallel workers
616 * @cbcpumask: the cpumsak to use for serial workers
617 */
618int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask,
619 cpumask_var_t cbcpumask)
620{
621 int err;
622
623 mutex_lock(&pinst->lock);
624 get_online_cpus();
625
626 err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask);
627
628 put_online_cpus();
629 mutex_unlock(&pinst->lock);
630
631 return err;
632
633}
634EXPORT_SYMBOL(padata_set_cpumasks);
635
636/**
637 * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value
638 * equivalent to @cpumask.
639 *
640 * @pinst: padata instance
641 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
642 * to parallel and serial cpumasks respectively.
643 * @cpumask: the cpumask to use
644 */
645int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
646 cpumask_var_t cpumask)
647{
648 struct cpumask *serial_mask, *parallel_mask;
649 int err = -EINVAL;
650
651 mutex_lock(&pinst->lock);
652 get_online_cpus();
653
654 switch (cpumask_type) {
655 case PADATA_CPU_PARALLEL:
656 serial_mask = pinst->cpumask.cbcpu;
657 parallel_mask = cpumask;
658 break;
659 case PADATA_CPU_SERIAL:
660 parallel_mask = pinst->cpumask.pcpu;
661 serial_mask = cpumask;
662 break;
663 default:
664 goto out;
665 }
666
667 err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
668
669out:
670 put_online_cpus();
671 mutex_unlock(&pinst->lock);
672
673 return err;
674}
675EXPORT_SYMBOL(padata_set_cpumask);
676
677static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
678{
679 struct parallel_data *pd;
680
681 if (cpumask_test_cpu(cpu, cpu_online_mask)) {
682 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
683 pinst->cpumask.cbcpu);
684 if (!pd)
685 return -ENOMEM;
686
687 padata_replace(pinst, pd);
688
689 if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
690 padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
691 __padata_start(pinst);
692 }
693
694 return 0;
695}
696
697 /**
698 * padata_add_cpu - add a cpu to one or both(parallel and serial)
699 * padata cpumasks.
700 *
701 * @pinst: padata instance
702 * @cpu: cpu to add
703 * @mask: bitmask of flags specifying to which cpumask @cpu shuld be added.
704 * The @mask may be any combination of the following flags:
705 * PADATA_CPU_SERIAL - serial cpumask
706 * PADATA_CPU_PARALLEL - parallel cpumask
707 */
708
709int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)
710{
711 int err;
712
713 if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
714 return -EINVAL;
715
716 mutex_lock(&pinst->lock);
717
718 get_online_cpus();
719 if (mask & PADATA_CPU_SERIAL)
720 cpumask_set_cpu(cpu, pinst->cpumask.cbcpu);
721 if (mask & PADATA_CPU_PARALLEL)
722 cpumask_set_cpu(cpu, pinst->cpumask.pcpu);
723
724 err = __padata_add_cpu(pinst, cpu);
725 put_online_cpus();
726
727 mutex_unlock(&pinst->lock);
728
729 return err;
730}
731EXPORT_SYMBOL(padata_add_cpu);
732
733static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
734{
735 struct parallel_data *pd = NULL;
736
737 if (cpumask_test_cpu(cpu, cpu_online_mask)) {
738
739 if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
740 !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
741 __padata_stop(pinst);
742
743 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
744 pinst->cpumask.cbcpu);
745 if (!pd)
746 return -ENOMEM;
747
748 padata_replace(pinst, pd);
749
750 cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
751 cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
752 }
753
754 return 0;
755}
756
757 /**
758 * padata_remove_cpu - remove a cpu from the one or both(serial and parallel)
759 * padata cpumasks.
760 *
761 * @pinst: padata instance
762 * @cpu: cpu to remove
763 * @mask: bitmask specifying from which cpumask @cpu should be removed
764 * The @mask may be any combination of the following flags:
765 * PADATA_CPU_SERIAL - serial cpumask
766 * PADATA_CPU_PARALLEL - parallel cpumask
767 */
768int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
769{
770 int err;
771
772 if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
773 return -EINVAL;
774
775 mutex_lock(&pinst->lock);
776
777 get_online_cpus();
778 if (mask & PADATA_CPU_SERIAL)
779 cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
780 if (mask & PADATA_CPU_PARALLEL)
781 cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);
782
783 err = __padata_remove_cpu(pinst, cpu);
784 put_online_cpus();
785
786 mutex_unlock(&pinst->lock);
787
788 return err;
789}
790EXPORT_SYMBOL(padata_remove_cpu);
791
792/**
793 * padata_start - start the parallel processing
794 *
795 * @pinst: padata instance to start
796 */
797int padata_start(struct padata_instance *pinst)
798{
799 int err = 0;
800
801 mutex_lock(&pinst->lock);
802
803 if (pinst->flags & PADATA_INVALID)
804 err =-EINVAL;
805
806 __padata_start(pinst);
807
808 mutex_unlock(&pinst->lock);
809
810 return err;
811}
812EXPORT_SYMBOL(padata_start);
813
814/**
815 * padata_stop - stop the parallel processing
816 *
817 * @pinst: padata instance to stop
818 */
819void padata_stop(struct padata_instance *pinst)
820{
821 mutex_lock(&pinst->lock);
822 __padata_stop(pinst);
823 mutex_unlock(&pinst->lock);
824}
825EXPORT_SYMBOL(padata_stop);
826
827#ifdef CONFIG_HOTPLUG_CPU
828
829static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
830{
831 return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
832 cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
833}
834
835
836static int padata_cpu_callback(struct notifier_block *nfb,
837 unsigned long action, void *hcpu)
838{
839 int err;
840 struct padata_instance *pinst;
841 int cpu = (unsigned long)hcpu;
842
843 pinst = container_of(nfb, struct padata_instance, cpu_notifier);
844
845 switch (action) {
846 case CPU_ONLINE:
847 case CPU_ONLINE_FROZEN:
848 case CPU_DOWN_FAILED:
849 case CPU_DOWN_FAILED_FROZEN:
850 if (!pinst_has_cpu(pinst, cpu))
851 break;
852 mutex_lock(&pinst->lock);
853 err = __padata_add_cpu(pinst, cpu);
854 mutex_unlock(&pinst->lock);
855 if (err)
856 return notifier_from_errno(err);
857 break;
858
859 case CPU_DOWN_PREPARE:
860 case CPU_DOWN_PREPARE_FROZEN:
861 case CPU_UP_CANCELED:
862 case CPU_UP_CANCELED_FROZEN:
863 if (!pinst_has_cpu(pinst, cpu))
864 break;
865 mutex_lock(&pinst->lock);
866 err = __padata_remove_cpu(pinst, cpu);
867 mutex_unlock(&pinst->lock);
868 if (err)
869 return notifier_from_errno(err);
870 break;
871 }
872
873 return NOTIFY_OK;
874}
875#endif
876
877static void __padata_free(struct padata_instance *pinst)
878{
879#ifdef CONFIG_HOTPLUG_CPU
880 unregister_hotcpu_notifier(&pinst->cpu_notifier);
881#endif
882
883 padata_stop(pinst);
884 padata_free_pd(pinst->pd);
885 free_cpumask_var(pinst->cpumask.pcpu);
886 free_cpumask_var(pinst->cpumask.cbcpu);
887 kfree(pinst);
888}
889
890#define kobj2pinst(_kobj) \
891 container_of(_kobj, struct padata_instance, kobj)
892#define attr2pentry(_attr) \
893 container_of(_attr, struct padata_sysfs_entry, attr)
894
895static void padata_sysfs_release(struct kobject *kobj)
896{
897 struct padata_instance *pinst = kobj2pinst(kobj);
898 __padata_free(pinst);
899}
900
901struct padata_sysfs_entry {
902 struct attribute attr;
903 ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
904 ssize_t (*store)(struct padata_instance *, struct attribute *,
905 const char *, size_t);
906};
907
908static ssize_t show_cpumask(struct padata_instance *pinst,
909 struct attribute *attr, char *buf)
910{
911 struct cpumask *cpumask;
912 ssize_t len;
913
914 mutex_lock(&pinst->lock);
915 if (!strcmp(attr->name, "serial_cpumask"))
916 cpumask = pinst->cpumask.cbcpu;
917 else
918 cpumask = pinst->cpumask.pcpu;
919
920 len = snprintf(buf, PAGE_SIZE, "%*pb\n",
921 nr_cpu_ids, cpumask_bits(cpumask));
922 mutex_unlock(&pinst->lock);
923 return len < PAGE_SIZE ? len : -EINVAL;
924}
925
926static ssize_t store_cpumask(struct padata_instance *pinst,
927 struct attribute *attr,
928 const char *buf, size_t count)
929{
930 cpumask_var_t new_cpumask;
931 ssize_t ret;
932 int mask_type;
933
934 if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
935 return -ENOMEM;
936
937 ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
938 nr_cpumask_bits);
939 if (ret < 0)
940 goto out;
941
942 mask_type = !strcmp(attr->name, "serial_cpumask") ?
943 PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
944 ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
945 if (!ret)
946 ret = count;
947
948out:
949 free_cpumask_var(new_cpumask);
950 return ret;
951}
952
953#define PADATA_ATTR_RW(_name, _show_name, _store_name) \
954 static struct padata_sysfs_entry _name##_attr = \
955 __ATTR(_name, 0644, _show_name, _store_name)
956#define PADATA_ATTR_RO(_name, _show_name) \
957 static struct padata_sysfs_entry _name##_attr = \
958 __ATTR(_name, 0400, _show_name, NULL)
959
960PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
961PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
962
963/*
964 * Padata sysfs provides the following objects:
965 * serial_cpumask [RW] - cpumask for serial workers
966 * parallel_cpumask [RW] - cpumask for parallel workers
967 */
968static struct attribute *padata_default_attrs[] = {
969 &serial_cpumask_attr.attr,
970 ¶llel_cpumask_attr.attr,
971 NULL,
972};
973
974static ssize_t padata_sysfs_show(struct kobject *kobj,
975 struct attribute *attr, char *buf)
976{
977 struct padata_instance *pinst;
978 struct padata_sysfs_entry *pentry;
979 ssize_t ret = -EIO;
980
981 pinst = kobj2pinst(kobj);
982 pentry = attr2pentry(attr);
983 if (pentry->show)
984 ret = pentry->show(pinst, attr, buf);
985
986 return ret;
987}
988
989static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
990 const char *buf, size_t count)
991{
992 struct padata_instance *pinst;
993 struct padata_sysfs_entry *pentry;
994 ssize_t ret = -EIO;
995
996 pinst = kobj2pinst(kobj);
997 pentry = attr2pentry(attr);
998 if (pentry->show)
999 ret = pentry->store(pinst, attr, buf, count);
1000
1001 return ret;
1002}
1003
1004static const struct sysfs_ops padata_sysfs_ops = {
1005 .show = padata_sysfs_show,
1006 .store = padata_sysfs_store,
1007};
1008
1009static struct kobj_type padata_attr_type = {
1010 .sysfs_ops = &padata_sysfs_ops,
1011 .default_attrs = padata_default_attrs,
1012 .release = padata_sysfs_release,
1013};
1014
1015/**
1016 * padata_alloc_possible - Allocate and initialize padata instance.
1017 * Use the cpu_possible_mask for serial and
1018 * parallel workers.
1019 *
1020 * @wq: workqueue to use for the allocated padata instance
1021 */
1022struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
1023{
1024 return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
1025}
1026EXPORT_SYMBOL(padata_alloc_possible);
1027
1028/**
1029 * padata_alloc - allocate and initialize a padata instance and specify
1030 * cpumasks for serial and parallel workers.
1031 *
1032 * @wq: workqueue to use for the allocated padata instance
1033 * @pcpumask: cpumask that will be used for padata parallelization
1034 * @cbcpumask: cpumask that will be used for padata serialization
1035 */
1036struct padata_instance *padata_alloc(struct workqueue_struct *wq,
1037 const struct cpumask *pcpumask,
1038 const struct cpumask *cbcpumask)
1039{
1040 struct padata_instance *pinst;
1041 struct parallel_data *pd = NULL;
1042
1043 pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
1044 if (!pinst)
1045 goto err;
1046
1047 get_online_cpus();
1048 if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
1049 goto err_free_inst;
1050 if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
1051 free_cpumask_var(pinst->cpumask.pcpu);
1052 goto err_free_inst;
1053 }
1054 if (!padata_validate_cpumask(pinst, pcpumask) ||
1055 !padata_validate_cpumask(pinst, cbcpumask))
1056 goto err_free_masks;
1057
1058 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
1059 if (!pd)
1060 goto err_free_masks;
1061
1062 rcu_assign_pointer(pinst->pd, pd);
1063
1064 pinst->wq = wq;
1065
1066 cpumask_copy(pinst->cpumask.pcpu, pcpumask);
1067 cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
1068
1069 pinst->flags = 0;
1070
1071 put_online_cpus();
1072
1073 BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
1074 kobject_init(&pinst->kobj, &padata_attr_type);
1075 mutex_init(&pinst->lock);
1076
1077#ifdef CONFIG_HOTPLUG_CPU
1078 pinst->cpu_notifier.notifier_call = padata_cpu_callback;
1079 pinst->cpu_notifier.priority = 0;
1080 register_hotcpu_notifier(&pinst->cpu_notifier);
1081#endif
1082
1083 return pinst;
1084
1085err_free_masks:
1086 free_cpumask_var(pinst->cpumask.pcpu);
1087 free_cpumask_var(pinst->cpumask.cbcpu);
1088err_free_inst:
1089 kfree(pinst);
1090 put_online_cpus();
1091err:
1092 return NULL;
1093}
1094EXPORT_SYMBOL(padata_alloc);
1095
1096/**
1097 * padata_free - free a padata instance
1098 *
1099 * @padata_inst: padata instance to free
1100 */
1101void padata_free(struct padata_instance *pinst)
1102{
1103 kobject_put(&pinst->kobj);
1104}
1105EXPORT_SYMBOL(padata_free);