v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * padata.c - generic interface to process data streams in parallel
   4 *
   5 * See Documentation/core-api/padata.rst for more information.
   6 *
   7 * Copyright (C) 2008, 2009 secunet Security Networks AG
   8 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
   9 *
  10 * Copyright (c) 2020 Oracle and/or its affiliates.
  11 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
  12 */
  13
  14#include <linux/completion.h>
  15#include <linux/export.h>
  16#include <linux/cpumask.h>
  17#include <linux/err.h>
  18#include <linux/cpu.h>
  19#include <linux/padata.h>
  20#include <linux/mutex.h>
  21#include <linux/sched.h>
  22#include <linux/slab.h>
  23#include <linux/sysfs.h>
  24#include <linux/rcupdate.h>
  25
  26#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */
  27
  28struct padata_work {
  29	struct work_struct	pw_work;
  30	struct list_head	pw_list;  /* padata_free_works linkage */
  31	void			*pw_data;
  32};
  33
  34static DEFINE_SPINLOCK(padata_works_lock);
  35static struct padata_work *padata_works;
  36static LIST_HEAD(padata_free_works);
  37
  38struct padata_mt_job_state {
  39	spinlock_t		lock;
  40	struct completion	completion;
  41	struct padata_mt_job	*job;
  42	int			nworks;
  43	int			nworks_fini;
  44	unsigned long		chunk_size;
  45};
  46
  47static void padata_free_pd(struct parallel_data *pd);
  48static void __init padata_mt_helper(struct work_struct *work);
  49
  50static inline void padata_get_pd(struct parallel_data *pd)
  51{
  52	refcount_inc(&pd->refcnt);
  53}
  54
  55static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
  56{
  57	if (refcount_sub_and_test(cnt, &pd->refcnt))
  58		padata_free_pd(pd);
  59}
  60
  61static inline void padata_put_pd(struct parallel_data *pd)
  62{
  63	padata_put_pd_cnt(pd, 1);
  64}
  65
  66static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
  67{
  68	int cpu, target_cpu;
  69
  70	target_cpu = cpumask_first(pd->cpumask.pcpu);
  71	for (cpu = 0; cpu < cpu_index; cpu++)
  72		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);
  73
  74	return target_cpu;
  75}
  76
  77static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
  78{
  79	/*
  80	 * Hash the sequence number to a cpu by taking
  81	 * seq_nr modulo the number of cpus in use.
  82	 */
  83	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
  84
  85	return padata_index_to_cpu(pd, cpu_index);
  86}
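/*
 * Worked example (illustrative, not part of the file): if
 * pd->cpumask.pcpu contains CPUs {1, 4, 6}, cpumask_weight() is 3, so
 * seq_nr == 7 yields cpu_index = 7 % 3 = 1 and padata_index_to_cpu()
 * walks the mask to CPU 4.  Objects are thus spread round-robin over
 * the parallel cpumask in sequence-number order.
 */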
  87
  88static struct padata_work *padata_work_alloc(void)
  89{
  90	struct padata_work *pw;
  91
  92	lockdep_assert_held(&padata_works_lock);
  93
  94	if (list_empty(&padata_free_works))
  95		return NULL;	/* No more work items allowed to be queued. */
  96
  97	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
  98	list_del(&pw->pw_list);
  99	return pw;
 100}
 101
 102/*
 103 * This function is marked __ref because it may be optimized in such
 104 * a way that it directly refers to work_fn's address, which causes modpost to
 105 * complain when work_fn is marked __init. This scenario was observed with clang
 106 * LTO, where padata_work_init() was optimized to refer directly to
 107 * padata_mt_helper() because the calls to padata_work_init() with other work_fn
 108 * values were eliminated or inlined.
 109 */
 110static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
 111				   void *data, int flags)
 112{
 113	if (flags & PADATA_WORK_ONSTACK)
 114		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
 115	else
 116		INIT_WORK(&pw->pw_work, work_fn);
 117	pw->pw_data = data;
 118}
 119
 120static int __init padata_work_alloc_mt(int nworks, void *data,
 121				       struct list_head *head)
 122{
 123	int i;
 124
 125	spin_lock_bh(&padata_works_lock);
 126	/* Start at 1 because the current task participates in the job. */
 127	for (i = 1; i < nworks; ++i) {
 128		struct padata_work *pw = padata_work_alloc();
 129
 130		if (!pw)
 131			break;
 132		padata_work_init(pw, padata_mt_helper, data, 0);
 133		list_add(&pw->pw_list, head);
 134	}
 135	spin_unlock_bh(&padata_works_lock);
 136
 137	return i;
 138}
 139
 140static void padata_work_free(struct padata_work *pw)
 141{
 142	lockdep_assert_held(&padata_works_lock);
 143	list_add(&pw->pw_list, &padata_free_works);
 144}
 145
 146static void __init padata_works_free(struct list_head *works)
 147{
 148	struct padata_work *cur, *next;
 149
 150	if (list_empty(works))
 151		return;
 152
 153	spin_lock_bh(&padata_works_lock);
 154	list_for_each_entry_safe(cur, next, works, pw_list) {
 155		list_del(&cur->pw_list);
 156		padata_work_free(cur);
 157	}
 158	spin_unlock_bh(&padata_works_lock);
 159}
 160
 161static void padata_parallel_worker(struct work_struct *parallel_work)
 162{
 163	struct padata_work *pw = container_of(parallel_work, struct padata_work,
 164					      pw_work);
 165	struct padata_priv *padata = pw->pw_data;
 166
 167	local_bh_disable();
 168	padata->parallel(padata);
 169	spin_lock(&padata_works_lock);
 170	padata_work_free(pw);
 171	spin_unlock(&padata_works_lock);
 172	local_bh_enable();
 173}
 174
 175/**
 176 * padata_do_parallel - padata parallelization function
 177 *
 178 * @ps: padata shell
 179 * @padata: object to be parallelized
 180 * @cb_cpu: pointer to the CPU that the serialization callback function should
 181 *          run on.  If it's not in the serial cpumask of @pinst
 182 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 183 *          none found, returns -EINVAL.
 184 *
 185 * The parallelization callback function will run with BHs off.
 186 * Note: Every object which is parallelized by padata_do_parallel
 187 * must be seen by padata_do_serial.
 188 *
 189 * Return: 0 on success or else negative error code.
 190 */
 191int padata_do_parallel(struct padata_shell *ps,
 192		       struct padata_priv *padata, int *cb_cpu)
 193{
 194	struct padata_instance *pinst = ps->pinst;
 195	int i, cpu, cpu_index, err;
 196	struct parallel_data *pd;
 197	struct padata_work *pw;
 198
 199	rcu_read_lock_bh();
 200
 201	pd = rcu_dereference_bh(ps->pd);
 202
 203	err = -EINVAL;
 204	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
 205		goto out;
 206
 207	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
 208		if (cpumask_empty(pd->cpumask.cbcpu))
 209			goto out;
 210
 211		/* Select an alternate fallback CPU and notify the caller. */
 212		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);
 213
 214		cpu = cpumask_first(pd->cpumask.cbcpu);
 215		for (i = 0; i < cpu_index; i++)
 216			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);
 217
 218		*cb_cpu = cpu;
 219	}
 220
 221	err = -EBUSY;
 222	if ((pinst->flags & PADATA_RESET))
 223		goto out;
 224
 225	padata_get_pd(pd);
 226	padata->pd = pd;
 227	padata->cb_cpu = *cb_cpu;
 228
 229	spin_lock(&padata_works_lock);
 230	padata->seq_nr = ++pd->seq_nr;
 231	pw = padata_work_alloc();
 232	spin_unlock(&padata_works_lock);
 233
 234	if (!pw) {
 235		/* Maximum works limit exceeded, run in the current task. */
 236		padata->parallel(padata);
 237	}
 238
 239	rcu_read_unlock_bh();
 240
 241	if (pw) {
 242		padata_work_init(pw, padata_parallel_worker, padata, 0);
 243		queue_work(pinst->parallel_wq, &pw->pw_work);
 244	}
 245
 246	return 0;
 247out:
 248	rcu_read_unlock_bh();
 249
 250	return err;
 251}
 252EXPORT_SYMBOL(padata_do_parallel);
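/*
 * Usage sketch (illustrative; modeled on how the pcrypt template drives
 * this API -- my_request, my_parallel, my_serial and my_submit are
 * hypothetical names; <linux/padata.h> declares the entry points):
 */
struct my_request {
	struct padata_priv padata;	/* embedded control structure */
	/* caller-private fields ... */
};

static void my_parallel(struct padata_priv *padata)
{
	/*
	 * Recover the enclosing request with container_of() and do the
	 * heavy per-object work here; BHs are off, on a parallel CPU.
	 */

	/* Every parallelized object must reach padata_do_serial(). */
	padata_do_serial(padata);
}

static void my_serial(struct padata_priv *padata)
{
	/* Completion work; objects arrive here in submission order. */
}

static int my_submit(struct padata_shell *ps, struct my_request *req,
		     int cb_cpu)
{
	req->padata.parallel = my_parallel;
	req->padata.serial = my_serial;

	/* May return -EBUSY or -EINVAL; callers typically fall back. */
	return padata_do_parallel(ps, &req->padata, &cb_cpu);
}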
 253
 254/*
 255 * padata_find_next - Find the next object that needs serialization.
 256 *
 257 * Return:
 258 * * A pointer to the control struct of the next object that needs
 259 *   serialization, if present in one of the percpu reorder queues.
 260 * * NULL, if the next object that needs serialization will
 261 *   be parallel processed by another cpu and is not yet present in
 262 *   the cpu's reorder queue.
 263 */
 264static struct padata_priv *padata_find_next(struct parallel_data *pd,
 265					    bool remove_object)
 266{
 267	struct padata_priv *padata;
 268	struct padata_list *reorder;
 269	int cpu = pd->cpu;
 270
 271	reorder = per_cpu_ptr(pd->reorder_list, cpu);
 272
 273	spin_lock(&reorder->lock);
 274	if (list_empty(&reorder->list)) {
 275		spin_unlock(&reorder->lock);
 276		return NULL;
 277	}
 278
 279	padata = list_entry(reorder->list.next, struct padata_priv, list);
 280
 281	/*
 282	 * Checks the rare case where two or more parallel jobs have hashed to
 283	 * the same CPU and one of the later ones finishes first.
 284	 */
 285	if (padata->seq_nr != pd->processed) {
 286		spin_unlock(&reorder->lock);
 287		return NULL;
 288	}
 289
 290	if (remove_object) {
 291		list_del_init(&padata->list);
 292		++pd->processed;
 293		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
 294	}
 295
 296	spin_unlock(&reorder->lock);
 297	return padata;
 298}
 299
 300static void padata_reorder(struct parallel_data *pd)
 301{
 302	struct padata_instance *pinst = pd->ps->pinst;
 303	int cb_cpu;
 304	struct padata_priv *padata;
 305	struct padata_serial_queue *squeue;
 306	struct padata_list *reorder;
 307
 308	/*
 309	 * We need to ensure that only one cpu can work on dequeueing of
 310	 * the reorder queue at a time. Calculating in which percpu reorder
 311	 * queue the next object will arrive takes some time. A spinlock
 312	 * would be highly contended. Also it is not clear in which order
 313	 * the objects arrive at the reorder queues. So a cpu could wait to
 314	 * get the lock just to notice that there is nothing to do at the
 315	 * moment. Therefore we use a trylock and let the holder of the lock
 316	 * care for all the objects enqueued during the hold time of the lock.
 317	 */
 318	if (!spin_trylock_bh(&pd->lock))
 319		return;
 320
 321	while (1) {
 322		padata = padata_find_next(pd, true);
 323
 324		/*
 325		 * If the next object that needs serialization is parallel
 326		 * processed by another cpu and is still on its way to the
 327		 * cpu's reorder queue, nothing to do for now.
 328		 */
 329		if (!padata)
 330			break;
 331
 332		cb_cpu = padata->cb_cpu;
 333		squeue = per_cpu_ptr(pd->squeue, cb_cpu);
 334
 335		spin_lock(&squeue->serial.lock);
 336		list_add_tail(&padata->list, &squeue->serial.list);
 337		spin_unlock(&squeue->serial.lock);
 338
 339		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
 340	}
 341
 342	spin_unlock_bh(&pd->lock);
 343
 344	/*
 345	 * The next object that needs serialization might have arrived at
 346	 * the reorder queues in the meantime.
 347	 *
 348	 * Ensure reorder queue is read after pd->lock is dropped so we see
 349	 * new objects from another task in padata_do_serial.  Pairs with
 350	 * smp_mb in padata_do_serial.
 351	 */
 352	smp_mb();
 353
 354	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
 355	if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
 356		/*
 357		 * Another context (e.g. padata_serial_worker) can finish the request.
 358		 * To avoid a use-after-free, take a pd reference here and drop it after reorder_work finishes.
 359		 */
 360		padata_get_pd(pd);
 361		queue_work(pinst->serial_wq, &pd->reorder_work);
 362	}
 363}
 364
 365static void invoke_padata_reorder(struct work_struct *work)
 366{
 367	struct parallel_data *pd;
 368
 369	local_bh_disable();
 370	pd = container_of(work, struct parallel_data, reorder_work);
 371	padata_reorder(pd);
 372	local_bh_enable();
 373	/* Pairs with putting the reorder_work in the serial_wq */
 374	padata_put_pd(pd);
 375}
 376
 377static void padata_serial_worker(struct work_struct *serial_work)
 378{
 379	struct padata_serial_queue *squeue;
 380	struct parallel_data *pd;
 381	LIST_HEAD(local_list);
 382	int cnt;
 383
 384	local_bh_disable();
 385	squeue = container_of(serial_work, struct padata_serial_queue, work);
 386	pd = squeue->pd;
 387
 388	spin_lock(&squeue->serial.lock);
 389	list_replace_init(&squeue->serial.list, &local_list);
 390	spin_unlock(&squeue->serial.lock);
 391
 392	cnt = 0;
 393
 394	while (!list_empty(&local_list)) {
 395		struct padata_priv *padata;
 396
 397		padata = list_entry(local_list.next,
 398				    struct padata_priv, list);
 399
 400		list_del_init(&padata->list);
 401
 402		padata->serial(padata);
 403		cnt++;
 404	}
 405	local_bh_enable();
 406
 407	padata_put_pd_cnt(pd, cnt);
 408}
 409
 410/**
 411 * padata_do_serial - padata serialization function
 412 *
 413 * @padata: object to be serialized.
 414 *
 415 * padata_do_serial must be called for every parallelized object.
 416 * The serialization callback function will run with BHs off.
 417 */
 418void padata_do_serial(struct padata_priv *padata)
 419{
 420	struct parallel_data *pd = padata->pd;
 421	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
 422	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
 423	struct padata_priv *cur;
 424	struct list_head *pos;
 425
 426	spin_lock(&reorder->lock);
 427	/* Sort in ascending order of sequence number. */
 428	list_for_each_prev(pos, &reorder->list) {
 429		cur = list_entry(pos, struct padata_priv, list);
 430		/* Compare by difference to consider integer wrap around */
 431		if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
 432			break;
 433	}
 434	list_add(&padata->list, pos);
 435	spin_unlock(&reorder->lock);
 436
 437	/*
 438	 * Ensure the addition to the reorder list is ordered correctly
 439	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
 440	 * in padata_reorder.
 441	 */
 442	smp_mb();
 443
 444	padata_reorder(pd);
 445}
 446EXPORT_SYMBOL(padata_do_serial);
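/*
 * Worked example (illustrative): with two parallel CPUs A and B, objects
 * with seq_nr 0..3 hash alternately to A and B.  If the job with seq_nr 1
 * finishes first, padata_do_serial() sorts it into B's reorder list, but
 * padata_reorder() only dequeues from pd->cpu (still A, with pd->processed
 * == 0), so nothing is delivered until seq_nr 0 completes; the serial
 * callbacks then run in order 0, 1, 2, 3.
 */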
 447
 448static int padata_setup_cpumasks(struct padata_instance *pinst)
 449{
 450	struct workqueue_attrs *attrs;
 451	int err;
 452
 453	attrs = alloc_workqueue_attrs();
 454	if (!attrs)
 455		return -ENOMEM;
 456
 457	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
 458	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
 459	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
 460	free_workqueue_attrs(attrs);
 461
 462	return err;
 463}
 464
 465static void __init padata_mt_helper(struct work_struct *w)
 466{
 467	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
 468	struct padata_mt_job_state *ps = pw->pw_data;
 469	struct padata_mt_job *job = ps->job;
 470	bool done;
 471
 472	spin_lock(&ps->lock);
 473
 474	while (job->size > 0) {
 475		unsigned long start, size, end;
 476
 477		start = job->start;
 478		/* So end is chunk size aligned if enough work remains. */
 479		size = roundup(start + 1, ps->chunk_size) - start;
 480		size = min(size, job->size);
 481		end = start + size;
 482
 483		job->start = end;
 484		job->size -= size;
 485
 486		spin_unlock(&ps->lock);
 487		job->thread_fn(start, end, job->fn_arg);
 488		spin_lock(&ps->lock);
 489	}
 490
 491	++ps->nworks_fini;
 492	done = (ps->nworks_fini == ps->nworks);
 493	spin_unlock(&ps->lock);
 494
 495	if (done)
 496		complete(&ps->completion);
 497}
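/*
 * Worked example (illustrative): with job->start == 10, ps->chunk_size
 * == 8 and plenty of work remaining, size = roundup(11, 8) - 10 = 6, so
 * this pass covers [10, 16) and leaves job->start chunk-size aligned;
 * later passes then take full chunks [16, 24), [24, 32), and so on.
 */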
 498
 499/**
 500 * padata_do_multithreaded - run a multithreaded job
 501 * @job: Description of the job.
 502 *
 503 * See the definition of struct padata_mt_job for more details.
 504 */
 505void __init padata_do_multithreaded(struct padata_mt_job *job)
 506{
 507	/* In case threads finish at different times. */
 508	static const unsigned long load_balance_factor = 4;
 509	struct padata_work my_work, *pw;
 510	struct padata_mt_job_state ps;
 511	LIST_HEAD(works);
 512	int nworks, nid;
 513	static atomic_t last_used_nid __initdata;
 514
 515	if (job->size == 0)
 516		return;
 517
 518	/* Ensure at least one thread when size < min_chunk. */
 519	nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
 520	nworks = min(nworks, job->max_threads);
 521
 522	if (nworks == 1) {
 523		/* Single thread, no coordination needed, cut to the chase. */
 524		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
 525		return;
 526	}
 527
 528	spin_lock_init(&ps.lock);
 529	init_completion(&ps.completion);
 530	ps.job	       = job;
 531	ps.nworks      = padata_work_alloc_mt(nworks, &ps, &works);
 532	ps.nworks_fini = 0;
 533
 534	/*
 535	 * Chunk size is the amount of work a helper does per call to the
 536	 * thread function.  Load balance large jobs between threads by
 537	 * increasing the number of chunks, guarantee at least the minimum
 538	 * chunk size from the caller, and honor the caller's alignment.
 539	 * Ensure chunk_size is at least 1 to prevent divide-by-0
 540	 * panic in padata_mt_helper().
 541	 */
 542	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
 543	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
 544	ps.chunk_size = max(ps.chunk_size, 1ul);
 545	ps.chunk_size = roundup(ps.chunk_size, job->align);
 546
 547	list_for_each_entry(pw, &works, pw_list)
 548		if (job->numa_aware) {
 549			int old_node = atomic_read(&last_used_nid);
 550
 551			do {
 552				nid = next_node_in(old_node, node_states[N_CPU]);
 553			} while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid));
 554			queue_work_node(nid, system_unbound_wq, &pw->pw_work);
 555		} else {
 556			queue_work(system_unbound_wq, &pw->pw_work);
 557		}
 558
 559	/* Use the current thread, which saves starting a workqueue worker. */
 560	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
 561	padata_mt_helper(&my_work.pw_work);
 562
 563	/* Wait for all the helpers to finish. */
 564	wait_for_completion(&ps.completion);
 565
 566	destroy_work_on_stack(&my_work.pw_work);
 567	padata_works_free(&works);
 568}
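/*
 * Usage sketch (illustrative; loosely modeled on deferred struct-page
 * initialization in mm -- init_range, init_all and nr_items are
 * hypothetical):
 */
static void __init init_range(unsigned long start, unsigned long end,
			      void *arg)
{
	/* Initialize items in [start, end); runs concurrently in helpers. */
}

static void __init init_all(unsigned long nr_items)
{
	struct padata_mt_job job = {
		.thread_fn   = init_range,
		.fn_arg      = NULL,
		.start       = 0,
		.size        = nr_items,
		.align       = 1,
		.min_chunk   = 1024,	/* lower bound on per-call work */
		.max_threads = num_online_cpus(),
		.numa_aware  = false,
	};

	/* Returns only after every chunk has been processed. */
	padata_do_multithreaded(&job);
}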
 569
 570static void __padata_list_init(struct padata_list *pd_list)
 571{
 572	INIT_LIST_HEAD(&pd_list->list);
 573	spin_lock_init(&pd_list->lock);
 574}
 575
 576/* Initialize all percpu queues used by serial workers */
 577static void padata_init_squeues(struct parallel_data *pd)
 578{
 579	int cpu;
 580	struct padata_serial_queue *squeue;
 581
 582	for_each_cpu(cpu, pd->cpumask.cbcpu) {
 583		squeue = per_cpu_ptr(pd->squeue, cpu);
 584		squeue->pd = pd;
 585		__padata_list_init(&squeue->serial);
 586		INIT_WORK(&squeue->work, padata_serial_worker);
 587	}
 588}
 589
 590/* Initialize per-CPU reorder lists */
 591static void padata_init_reorder_list(struct parallel_data *pd)
 592{
 593	int cpu;
 594	struct padata_list *list;
 595
 596	for_each_cpu(cpu, pd->cpumask.pcpu) {
 597		list = per_cpu_ptr(pd->reorder_list, cpu);
 598		__padata_list_init(list);
 599	}
 600}
 601
 602/* Allocate and initialize the internal cpumask-dependent resources. */
 603static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
 604{
 605	struct padata_instance *pinst = ps->pinst;
 606	struct parallel_data *pd;
 607
 608	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
 609	if (!pd)
 610		goto err;
 611
 612	pd->reorder_list = alloc_percpu(struct padata_list);
 613	if (!pd->reorder_list)
 614		goto err_free_pd;
 615
 616	pd->squeue = alloc_percpu(struct padata_serial_queue);
 617	if (!pd->squeue)
 618		goto err_free_reorder_list;
 619
 620	pd->ps = ps;
 621
 622	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
 623		goto err_free_squeue;
 624	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
 625		goto err_free_pcpu;
 626
 627	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
 628	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);
 629
 630	padata_init_reorder_list(pd);
 631	padata_init_squeues(pd);
 632	pd->seq_nr = -1;
 633	refcount_set(&pd->refcnt, 1);
 634	spin_lock_init(&pd->lock);
 635	pd->cpu = cpumask_first(pd->cpumask.pcpu);
 636	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
 637
 638	return pd;
 639
 640err_free_pcpu:
 641	free_cpumask_var(pd->cpumask.pcpu);
 642err_free_squeue:
 643	free_percpu(pd->squeue);
 644err_free_reorder_list:
 645	free_percpu(pd->reorder_list);
 646err_free_pd:
 647	kfree(pd);
 648err:
 649	return NULL;
 650}
 651
 652static void padata_free_pd(struct parallel_data *pd)
 653{
 654	free_cpumask_var(pd->cpumask.pcpu);
 655	free_cpumask_var(pd->cpumask.cbcpu);
 656	free_percpu(pd->reorder_list);
 657	free_percpu(pd->squeue);
 658	kfree(pd);
 659}
 660
 661static void __padata_start(struct padata_instance *pinst)
 662{
 663	pinst->flags |= PADATA_INIT;
 664}
 665
 666static void __padata_stop(struct padata_instance *pinst)
 667{
 668	if (!(pinst->flags & PADATA_INIT))
 669		return;
 670
 671	pinst->flags &= ~PADATA_INIT;
 672
 673	synchronize_rcu();
 674}
 675
 676/* Replace the internal control structure with a new one. */
 677static int padata_replace_one(struct padata_shell *ps)
 678{
 679	struct parallel_data *pd_new;
 680
 681	pd_new = padata_alloc_pd(ps);
 682	if (!pd_new)
 683		return -ENOMEM;
 684
 685	ps->opd = rcu_dereference_protected(ps->pd, 1);
 686	rcu_assign_pointer(ps->pd, pd_new);
 687
 688	return 0;
 689}
 690
 691static int padata_replace(struct padata_instance *pinst)
 692{
 693	struct padata_shell *ps;
 694	int err = 0;
 695
 696	pinst->flags |= PADATA_RESET;
 697
 698	list_for_each_entry(ps, &pinst->pslist, list) {
 699		err = padata_replace_one(ps);
 700		if (err)
 701			break;
 702	}
 703
 704	synchronize_rcu();
 705
 706	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
 707		padata_put_pd(ps->opd);
 708
 709	pinst->flags &= ~PADATA_RESET;
 710
 711	return err;
 712}
 713
 714/* If cpumask contains no active cpu, we mark the instance as invalid. */
 715static bool padata_validate_cpumask(struct padata_instance *pinst,
 716				    const struct cpumask *cpumask)
 717{
 718	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
 719		pinst->flags |= PADATA_INVALID;
 720		return false;
 721	}
 722
 723	pinst->flags &= ~PADATA_INVALID;
 724	return true;
 725}
 726
 727static int __padata_set_cpumasks(struct padata_instance *pinst,
 728				 cpumask_var_t pcpumask,
 729				 cpumask_var_t cbcpumask)
 730{
 731	int valid;
 732	int err;
 733
 734	valid = padata_validate_cpumask(pinst, pcpumask);
 735	if (!valid) {
 736		__padata_stop(pinst);
 737		goto out_replace;
 738	}
 739
 740	valid = padata_validate_cpumask(pinst, cbcpumask);
 741	if (!valid)
 742		__padata_stop(pinst);
 743
 744out_replace:
 745	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
 746	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
 747
 748	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);
 749
 750	if (valid)
 751		__padata_start(pinst);
 752
 753	return err;
 754}
 755
 756/**
 757 * padata_set_cpumask - Set the cpumask specified by @cpumask_type to the
 758 *                      value of @cpumask.
 759 * @pinst: padata instance
 760 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting
 761 *                the serial or parallel cpumask respectively.
 762 * @cpumask: the cpumask to use
 763 *
 764 * Return: 0 on success or negative error code
 765 */
 766int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
 767		       cpumask_var_t cpumask)
 768{
 769	struct cpumask *serial_mask, *parallel_mask;
 770	int err = -EINVAL;
 771
 772	cpus_read_lock();
 773	mutex_lock(&pinst->lock);
 774
 775	switch (cpumask_type) {
 776	case PADATA_CPU_PARALLEL:
 777		serial_mask = pinst->cpumask.cbcpu;
 778		parallel_mask = cpumask;
 779		break;
 780	case PADATA_CPU_SERIAL:
 781		parallel_mask = pinst->cpumask.pcpu;
 782		serial_mask = cpumask;
 783		break;
 784	default:
 785		goto out;
 786	}
 787
 788	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
 789
 790out:
 791	mutex_unlock(&pinst->lock);
 792	cpus_read_unlock();
 793
 794	return err;
 795}
 796EXPORT_SYMBOL(padata_set_cpumask);
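/*
 * Usage sketch (illustrative; my_restrict_parallel is hypothetical):
 * restrict the parallel workers of an instance to CPUs 0-3.
 */
static int my_restrict_parallel(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int cpu, err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	for (cpu = 0; cpu <= 3; cpu++)
		cpumask_set_cpu(cpu, mask);

	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
	free_cpumask_var(mask);
	return err;
}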
 797
 798#ifdef CONFIG_HOTPLUG_CPU
 799
 800static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
 801{
 802	int err = 0;
 803
 804	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
 805		err = padata_replace(pinst);
 806
 807		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
 808		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
 809			__padata_start(pinst);
 810	}
 811
 812	return err;
 813}
 814
 815static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
 816{
 817	int err = 0;
 818
 819	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
 820		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
 821		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
 822			__padata_stop(pinst);
 823
 824		err = padata_replace(pinst);
 825	}
 826
 827	return err;
 828}
 829
 830static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
 831{
 832	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
 833		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
 834}
 835
 836static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
 837{
 838	struct padata_instance *pinst;
 839	int ret;
 840
 841	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
 842	if (!pinst_has_cpu(pinst, cpu))
 843		return 0;
 844
 845	mutex_lock(&pinst->lock);
 846	ret = __padata_add_cpu(pinst, cpu);
 847	mutex_unlock(&pinst->lock);
 848	return ret;
 849}
 850
 851static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
 852{
 853	struct padata_instance *pinst;
 854	int ret;
 855
 856	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
 857	if (!pinst_has_cpu(pinst, cpu))
 858		return 0;
 859
 860	mutex_lock(&pinst->lock);
 861	ret = __padata_remove_cpu(pinst, cpu);
 862	mutex_unlock(&pinst->lock);
 863	return ret;
 864}
 865
 866static enum cpuhp_state hp_online;
 867#endif
 868
 869static void __padata_free(struct padata_instance *pinst)
 870{
 871#ifdef CONFIG_HOTPLUG_CPU
 872	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
 873					    &pinst->cpu_dead_node);
 874	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
 875#endif
 876
 877	WARN_ON(!list_empty(&pinst->pslist));
 878
 879	free_cpumask_var(pinst->cpumask.pcpu);
 880	free_cpumask_var(pinst->cpumask.cbcpu);
 881	destroy_workqueue(pinst->serial_wq);
 882	destroy_workqueue(pinst->parallel_wq);
 883	kfree(pinst);
 884}
 885
 886#define kobj2pinst(_kobj)					\
 887	container_of(_kobj, struct padata_instance, kobj)
 888#define attr2pentry(_attr)					\
 889	container_of(_attr, struct padata_sysfs_entry, attr)
 890
 891static void padata_sysfs_release(struct kobject *kobj)
 892{
 893	struct padata_instance *pinst = kobj2pinst(kobj);
 894	__padata_free(pinst);
 895}
 896
 897struct padata_sysfs_entry {
 898	struct attribute attr;
 899	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
 900	ssize_t (*store)(struct padata_instance *, struct attribute *,
 901			 const char *, size_t);
 902};
 903
 904static ssize_t show_cpumask(struct padata_instance *pinst,
 905			    struct attribute *attr,  char *buf)
 906{
 907	struct cpumask *cpumask;
 908	ssize_t len;
 909
 910	mutex_lock(&pinst->lock);
 911	if (!strcmp(attr->name, "serial_cpumask"))
 912		cpumask = pinst->cpumask.cbcpu;
 913	else
 914		cpumask = pinst->cpumask.pcpu;
 915
 916	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
 917		       nr_cpu_ids, cpumask_bits(cpumask));
 918	mutex_unlock(&pinst->lock);
 919	return len < PAGE_SIZE ? len : -EINVAL;
 920}
 921
 922static ssize_t store_cpumask(struct padata_instance *pinst,
 923			     struct attribute *attr,
 924			     const char *buf, size_t count)
 925{
 926	cpumask_var_t new_cpumask;
 927	ssize_t ret;
 928	int mask_type;
 929
 930	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
 931		return -ENOMEM;
 932
 933	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
 934			   nr_cpumask_bits);
 935	if (ret < 0)
 936		goto out;
 937
 938	mask_type = !strcmp(attr->name, "serial_cpumask") ?
 939		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
 940	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
 941	if (!ret)
 942		ret = count;
 943
 944out:
 945	free_cpumask_var(new_cpumask);
 946	return ret;
 947}
 948
 949#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
 950	static struct padata_sysfs_entry _name##_attr =		\
 951		__ATTR(_name, 0644, _show_name, _store_name)
 952#define PADATA_ATTR_RO(_name, _show_name)		\
 953	static struct padata_sysfs_entry _name##_attr = \
 954		__ATTR(_name, 0400, _show_name, NULL)
 955
 956PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
 957PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
 958
 959/*
 960 * Padata sysfs provides the following objects:
 961 * serial_cpumask   [RW] - cpumask for serial workers
 962 * parallel_cpumask [RW] - cpumask for parallel workers
 963 */
 964static struct attribute *padata_default_attrs[] = {
 965	&serial_cpumask_attr.attr,
 966	&parallel_cpumask_attr.attr,
 967	NULL,
 968};
 969ATTRIBUTE_GROUPS(padata_default);
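/*
 * Example (illustrative): for the instances that pcrypt registers under
 * /sys/kernel/pcrypt/, writing a hex mask such as
 * "echo 3 > /sys/kernel/pcrypt/pencrypt/parallel_cpumask" would limit
 * parallel workers to CPUs 0-1; the exact path depends on where the
 * instance's kobject was added.
 */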
 970
 971static ssize_t padata_sysfs_show(struct kobject *kobj,
 972				 struct attribute *attr, char *buf)
 973{
 974	struct padata_instance *pinst;
 975	struct padata_sysfs_entry *pentry;
 976	ssize_t ret = -EIO;
 977
 978	pinst = kobj2pinst(kobj);
 979	pentry = attr2pentry(attr);
 980	if (pentry->show)
 981		ret = pentry->show(pinst, attr, buf);
 982
 983	return ret;
 984}
 985
 986static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
 987				  const char *buf, size_t count)
 988{
 989	struct padata_instance *pinst;
 990	struct padata_sysfs_entry *pentry;
 991	ssize_t ret = -EIO;
 992
 993	pinst = kobj2pinst(kobj);
 994	pentry = attr2pentry(attr);
 995	if (pentry->store)
 996		ret = pentry->store(pinst, attr, buf, count);
 997
 998	return ret;
 999}
1000
1001static const struct sysfs_ops padata_sysfs_ops = {
1002	.show = padata_sysfs_show,
1003	.store = padata_sysfs_store,
1004};
1005
1006static const struct kobj_type padata_attr_type = {
1007	.sysfs_ops = &padata_sysfs_ops,
1008	.default_groups = padata_default_groups,
1009	.release = padata_sysfs_release,
1010};
1011
1012/**
1013 * padata_alloc - allocate and initialize a padata instance
1014 * @name: used to identify the instance
1015 *
1016 * Return: new instance on success, NULL on error
1017 */
1018struct padata_instance *padata_alloc(const char *name)
1019{
1020	struct padata_instance *pinst;
1021
1022	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
1023	if (!pinst)
1024		goto err;
1025
1026	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
1027					     name);
1028	if (!pinst->parallel_wq)
1029		goto err_free_inst;
1030
1031	cpus_read_lock();
1032
1033	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
1034					   WQ_CPU_INTENSIVE, 1, name);
1035	if (!pinst->serial_wq)
1036		goto err_put_cpus;
1037
1038	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
1039		goto err_free_serial_wq;
1040	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
1041		free_cpumask_var(pinst->cpumask.pcpu);
1042		goto err_free_serial_wq;
1043	}
1044
1045	INIT_LIST_HEAD(&pinst->pslist);
1046
1047	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
1048	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);
1049
1050	if (padata_setup_cpumasks(pinst))
1051		goto err_free_masks;
1052
1053	__padata_start(pinst);
1054
1055	kobject_init(&pinst->kobj, &padata_attr_type);
1056	mutex_init(&pinst->lock);
1057
1058#ifdef CONFIG_HOTPLUG_CPU
1059	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
1060						    &pinst->cpu_online_node);
1061	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
1062						    &pinst->cpu_dead_node);
1063#endif
1064
1065	cpus_read_unlock();
1066
1067	return pinst;
1068
1069err_free_masks:
1070	free_cpumask_var(pinst->cpumask.pcpu);
1071	free_cpumask_var(pinst->cpumask.cbcpu);
1072err_free_serial_wq:
1073	destroy_workqueue(pinst->serial_wq);
1074err_put_cpus:
1075	cpus_read_unlock();
1076	destroy_workqueue(pinst->parallel_wq);
1077err_free_inst:
1078	kfree(pinst);
1079err:
1080	return NULL;
1081}
1082EXPORT_SYMBOL(padata_alloc);
1083
1084/**
1085 * padata_free - free a padata instance
1086 *
1087 * @pinst: padata instance to free
1088 */
1089void padata_free(struct padata_instance *pinst)
1090{
1091	kobject_put(&pinst->kobj);
1092}
1093EXPORT_SYMBOL(padata_free);
1094
1095/**
1096 * padata_alloc_shell - Allocate and initialize padata shell.
1097 *
1098 * @pinst: Parent padata_instance object.
1099 *
1100 * Return: new shell on success, NULL on error
1101 */
1102struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
1103{
1104	struct parallel_data *pd;
1105	struct padata_shell *ps;
1106
1107	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1108	if (!ps)
1109		goto out;
1110
1111	ps->pinst = pinst;
1112
1113	cpus_read_lock();
1114	pd = padata_alloc_pd(ps);
1115	cpus_read_unlock();
1116
1117	if (!pd)
1118		goto out_free_ps;
1119
1120	mutex_lock(&pinst->lock);
1121	RCU_INIT_POINTER(ps->pd, pd);
1122	list_add(&ps->list, &pinst->pslist);
1123	mutex_unlock(&pinst->lock);
1124
1125	return ps;
1126
1127out_free_ps:
1128	kfree(ps);
1129out:
1130	return NULL;
1131}
1132EXPORT_SYMBOL(padata_alloc_shell);
1133
1134/**
1135 * padata_free_shell - free a padata shell
1136 *
1137 * @ps: padata shell to free
1138 */
1139void padata_free_shell(struct padata_shell *ps)
1140{
1141	struct parallel_data *pd;
1142
1143	if (!ps)
1144		return;
1145
1146	/*
1147	 * Wait for all _do_serial calls to finish to avoid touching
1148	 * freed pd's and ps's.
1149	 */
1150	synchronize_rcu();
1151
1152	mutex_lock(&ps->pinst->lock);
1153	list_del(&ps->list);
1154	pd = rcu_dereference_protected(ps->pd, 1);
1155	padata_put_pd(pd);
1156	mutex_unlock(&ps->pinst->lock);
1157
1158	kfree(ps);
1159}
1160EXPORT_SYMBOL(padata_free_shell);
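/*
 * Lifecycle sketch (illustrative; my_setup and my_teardown are
 * hypothetical): typical pairing of instance and shell allocation with
 * their teardown, as a user such as a crypto template would do it.
 */
static struct padata_instance *my_pinst;
static struct padata_shell *my_ps;

static int my_setup(void)
{
	my_pinst = padata_alloc("my_inst");
	if (!my_pinst)
		return -ENOMEM;

	my_ps = padata_alloc_shell(my_pinst);
	if (!my_ps) {
		padata_free(my_pinst);
		return -ENOMEM;
	}
	return 0;
}

static void my_teardown(void)
{
	padata_free_shell(my_ps);	/* waits for in-flight do_serial users */
	padata_free(my_pinst);
}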
1161
1162void __init padata_init(void)
1163{
1164	unsigned int i, possible_cpus;
1165#ifdef CONFIG_HOTPLUG_CPU
1166	int ret;
1167
1168	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
1169				      padata_cpu_online, NULL);
1170	if (ret < 0)
1171		goto err;
1172	hp_online = ret;
1173
1174	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
1175				      NULL, padata_cpu_dead);
1176	if (ret < 0)
1177		goto remove_online_state;
1178#endif
1179
1180	possible_cpus = num_possible_cpus();
1181	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
1182				     GFP_KERNEL);
1183	if (!padata_works)
1184		goto remove_dead_state;
1185
1186	for (i = 0; i < possible_cpus; ++i)
1187		list_add(&padata_works[i].pw_list, &padata_free_works);
1188
1189	return;
1190
1191remove_dead_state:
1192#ifdef CONFIG_HOTPLUG_CPU
1193	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
1194remove_online_state:
1195	cpuhp_remove_multi_state(hp_online);
1196err:
1197#endif
1198	pr_warn("padata: initialization failed\n");
1199}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * padata.c - generic interface to process data streams in parallel
   4 *
   5 * See Documentation/core-api/padata.rst for more information.
   6 *
   7 * Copyright (C) 2008, 2009 secunet Security Networks AG
   8 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
   9 *
  10 * Copyright (c) 2020 Oracle and/or its affiliates.
  11 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
  12 *
  13 * This program is free software; you can redistribute it and/or modify it
  14 * under the terms and conditions of the GNU General Public License,
  15 * version 2, as published by the Free Software Foundation.
  16 *
  17 * This program is distributed in the hope it will be useful, but WITHOUT
  18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  19 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  20 * more details.
  21 *
  22 * You should have received a copy of the GNU General Public License along with
  23 * this program; if not, write to the Free Software Foundation, Inc.,
  24 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  25 */
  26
  27#include <linux/completion.h>
  28#include <linux/export.h>
  29#include <linux/cpumask.h>
  30#include <linux/err.h>
  31#include <linux/cpu.h>
  32#include <linux/padata.h>
  33#include <linux/mutex.h>
  34#include <linux/sched.h>
  35#include <linux/slab.h>
  36#include <linux/sysfs.h>
  37#include <linux/rcupdate.h>
  38
  39#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */
  40
  41struct padata_work {
  42	struct work_struct	pw_work;
  43	struct list_head	pw_list;  /* padata_free_works linkage */
  44	void			*pw_data;
  45};
  46
  47static DEFINE_SPINLOCK(padata_works_lock);
  48static struct padata_work *padata_works;
  49static LIST_HEAD(padata_free_works);
  50
  51struct padata_mt_job_state {
  52	spinlock_t		lock;
  53	struct completion	completion;
  54	struct padata_mt_job	*job;
  55	int			nworks;
  56	int			nworks_fini;
  57	unsigned long		chunk_size;
  58};
  59
  60static void padata_free_pd(struct parallel_data *pd);
  61static void __init padata_mt_helper(struct work_struct *work);
  62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  63static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
  64{
  65	int cpu, target_cpu;
  66
  67	target_cpu = cpumask_first(pd->cpumask.pcpu);
  68	for (cpu = 0; cpu < cpu_index; cpu++)
  69		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);
  70
  71	return target_cpu;
  72}
  73
  74static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
  75{
  76	/*
  77	 * Hash the sequence numbers to the cpus by taking
  78	 * seq_nr mod. number of cpus in use.
  79	 */
  80	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
  81
  82	return padata_index_to_cpu(pd, cpu_index);
  83}
  84
  85static struct padata_work *padata_work_alloc(void)
  86{
  87	struct padata_work *pw;
  88
  89	lockdep_assert_held(&padata_works_lock);
  90
  91	if (list_empty(&padata_free_works))
  92		return NULL;	/* No more work items allowed to be queued. */
  93
  94	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
  95	list_del(&pw->pw_list);
  96	return pw;
  97}
  98
  99static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
 100			     void *data, int flags)
 
 
 
 
 
 
 
 
 101{
 102	if (flags & PADATA_WORK_ONSTACK)
 103		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
 104	else
 105		INIT_WORK(&pw->pw_work, work_fn);
 106	pw->pw_data = data;
 107}
 108
 109static int __init padata_work_alloc_mt(int nworks, void *data,
 110				       struct list_head *head)
 111{
 112	int i;
 113
 114	spin_lock(&padata_works_lock);
 115	/* Start at 1 because the current task participates in the job. */
 116	for (i = 1; i < nworks; ++i) {
 117		struct padata_work *pw = padata_work_alloc();
 118
 119		if (!pw)
 120			break;
 121		padata_work_init(pw, padata_mt_helper, data, 0);
 122		list_add(&pw->pw_list, head);
 123	}
 124	spin_unlock(&padata_works_lock);
 125
 126	return i;
 127}
 128
 129static void padata_work_free(struct padata_work *pw)
 130{
 131	lockdep_assert_held(&padata_works_lock);
 132	list_add(&pw->pw_list, &padata_free_works);
 133}
 134
 135static void __init padata_works_free(struct list_head *works)
 136{
 137	struct padata_work *cur, *next;
 138
 139	if (list_empty(works))
 140		return;
 141
 142	spin_lock(&padata_works_lock);
 143	list_for_each_entry_safe(cur, next, works, pw_list) {
 144		list_del(&cur->pw_list);
 145		padata_work_free(cur);
 146	}
 147	spin_unlock(&padata_works_lock);
 148}
 149
 150static void padata_parallel_worker(struct work_struct *parallel_work)
 151{
 152	struct padata_work *pw = container_of(parallel_work, struct padata_work,
 153					      pw_work);
 154	struct padata_priv *padata = pw->pw_data;
 155
 156	local_bh_disable();
 157	padata->parallel(padata);
 158	spin_lock(&padata_works_lock);
 159	padata_work_free(pw);
 160	spin_unlock(&padata_works_lock);
 161	local_bh_enable();
 162}
 163
 164/**
 165 * padata_do_parallel - padata parallelization function
 166 *
 167 * @ps: padatashell
 168 * @padata: object to be parallelized
 169 * @cb_cpu: pointer to the CPU that the serialization callback function should
 170 *          run on.  If it's not in the serial cpumask of @pinst
 171 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 172 *          none found, returns -EINVAL.
 173 *
 174 * The parallelization callback function will run with BHs off.
 175 * Note: Every object which is parallelized by padata_do_parallel
 176 * must be seen by padata_do_serial.
 177 *
 178 * Return: 0 on success or else negative error code.
 179 */
 180int padata_do_parallel(struct padata_shell *ps,
 181		       struct padata_priv *padata, int *cb_cpu)
 182{
 183	struct padata_instance *pinst = ps->pinst;
 184	int i, cpu, cpu_index, err;
 185	struct parallel_data *pd;
 186	struct padata_work *pw;
 187
 188	rcu_read_lock_bh();
 189
 190	pd = rcu_dereference_bh(ps->pd);
 191
 192	err = -EINVAL;
 193	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
 194		goto out;
 195
 196	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
 197		if (!cpumask_weight(pd->cpumask.cbcpu))
 198			goto out;
 199
 200		/* Select an alternate fallback CPU and notify the caller. */
 201		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);
 202
 203		cpu = cpumask_first(pd->cpumask.cbcpu);
 204		for (i = 0; i < cpu_index; i++)
 205			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);
 206
 207		*cb_cpu = cpu;
 208	}
 209
 210	err =  -EBUSY;
 211	if ((pinst->flags & PADATA_RESET))
 212		goto out;
 213
 214	atomic_inc(&pd->refcnt);
 215	padata->pd = pd;
 216	padata->cb_cpu = *cb_cpu;
 217
 218	spin_lock(&padata_works_lock);
 219	padata->seq_nr = ++pd->seq_nr;
 220	pw = padata_work_alloc();
 221	spin_unlock(&padata_works_lock);
 222
 
 
 
 
 
 223	rcu_read_unlock_bh();
 224
 225	if (pw) {
 226		padata_work_init(pw, padata_parallel_worker, padata, 0);
 227		queue_work(pinst->parallel_wq, &pw->pw_work);
 228	} else {
 229		/* Maximum works limit exceeded, run in the current task. */
 230		padata->parallel(padata);
 231	}
 232
 233	return 0;
 234out:
 235	rcu_read_unlock_bh();
 236
 237	return err;
 238}
 239EXPORT_SYMBOL(padata_do_parallel);
 240
 241/*
 242 * padata_find_next - Find the next object that needs serialization.
 243 *
 244 * Return:
 245 * * A pointer to the control struct of the next object that needs
 246 *   serialization, if present in one of the percpu reorder queues.
 247 * * NULL, if the next object that needs serialization will
 248 *   be parallel processed by another cpu and is not yet present in
 249 *   the cpu's reorder queue.
 250 */
 251static struct padata_priv *padata_find_next(struct parallel_data *pd,
 252					    bool remove_object)
 253{
 254	struct padata_priv *padata;
 255	struct padata_list *reorder;
 256	int cpu = pd->cpu;
 257
 258	reorder = per_cpu_ptr(pd->reorder_list, cpu);
 259
 260	spin_lock(&reorder->lock);
 261	if (list_empty(&reorder->list)) {
 262		spin_unlock(&reorder->lock);
 263		return NULL;
 264	}
 265
 266	padata = list_entry(reorder->list.next, struct padata_priv, list);
 267
 268	/*
 269	 * Checks the rare case where two or more parallel jobs have hashed to
 270	 * the same CPU and one of the later ones finishes first.
 271	 */
 272	if (padata->seq_nr != pd->processed) {
 273		spin_unlock(&reorder->lock);
 274		return NULL;
 275	}
 276
 277	if (remove_object) {
 278		list_del_init(&padata->list);
 279		++pd->processed;
 280		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
 281	}
 282
 283	spin_unlock(&reorder->lock);
 284	return padata;
 285}
 286
 287static void padata_reorder(struct parallel_data *pd)
 288{
 289	struct padata_instance *pinst = pd->ps->pinst;
 290	int cb_cpu;
 291	struct padata_priv *padata;
 292	struct padata_serial_queue *squeue;
 293	struct padata_list *reorder;
 294
 295	/*
 296	 * We need to ensure that only one cpu can work on dequeueing of
 297	 * the reorder queue the time. Calculating in which percpu reorder
 298	 * queue the next object will arrive takes some time. A spinlock
 299	 * would be highly contended. Also it is not clear in which order
 300	 * the objects arrive to the reorder queues. So a cpu could wait to
 301	 * get the lock just to notice that there is nothing to do at the
 302	 * moment. Therefore we use a trylock and let the holder of the lock
 303	 * care for all the objects enqueued during the holdtime of the lock.
 304	 */
 305	if (!spin_trylock_bh(&pd->lock))
 306		return;
 307
 308	while (1) {
 309		padata = padata_find_next(pd, true);
 310
 311		/*
 312		 * If the next object that needs serialization is parallel
 313		 * processed by another cpu and is still on it's way to the
 314		 * cpu's reorder queue, nothing to do for now.
 315		 */
 316		if (!padata)
 317			break;
 318
 319		cb_cpu = padata->cb_cpu;
 320		squeue = per_cpu_ptr(pd->squeue, cb_cpu);
 321
 322		spin_lock(&squeue->serial.lock);
 323		list_add_tail(&padata->list, &squeue->serial.list);
 324		spin_unlock(&squeue->serial.lock);
 325
 326		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
 327	}
 328
 329	spin_unlock_bh(&pd->lock);
 330
 331	/*
 332	 * The next object that needs serialization might have arrived to
 333	 * the reorder queues in the meantime.
 334	 *
 335	 * Ensure reorder queue is read after pd->lock is dropped so we see
 336	 * new objects from another task in padata_do_serial.  Pairs with
 337	 * smp_mb in padata_do_serial.
 338	 */
 339	smp_mb();
 340
 341	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
 342	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
 
 
 
 
 
 343		queue_work(pinst->serial_wq, &pd->reorder_work);
 
 344}
 345
 346static void invoke_padata_reorder(struct work_struct *work)
 347{
 348	struct parallel_data *pd;
 349
 350	local_bh_disable();
 351	pd = container_of(work, struct parallel_data, reorder_work);
 352	padata_reorder(pd);
 353	local_bh_enable();
 
 
 354}
 355
 356static void padata_serial_worker(struct work_struct *serial_work)
 357{
 358	struct padata_serial_queue *squeue;
 359	struct parallel_data *pd;
 360	LIST_HEAD(local_list);
 361	int cnt;
 362
 363	local_bh_disable();
 364	squeue = container_of(serial_work, struct padata_serial_queue, work);
 365	pd = squeue->pd;
 366
 367	spin_lock(&squeue->serial.lock);
 368	list_replace_init(&squeue->serial.list, &local_list);
 369	spin_unlock(&squeue->serial.lock);
 370
 371	cnt = 0;
 372
 373	while (!list_empty(&local_list)) {
 374		struct padata_priv *padata;
 375
 376		padata = list_entry(local_list.next,
 377				    struct padata_priv, list);
 378
 379		list_del_init(&padata->list);
 380
 381		padata->serial(padata);
 382		cnt++;
 383	}
 384	local_bh_enable();
 385
 386	if (atomic_sub_and_test(cnt, &pd->refcnt))
 387		padata_free_pd(pd);
 388}
 389
 390/**
 391 * padata_do_serial - padata serialization function
 392 *
 393 * @padata: object to be serialized.
 394 *
 395 * padata_do_serial must be called for every parallelized object.
 396 * The serialization callback function will run with BHs off.
 397 */
 398void padata_do_serial(struct padata_priv *padata)
 399{
 400	struct parallel_data *pd = padata->pd;
 401	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
 402	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
 403	struct padata_priv *cur;
 
 404
 405	spin_lock(&reorder->lock);
 406	/* Sort in ascending order of sequence number. */
 407	list_for_each_entry_reverse(cur, &reorder->list, list)
 408		if (cur->seq_nr < padata->seq_nr)
 
 
 409			break;
 410	list_add(&padata->list, &cur->list);
 
 411	spin_unlock(&reorder->lock);
 412
 413	/*
 414	 * Ensure the addition to the reorder list is ordered correctly
 415	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
 416	 * in padata_reorder.
 417	 */
 418	smp_mb();
 419
 420	padata_reorder(pd);
 421}
 422EXPORT_SYMBOL(padata_do_serial);
 423
 424static int padata_setup_cpumasks(struct padata_instance *pinst)
 425{
 426	struct workqueue_attrs *attrs;
 427	int err;
 428
 429	attrs = alloc_workqueue_attrs();
 430	if (!attrs)
 431		return -ENOMEM;
 432
 433	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
 434	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
 435	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
 436	free_workqueue_attrs(attrs);
 437
 438	return err;
 439}
 440
 441static void __init padata_mt_helper(struct work_struct *w)
 442{
 443	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
 444	struct padata_mt_job_state *ps = pw->pw_data;
 445	struct padata_mt_job *job = ps->job;
 446	bool done;
 447
 448	spin_lock(&ps->lock);
 449
 450	while (job->size > 0) {
 451		unsigned long start, size, end;
 452
 453		start = job->start;
 454		/* So end is chunk size aligned if enough work remains. */
 455		size = roundup(start + 1, ps->chunk_size) - start;
 456		size = min(size, job->size);
 457		end = start + size;
 458
 459		job->start = end;
 460		job->size -= size;
 461
 462		spin_unlock(&ps->lock);
 463		job->thread_fn(start, end, job->fn_arg);
 464		spin_lock(&ps->lock);
 465	}
 466
 467	++ps->nworks_fini;
 468	done = (ps->nworks_fini == ps->nworks);
 469	spin_unlock(&ps->lock);
 470
 471	if (done)
 472		complete(&ps->completion);
 473}
 474
 475/**
 476 * padata_do_multithreaded - run a multithreaded job
 477 * @job: Description of the job.
 478 *
 479 * See the definition of struct padata_mt_job for more details.
 480 */
 481void __init padata_do_multithreaded(struct padata_mt_job *job)
 482{
 483	/* In case threads finish at different times. */
 484	static const unsigned long load_balance_factor = 4;
 485	struct padata_work my_work, *pw;
 486	struct padata_mt_job_state ps;
 487	LIST_HEAD(works);
 488	int nworks;
 
 489
 490	if (job->size == 0)
 491		return;
 492
 493	/* Ensure at least one thread when size < min_chunk. */
 494	nworks = max(job->size / job->min_chunk, 1ul);
 495	nworks = min(nworks, job->max_threads);
 496
 497	if (nworks == 1) {
 498		/* Single thread, no coordination needed, cut to the chase. */
 499		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
 500		return;
 501	}
 502
 503	spin_lock_init(&ps.lock);
 504	init_completion(&ps.completion);
 505	ps.job	       = job;
 506	ps.nworks      = padata_work_alloc_mt(nworks, &ps, &works);
 507	ps.nworks_fini = 0;
 508
 509	/*
 510	 * Chunk size is the amount of work a helper does per call to the
 511	 * thread function.  Load balance large jobs between threads by
 512	 * increasing the number of chunks, guarantee at least the minimum
 513	 * chunk size from the caller, and honor the caller's alignment.
 
 
 514	 */
 515	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
 516	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
 
 517	ps.chunk_size = roundup(ps.chunk_size, job->align);
 518
 519	list_for_each_entry(pw, &works, pw_list)
 520		queue_work(system_unbound_wq, &pw->pw_work);
 
 
 
 
 
 
 
 
 
 521
 522	/* Use the current thread, which saves starting a workqueue worker. */
 523	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
 524	padata_mt_helper(&my_work.pw_work);
 525
 526	/* Wait for all the helpers to finish. */
 527	wait_for_completion(&ps.completion);
 528
 529	destroy_work_on_stack(&my_work.pw_work);
 530	padata_works_free(&works);
 531}
 532
 533static void __padata_list_init(struct padata_list *pd_list)
 534{
 535	INIT_LIST_HEAD(&pd_list->list);
 536	spin_lock_init(&pd_list->lock);
 537}
 538
 539/* Initialize all percpu queues used by serial workers */
 540static void padata_init_squeues(struct parallel_data *pd)
 541{
 542	int cpu;
 543	struct padata_serial_queue *squeue;
 544
 545	for_each_cpu(cpu, pd->cpumask.cbcpu) {
 546		squeue = per_cpu_ptr(pd->squeue, cpu);
 547		squeue->pd = pd;
 548		__padata_list_init(&squeue->serial);
 549		INIT_WORK(&squeue->work, padata_serial_worker);
 550	}
 551}
 552
 553/* Initialize per-CPU reorder lists */
 554static void padata_init_reorder_list(struct parallel_data *pd)
 555{
 556	int cpu;
 557	struct padata_list *list;
 558
 559	for_each_cpu(cpu, pd->cpumask.pcpu) {
 560		list = per_cpu_ptr(pd->reorder_list, cpu);
 561		__padata_list_init(list);
 562	}
 563}
 564
 565/* Allocate and initialize the internal cpumask dependend resources. */
 566static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
 567{
 568	struct padata_instance *pinst = ps->pinst;
 569	struct parallel_data *pd;
 570
 571	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
 572	if (!pd)
 573		goto err;
 574
 575	pd->reorder_list = alloc_percpu(struct padata_list);
 576	if (!pd->reorder_list)
 577		goto err_free_pd;
 578
 579	pd->squeue = alloc_percpu(struct padata_serial_queue);
 580	if (!pd->squeue)
 581		goto err_free_reorder_list;
 582
 583	pd->ps = ps;
 584
 585	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
 586		goto err_free_squeue;
 587	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
 588		goto err_free_pcpu;
 589
 590	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
 591	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);
 592
 593	padata_init_reorder_list(pd);
 594	padata_init_squeues(pd);
 595	pd->seq_nr = -1;
 596	atomic_set(&pd->refcnt, 1);
 597	spin_lock_init(&pd->lock);
 598	pd->cpu = cpumask_first(pd->cpumask.pcpu);
 599	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
 600
 601	return pd;
 602
 603err_free_pcpu:
 604	free_cpumask_var(pd->cpumask.pcpu);
 605err_free_squeue:
 606	free_percpu(pd->squeue);
 607err_free_reorder_list:
 608	free_percpu(pd->reorder_list);
 609err_free_pd:
 610	kfree(pd);
 611err:
 612	return NULL;
 613}
 614
 615static void padata_free_pd(struct parallel_data *pd)
 616{
 617	free_cpumask_var(pd->cpumask.pcpu);
 618	free_cpumask_var(pd->cpumask.cbcpu);
 619	free_percpu(pd->reorder_list);
 620	free_percpu(pd->squeue);
 621	kfree(pd);
 622}
 623
 624static void __padata_start(struct padata_instance *pinst)
 625{
 626	pinst->flags |= PADATA_INIT;
 627}
 628
 629static void __padata_stop(struct padata_instance *pinst)
 630{
 631	if (!(pinst->flags & PADATA_INIT))
 632		return;
 633
 634	pinst->flags &= ~PADATA_INIT;
 635
 636	synchronize_rcu();
 637}
 638
 639/* Replace the internal control structure with a new one. */
 640static int padata_replace_one(struct padata_shell *ps)
 641{
 642	struct parallel_data *pd_new;
 643
 644	pd_new = padata_alloc_pd(ps);
 645	if (!pd_new)
 646		return -ENOMEM;
 647
 648	ps->opd = rcu_dereference_protected(ps->pd, 1);
 649	rcu_assign_pointer(ps->pd, pd_new);
 650
 651	return 0;
 652}
 653
 654static int padata_replace(struct padata_instance *pinst)
 655{
 656	struct padata_shell *ps;
 657	int err = 0;
 658
 659	pinst->flags |= PADATA_RESET;
 660
 661	list_for_each_entry(ps, &pinst->pslist, list) {
 662		err = padata_replace_one(ps);
 663		if (err)
 664			break;
 665	}
 666
 667	synchronize_rcu();
 668
 669	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
 670		if (atomic_dec_and_test(&ps->opd->refcnt))
 671			padata_free_pd(ps->opd);
 672
 673	pinst->flags &= ~PADATA_RESET;
 674
 675	return err;
 676}
 677
 678/* If cpumask contains no active cpu, we mark the instance as invalid. */
 679static bool padata_validate_cpumask(struct padata_instance *pinst,
 680				    const struct cpumask *cpumask)
 681{
 682	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
 683		pinst->flags |= PADATA_INVALID;
 684		return false;
 685	}
 686
 687	pinst->flags &= ~PADATA_INVALID;
 688	return true;
 689}
 690
 691static int __padata_set_cpumasks(struct padata_instance *pinst,
 692				 cpumask_var_t pcpumask,
 693				 cpumask_var_t cbcpumask)
 694{
 695	int valid;
 696	int err;
 697
 698	valid = padata_validate_cpumask(pinst, pcpumask);
 699	if (!valid) {
 700		__padata_stop(pinst);
 701		goto out_replace;
 702	}
 703
 704	valid = padata_validate_cpumask(pinst, cbcpumask);
 705	if (!valid)
 706		__padata_stop(pinst);
 707
 708out_replace:
 709	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
 710	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
 711
 712	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);
 713
 714	if (valid)
 715		__padata_start(pinst);
 716
 717	return err;
 718}
 719
 720/**
 721 * padata_set_cpumask - Sets specified by @cpumask_type cpumask to the value
 722 *                      equivalent to @cpumask.
 723 * @pinst: padata instance
 724 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
 725 *                to parallel and serial cpumasks respectively.
 726 * @cpumask: the cpumask to use
 727 *
 728 * Return: 0 on success or negative error code
 729 */
 730int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
 731		       cpumask_var_t cpumask)
 732{
 733	struct cpumask *serial_mask, *parallel_mask;
 734	int err = -EINVAL;
 735
 736	get_online_cpus();
 737	mutex_lock(&pinst->lock);
 738
 739	switch (cpumask_type) {
 740	case PADATA_CPU_PARALLEL:
 741		serial_mask = pinst->cpumask.cbcpu;
 742		parallel_mask = cpumask;
 743		break;
 744	case PADATA_CPU_SERIAL:
 745		parallel_mask = pinst->cpumask.pcpu;
 746		serial_mask = cpumask;
 747		break;
 748	default:
 749		goto out;
 750	}
 751
 752	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
 753
 754out:
 755	mutex_unlock(&pinst->lock);
 756	cpus_read_unlock();
 757
 758	return err;
 759}
 760EXPORT_SYMBOL(padata_set_cpumask);
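
/*
 * Illustrative sketch, not part of the kernel source: how a hypothetical
 * caller might restrict parallel work to CPU 0.  @pinst is assumed to
 * come from padata_alloc(); the function name below is made up for the
 * example.
 */
static int __maybe_unused example_set_parallel_cpumask(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);	/* parallel workers on CPU 0 only */
	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

	free_cpumask_var(mask);
	return err;
}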
 761
 762#ifdef CONFIG_HOTPLUG_CPU
 763
 764static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
 765{
 766	int err = 0;
 767
 768	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
 769		err = padata_replace(pinst);
 770
 771		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
 772		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
 773			__padata_start(pinst);
 774	}
 775
 776	return err;
 777}
 778
 779static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
 780{
 781	int err = 0;
 782
 783	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
 784		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
 785		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
 786			__padata_stop(pinst);
 787
 788		err = padata_replace(pinst);
 789	}
 790
 791	return err;
 792}
 793
 794static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
 795{
 796	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
 797		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
 798}
 799
 800static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
 801{
 802	struct padata_instance *pinst;
 803	int ret;
 804
 805	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
 806	if (!pinst_has_cpu(pinst, cpu))
 807		return 0;
 808
 809	mutex_lock(&pinst->lock);
 810	ret = __padata_add_cpu(pinst, cpu);
 811	mutex_unlock(&pinst->lock);
 812	return ret;
 813}
 814
 815static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
 816{
 817	struct padata_instance *pinst;
 818	int ret;
 819
 820	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
 821	if (!pinst_has_cpu(pinst, cpu))
 822		return 0;
 823
 824	mutex_lock(&pinst->lock);
 825	ret = __padata_remove_cpu(pinst, cpu);
 826	mutex_unlock(&pinst->lock);
 827	return ret;
 828}
 829
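/* Dynamically allocated hotplug state for the per-instance online callback. */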
 830static enum cpuhp_state hp_online;
 831#endif
 832
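/*
 * Final teardown, reached via padata_sysfs_release() once the instance's
 * kobject refcount drops to zero.
 */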
 833static void __padata_free(struct padata_instance *pinst)
 834{
 835#ifdef CONFIG_HOTPLUG_CPU
 836	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
 837					    &pinst->cpu_dead_node);
 838	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
 839#endif
 840
 841	WARN_ON(!list_empty(&pinst->pslist));
 842
 843	free_cpumask_var(pinst->cpumask.pcpu);
 844	free_cpumask_var(pinst->cpumask.cbcpu);
 845	destroy_workqueue(pinst->serial_wq);
 846	destroy_workqueue(pinst->parallel_wq);
 847	kfree(pinst);
 848}
 849
 850#define kobj2pinst(_kobj)					\
 851	container_of(_kobj, struct padata_instance, kobj)
 852#define attr2pentry(_attr)					\
 853	container_of(_attr, struct padata_sysfs_entry, attr)
 854
 855static void padata_sysfs_release(struct kobject *kobj)
 856{
 857	struct padata_instance *pinst = kobj2pinst(kobj);
 858	__padata_free(pinst);
 859}
 860
 861struct padata_sysfs_entry {
 862	struct attribute attr;
 863	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
 864	ssize_t (*store)(struct padata_instance *, struct attribute *,
 865			 const char *, size_t);
 866};
 867
 868static ssize_t show_cpumask(struct padata_instance *pinst,
 869			    struct attribute *attr,  char *buf)
 870{
 871	struct cpumask *cpumask;
 872	ssize_t len;
 873
 874	mutex_lock(&pinst->lock);
 875	if (!strcmp(attr->name, "serial_cpumask"))
 876		cpumask = pinst->cpumask.cbcpu;
 877	else
 878		cpumask = pinst->cpumask.pcpu;
 879
 880	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
 881		       nr_cpu_ids, cpumask_bits(cpumask));
 882	mutex_unlock(&pinst->lock);
 883	return len < PAGE_SIZE ? len : -EINVAL;
 884}
 885
 886static ssize_t store_cpumask(struct padata_instance *pinst,
 887			     struct attribute *attr,
 888			     const char *buf, size_t count)
 889{
 890	cpumask_var_t new_cpumask;
 891	ssize_t ret;
 892	int mask_type;
 893
 894	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
 895		return -ENOMEM;
 896
 897	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
 898			   nr_cpumask_bits);
 899	if (ret < 0)
 900		goto out;
 901
 902	mask_type = !strcmp(attr->name, "serial_cpumask") ?
 903		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
 904	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
 905	if (!ret)
 906		ret = count;
 907
 908out:
 909	free_cpumask_var(new_cpumask);
 910	return ret;
 911}
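
/*
 * bitmap_parse() takes a hex mask, so writing "f" to parallel_cpumask
 * selects CPUs 0-3; list syntax such as "0-3" would require
 * bitmap_parselist() instead.
 */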
 912
 913#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
 914	static struct padata_sysfs_entry _name##_attr =		\
 915		__ATTR(_name, 0644, _show_name, _store_name)
 916#define PADATA_ATTR_RO(_name, _show_name)		\
 917	static struct padata_sysfs_entry _name##_attr = \
 918		__ATTR(_name, 0400, _show_name, NULL)
 919
 920PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
 921PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
 922
 923/*
 924 * Padata sysfs provides the following objects:
 925 * serial_cpumask   [RW] - cpumask for serial workers
 926 * parallel_cpumask [RW] - cpumask for parallel workers
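 *
 * Where these files appear depends on the padata user, which registers
 * the instance's kobject; pcrypt, for example, exposes them under
 * /sys/kernel/pcrypt/<instance>/.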
 927 */
 928static struct attribute *padata_default_attrs[] = {
 929	&serial_cpumask_attr.attr,
 930	&parallel_cpumask_attr.attr,
 931	NULL,
 932};
 933ATTRIBUTE_GROUPS(padata_default);
 934
 935static ssize_t padata_sysfs_show(struct kobject *kobj,
 936				 struct attribute *attr, char *buf)
 937{
 938	struct padata_instance *pinst;
 939	struct padata_sysfs_entry *pentry;
 940	ssize_t ret = -EIO;
 941
 942	pinst = kobj2pinst(kobj);
 943	pentry = attr2pentry(attr);
 944	if (pentry->show)
 945		ret = pentry->show(pinst, attr, buf);
 946
 947	return ret;
 948}
 949
 950static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
 951				  const char *buf, size_t count)
 952{
 953	struct padata_instance *pinst;
 954	struct padata_sysfs_entry *pentry;
 955	ssize_t ret = -EIO;
 956
 957	pinst = kobj2pinst(kobj);
 958	pentry = attr2pentry(attr);
 959	if (pentry->store)
 960		ret = pentry->store(pinst, attr, buf, count);
 961
 962	return ret;
 963}
 964
 965static const struct sysfs_ops padata_sysfs_ops = {
 966	.show = padata_sysfs_show,
 967	.store = padata_sysfs_store,
 968};
 969
 970 static const struct kobj_type padata_attr_type = {
 971	.sysfs_ops = &padata_sysfs_ops,
 972	.default_groups = padata_default_groups,
 973	.release = padata_sysfs_release,
 974};
 975
 976/**
 977 * padata_alloc - allocate and initialize a padata instance
 978 * @name: used to identify the instance
 979 *
 980 * Return: new instance on success, NULL on error
 981 */
 982struct padata_instance *padata_alloc(const char *name)
 983{
 984	struct padata_instance *pinst;
 985
 986	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
 987	if (!pinst)
 988		goto err;
 989
 990	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
 991					     name);
 992	if (!pinst->parallel_wq)
 993		goto err_free_inst;
 994
 995	cpus_read_lock();
 996
 997	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
 998					   WQ_CPU_INTENSIVE, 1, name);
 999	if (!pinst->serial_wq)
1000		goto err_put_cpus;
1001
1002	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
1003		goto err_free_serial_wq;
1004	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
1005		free_cpumask_var(pinst->cpumask.pcpu);
1006		goto err_free_serial_wq;
1007	}
1008
1009	INIT_LIST_HEAD(&pinst->pslist);
1010
1011	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
1012	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);
1013
1014	if (padata_setup_cpumasks(pinst))
1015		goto err_free_masks;
1016
1017	__padata_start(pinst);
1018
1019	kobject_init(&pinst->kobj, &padata_attr_type);
1020	mutex_init(&pinst->lock);
1021
1022#ifdef CONFIG_HOTPLUG_CPU
1023	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
1024						    &pinst->cpu_online_node);
1025	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
1026						    &pinst->cpu_dead_node);
1027#endif
1028
1029	cpus_read_unlock();
1030
1031	return pinst;
1032
1033err_free_masks:
1034	free_cpumask_var(pinst->cpumask.pcpu);
1035	free_cpumask_var(pinst->cpumask.cbcpu);
1036err_free_serial_wq:
1037	destroy_workqueue(pinst->serial_wq);
1038err_put_cpus:
1039	cpus_read_unlock();
1040	destroy_workqueue(pinst->parallel_wq);
1041err_free_inst:
1042	kfree(pinst);
1043err:
1044	return NULL;
1045}
1046EXPORT_SYMBOL(padata_alloc);
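
/*
 * Note that a freshly allocated instance is already started and both of
 * its cpumasks default to cpu_possible_mask; callers narrow them later
 * via padata_set_cpumask() or the sysfs files.
 */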
1047
1048/**
1049 * padata_free - free a padata instance
1050 *
1051 * @pinst: padata instance to free
1052 */
1053void padata_free(struct padata_instance *pinst)
1054{
1055	kobject_put(&pinst->kobj);
1056}
1057EXPORT_SYMBOL(padata_free);
1058
1059/**
1060 * padata_alloc_shell - Allocate and initialize padata shell.
1061 *
1062 * @pinst: Parent padata_instance object.
1063 *
1064 * Return: new shell on success, NULL on error
1065 */
1066struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
1067{
1068	struct parallel_data *pd;
1069	struct padata_shell *ps;
1070
1071	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1072	if (!ps)
1073		goto out;
1074
1075	ps->pinst = pinst;
1076
1077	cpus_read_lock();
1078	pd = padata_alloc_pd(ps);
1079	cpus_read_unlock();
1080
1081	if (!pd)
1082		goto out_free_ps;
1083
1084	mutex_lock(&pinst->lock);
1085	RCU_INIT_POINTER(ps->pd, pd);
1086	list_add(&ps->list, &pinst->pslist);
1087	mutex_unlock(&pinst->lock);
1088
1089	return ps;
1090
1091out_free_ps:
1092	kfree(ps);
1093out:
1094	return NULL;
1095}
1096EXPORT_SYMBOL(padata_alloc_shell);
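
/*
 * Illustrative sketch, not part of the kernel source: a hypothetical user
 * pairing one instance with one shell.  Jobs would be submitted through
 * padata_do_parallel() between setup and teardown; note that shells must
 * be freed before the instance that owns them.
 */
static int __maybe_unused example_padata_lifecycle(void)
{
	struct padata_instance *pinst;
	struct padata_shell *ps;

	pinst = padata_alloc("example");
	if (!pinst)
		return -ENOMEM;

	ps = padata_alloc_shell(pinst);
	if (!ps) {
		padata_free(pinst);
		return -ENOMEM;
	}

	/* ... padata_do_parallel(ps, ...) calls go here ... */

	padata_free_shell(ps);
	padata_free(pinst);
	return 0;
}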
1097
1098/**
1099 * padata_free_shell - free a padata shell
1100 *
1101 * @ps: padata shell to free
1102 */
1103void padata_free_shell(struct padata_shell *ps)
1104	{
	struct parallel_data *pd;

1105	if (!ps)
1106		return;
1107
	/*
	 * Wait for all _do_serial calls to finish to avoid touching
	 * freed pd's and ps's.
	 */
	synchronize_rcu();

1108	mutex_lock(&ps->pinst->lock);
1109	list_del(&ps->list);
1110	pd = rcu_dereference_protected(ps->pd, 1);
	padata_put_pd(pd);
1111	mutex_unlock(&ps->pinst->lock);
1112
1113	kfree(ps);
1114}
1115EXPORT_SYMBOL(padata_free_shell);
1116
1117void __init padata_init(void)
1118{
1119	unsigned int i, possible_cpus;
1120#ifdef CONFIG_HOTPLUG_CPU
1121	int ret;
1122
1123	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
1124				      padata_cpu_online, NULL);
1125	if (ret < 0)
1126		goto err;
1127	hp_online = ret;
1128
1129	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
1130				      NULL, padata_cpu_dead);
1131	if (ret < 0)
1132		goto remove_online_state;
1133#endif
1134
1135	possible_cpus = num_possible_cpus();
1136	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
1137				     GFP_KERNEL);
1138	if (!padata_works)
1139		goto remove_dead_state;
1140
1141	for (i = 0; i < possible_cpus; ++i)
1142		list_add(&padata_works[i].pw_list, &padata_free_works);
1143
1144	return;
1145
1146remove_dead_state:
1147#ifdef CONFIG_HOTPLUG_CPU
1148	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
1149remove_online_state:
1150	cpuhp_remove_multi_state(hp_online);
1151err:
1152#endif
1153	pr_warn("padata: initialization failed\n");
1154}
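
/*
 * If initialization fails, padata_free_works stays empty; multithreaded
 * jobs then fall back to running entirely in the submitting thread, since
 * padata_do_multithreaded() always finishes leftover work itself.
 */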