/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the concurrent number of kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;

static struct workqueue_struct *khelper_wq;

/*
 * kmod_thread_locker is used for deadlock avoidance.  There is no explicit
 * locking to protect this global - it is private to the singleton khelper
 * thread and should only ever be modified by that thread.
 */
static const struct task_struct *kmod_thread_locker;

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

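/*
 * Illustration only (not part of the original comments): modprobe_path is
 * exposed as the "modprobe" sysctl elsewhere in the kernel, so an
 * administrator can redirect module auto-loading to a wrapper binary, or
 * disable it by writing an empty string to /proc/sys/kernel/modprobe.
 * An empty path makes __request_module() below return 0 without starting
 * any helper.
 */
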
static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]); /* check call_modprobe() */
	kfree(info->argv);
}

static int call_modprobe(char *module_name, int wait)
{
	struct subprocess_info *info;
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
	if (!argv)
		goto out;

	module_name = kstrdup(module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* check free_modprobe_argv() */
	argv[4] = NULL;

	info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
					 NULL, free_modprobe_argv, NULL);
	if (!info)
		goto free_module_name;

	return call_usermodehelper_exec(info, wait | UMH_KILLABLE);

free_module_name:
	kfree(module_name);
free_argv:
	kfree(argv);
out:
	return -ENOMEM;
}

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available, rather than blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 *
 * A hypothetical usage sketch follows the function below.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	/*
	 * We don't allow synchronous module loading from async.  Module
	 * init may invoke async_synchronize_full() which will end up
	 * waiting for this task which already is waiting for the module
	 * loading to complete, leading to a deadlock.
	 */
	WARN_ON_ONCE(wait && current_is_async());

	if (!modprobe_path[0])
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop.  Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked.  That would mean accessing the internals of the
	 * process tables to get the command line, proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 * KAO.
	 *
	 * "trace the ppid" is simple, but will fail if someone's
	 * parent exits.  I think this is as good as it gets. --RR
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg < 5) {
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
			kmod_loop_msg++;
		}
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(__request_module);
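
/*
 * Hypothetical usage sketch (illustration only, not part of the original
 * file): a driver that needs a protocol module before it can register a
 * device might autoload it and then verify the service really appeared,
 * as the comment above __request_module() requires.  The "foo" names are
 * invented for this sketch.
 */
#if 0
static int foo_attach(struct foo_device *dev)
{
	int err;

	/* Synchronous load: request_module() waits for modprobe to exit. */
	err = request_module("foo-proto");
	if (err)
		return err;

	/* A successful load is not enough - check that the service exists. */
	if (!foo_proto_lookup(dev))
		return -ENODEV;

	return 0;
}
#endif
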
#endif /* CONFIG_MODULES */

/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/*
	 * Our parent is keventd, which runs with elevated scheduling priority.
	 * Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto fail;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto fail;
		}
	}

	commit_creds(new);

	retval = do_execve(getname_kernel(sub_info->path),
			   (const char __user *const __user *)sub_info->argv,
			   (const char __user *const __user *)sub_info->envp);
	if (!retval)
		return 0;

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	do_exit(0);
}

static int call_helper(void *data)
{
	/* Worker thread started blocking khelper thread. */
	kmod_thread_locker = current;
	return ____call_usermodehelper(data);
}

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/* If SIGCLD is ignored, sys_wait4 won't populate the status. */
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
	spin_unlock_irq(&current->sighand->siglock);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to their
		 * having an mm_segment_t which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and the
		 * real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	umh_complete(sub_info);
	do_exit(0);
}

/* This is run by khelper thread  */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	int wait = sub_info->wait & ~UMH_KILLABLE;
	pid_t pid;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully.  We need the data structures to stay around
	 * until that is done.  */
	if (wait == UMH_WAIT_PROC)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else {
		pid = kernel_thread(call_helper, sub_info,
				    CLONE_VFORK | SIGCHLD);
		/* Worker thread stopped blocking khelper thread. */
		kmod_thread_locker = NULL;
	}

	switch (wait) {
	case UMH_NO_WAIT:
		call_usermodehelper_freeinfo(sub_info);
		break;

	case UMH_WAIT_PROC:
		if (pid > 0)
			break;
		/* FALLTHROUGH */
	case UMH_WAIT_EXEC:
		if (pid < 0)
			sub_info->retval = pid;
		umh_complete(sub_info);
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
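
/*
 * Hypothetical usage sketch (illustration only, not part of the original
 * file): a caller that is about to spawn a helper and must not race with
 * suspend/hibernation takes the read lock first and bails out while
 * helpers are disabled.  foo_run_helper() stands in for whatever
 * call_usermodehelper() invocation the caller would make.
 */
#if 0
static int foo_run_helper_safely(void)
{
	int ret;

	ret = usermodehelper_read_trylock();
	if (ret)	/* -EAGAIN: helpers are disabled, e.g. during suspend */
		return ret;

	ret = foo_run_helper();

	usermodehelper_read_unlock();
	return ret;
}
#endif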

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
					atomic_read(&running_helpers) == 0,
					RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
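
/*
 * Hypothetical usage sketch (illustration only, not part of the original
 * file): a caller that wants to keep usermode helpers from starting around
 * a critical section.  It fails with -EAGAIN if already-running helpers do
 * not drain within RUNNING_HELPERS_TIMEOUT and re-enables helpers when it
 * is done.  foo_do_work() is invented for the sketch.
 */
#if 0
static int foo_critical_section(void)
{
	int ret;

	ret = __usermodehelper_disable(UMH_DISABLED);
	if (ret)
		return ret;

	ret = foo_do_work();

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return ret;
}
#endif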

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic_inc();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * freed.  This can be used for freeing the argv and envp.  The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 *
 * A hypothetical usage sketch follows the function below.
 */
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
		char **envp, gfp_t gfp_mask,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info),
		void *data)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, __call_usermodehelper);
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;

	sub_info->cleanup = cleanup;
	sub_info->init = init;
	sub_info->data = data;
  out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
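
/*
 * Hypothetical usage sketch (illustration only, not part of the original
 * file): preparing a helper call with a cleanup callback that releases a
 * kmalloc'ed argv once the subprocess_info is freed, much like
 * call_modprobe() above does.  The "/sbin/foo-helper" path and foo_*
 * names are invented.
 */
#if 0
static void foo_free_argv(struct subprocess_info *info)
{
	kfree(info->argv);
}

static int foo_run_helper(char *arg)
{
	static char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	struct subprocess_info *info;
	char **argv;

	argv = kmalloc(sizeof(char *[3]), GFP_KERNEL);
	if (!argv)
		return -ENOMEM;
	argv[0] = "/sbin/foo-helper";
	argv[1] = arg;
	argv[2] = NULL;

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL,
					 NULL, foo_free_argv, NULL);
	if (!info) {
		kfree(argv);
		return -ENOMEM;
	}

	/* UMH_WAIT_PROC: the helper's exit status is reported in the result. */
	return call_usermodehelper_exec(info, UMH_WAIT_PROC | UMH_KILLABLE);
}
#endif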

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of keventd
 * (i.e. it runs with full root capabilities).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();
	if (!khelper_wq || usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}
	/*
	 * Worker thread must not wait for khelper thread at below
	 * wait_for_completion() if the thread was created with CLONE_VFORK
	 * flag, for khelper thread is already waiting for the thread at
	 * wait_for_completion() in do_fork().
	 */
	if (wait != UMH_NO_WAIT && current == kmod_thread_locker) {
		retval = -EBUSY;
		goto out;
	}

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);

/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup()
 * followed by call_usermodehelper_exec().  A hypothetical usage sketch
 * follows the function below.
 */
int call_usermodehelper(char *path, char **argv, char **envp, int wait)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
					 NULL, NULL, NULL);
	if (info == NULL)
		return -ENOMEM;

	return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
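
/*
 * Hypothetical usage sketch (illustration only, not part of the original
 * file): the common one-shot pattern - build argv/envp and let the
 * convenience wrapper do setup + exec in one go.  The helper path and
 * arguments are invented.
 */
#if 0
static int foo_notify_userspace(char *event)
{
	char *argv[] = { "/sbin/foo-notify", event, NULL };
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	/* UMH_WAIT_EXEC: return once the program has been exec'ed (or not). */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
}
#endif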

static int proc_cap_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace.  Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};
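
/*
 * Illustration only (not part of the original file): this table is hooked
 * up under /proc/sys/kernel/usermodehelper/ elsewhere in the kernel.  Each
 * file holds _KERNEL_CAPABILITY_U32S unsigned longs, least significant 32
 * bits first, and because proc_cap_handler() always intersects the written
 * value with the current one, a write can only drop capability bits from
 * future helpers, never restore them.  Roughly:
 */
#if 0
static void example_drop_cap_sys_module(void)
{
	kernel_cap_t requested = CAP_FULL_SET;

	/* Whatever mask userspace writes ... */
	cap_lower(requested, CAP_SYS_MODULE);

	/* ... is intersected with the old value, never OR'ed back in. */
	usermodehelper_bset = cap_intersect(usermodehelper_bset, requested);
}
#endif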

void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");
	BUG_ON(!khelper_wq);
}