kernel/kmod.c (v3.1)
/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the number of concurrent kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;

static struct workqueue_struct *khelper_wq;

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);

#ifdef CONFIG_MODULES

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available, not blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
	static char *envp[] = { "HOME=/",
				"TERM=linux",
				"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
				NULL };
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop.  Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked.  That would mean accessing the internals of the
	 * process tables to get the command line; proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 * KAO.
	 *
	 * "trace the ppid" is simple, but will fail if someone's
	 * parent exits.  I think this is as good as it gets. --RR
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg++ < 5)
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_usermodehelper_fns(modprobe_path, argv, envp,
			wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC,
			NULL, NULL, NULL);

	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(__request_module);
#endif /* CONFIG_MODULES */
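
/*
 * Usage sketch (editorial addition, not part of kmod.c): callers reach
 * __request_module() through the request_module() wrapper declared in
 * <linux/kmod.h>. Per the kerneldoc above, a zero return only means
 * modprobe ran, so the caller must re-check that the service actually
 * appeared. crypto_has_alg() is from <linux/crypto.h>; the
 * "crypto-<name>" alias format follows the crypto layer's module
 * aliases.
 */
static int example_load_cipher(const char *alg)
{
	int ret = request_module("crypto-%s", alg);	/* may sleep */

	if (ret)
		return ret;
	/* Re-check availability instead of blindly using the algorithm. */
	return crypto_has_alg(alg, 0, 0) ? 0 : -ENOENT;
}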

/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/*
	 * Our parent is keventd, which runs with elevated scheduling priority.
	 * Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto fail;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto fail;
		}
	}

	commit_creds(new);

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	do_exit(0);
}

void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}
EXPORT_SYMBOL(call_usermodehelper_freeinfo);

/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/* If SIGCLD is ignored sys_wait4 won't populate the status. */
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
	spin_unlock_irq(&current->sighand->siglock);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to their
		 * having an mm_segment_t which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and the
		 * real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	complete(sub_info->complete);
	return 0;
}

/* This is run by the khelper thread */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	enum umh_wait wait = sub_info->wait;
	pid_t pid;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully. We need the data structures to stay around
	 * until that is done. */
	if (wait == UMH_WAIT_PROC)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else
		pid = kernel_thread(____call_usermodehelper, sub_info,
				    CLONE_VFORK | SIGCHLD);

	switch (wait) {
	case UMH_NO_WAIT:
		call_usermodehelper_freeinfo(sub_info);
		break;

	case UMH_WAIT_PROC:
		if (pid > 0)
			break;
		/* FALLTHROUGH */
	case UMH_WAIT_EXEC:
		if (pid < 0)
			sub_info->retval = pid;
		complete(sub_info->complete);
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 */
static int usermodehelper_disabled = 1;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

/**
 * usermodehelper_disable - prevent new helpers from being started
 */
int usermodehelper_disable(void)
{
	long retval;

	usermodehelper_disabled = 1;
	smp_mb();
	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
					atomic_read(&running_helpers) == 0,
					RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	usermodehelper_disabled = 0;
	return -EAGAIN;
}

/**
 * usermodehelper_enable - allow new helpers to be started again
 */
void usermodehelper_enable(void)
{
	usermodehelper_disabled = 0;
}

/**
 * usermodehelper_is_disabled - check if new helpers are allowed to be started
 */
bool usermodehelper_is_disabled(void)
{
	return usermodehelper_disabled;
}
EXPORT_SYMBOL_GPL(usermodehelper_is_disabled);
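
/*
 * Usage sketch (editorial addition): the PM core brackets hibernation
 * and suspend with this pair so no helper is spawned while userspace
 * is frozen. Simplified; example_enter_suspend() and the suspend step
 * are hypothetical stand-ins for the kernel/power/ call sites.
 */
static int example_enter_suspend(void)
{
	int error = usermodehelper_disable();

	if (error)
		return error;	/* helpers still running after the timeout */

	error = example_do_suspend();	/* hypothetical suspend work */

	usermodehelper_enable();
	return error;
}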

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic_inc();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 */
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
						  char **envp, gfp_t gfp_mask)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, __call_usermodehelper);
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;
  out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);

/**
 * call_usermodehelper_setfns - set a cleanup/init function
 * @info: a subprocess_info returned by call_usermodehelper_setup
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * freed.  This can be used for freeing the argv and envp.  The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
void call_usermodehelper_setfns(struct subprocess_info *info,
		    int (*init)(struct subprocess_info *info, struct cred *new),
		    void (*cleanup)(struct subprocess_info *info),
		    void *data)
{
	info->cleanup = cleanup;
	info->init = init;
	info->data = data;
}
EXPORT_SYMBOL(call_usermodehelper_setfns);

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (ie. it runs with full root capabilities).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info,
			     enum umh_wait wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	helper_lock();
	if (sub_info->path[0] == '\0')
		goto out;

	if (!khelper_wq || usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;
	wait_for_completion(&done);
	retval = sub_info->retval;

out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
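
/*
 * Usage sketch (editorial addition): the three exported steps chained
 * by hand; call_usermodehelper_fns() in <linux/kmod.h> wraps the same
 * sequence. The helper path and arguments are hypothetical.
 */
static int example_run_helper(void)
{
	struct subprocess_info *info;
	char *argv[] = { "/sbin/example-agent", "--oneshot", NULL };
	static char *envp[] = { "HOME=/",
				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	call_usermodehelper_setfns(info, NULL, NULL, NULL);
	/* Block until the helper exits and pick up its exit status. */
	return call_usermodehelper_exec(info, UMH_WAIT_PROC);
}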

static int proc_cap_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace.  Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};

void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");
	BUG_ON(!khelper_wq);
}
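
/*
 * Editorial note: usermodehelper_table is hooked up under "kernel" in
 * kernel/sysctl.c, so the two entries should appear as
 * /proc/sys/kernel/usermodehelper/{bset,inheritable}. Each holds
 * _KERNEL_CAPABILITY_U32S unsigned longs, least significant word
 * first, and writes can only clear bits. A minimal userspace reader
 * (sketch, assuming that mount point):
 */
#include <stdio.h>

int main(void)
{
	unsigned long w0, w1;
	FILE *f = fopen("/proc/sys/kernel/usermodehelper/bset", "r");

	if (!f || fscanf(f, "%lu%lu", &w0, &w1) != 2)
		return 1;
	printf("helper bounding set: %#lx %#lx\n", w0, w1);
	fclose(f);
	return 0;
}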

kernel/kmod.c (v3.5.6)
/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the number of concurrent kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;

static struct workqueue_struct *khelper_wq;

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]); /* check call_modprobe() */
	kfree(info->argv);
}

static int call_modprobe(char *module_name, int wait)
{
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
	if (!argv)
		goto out;

	module_name = kstrdup(module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* check free_modprobe_argv() */
	argv[4] = NULL;

	return call_usermodehelper_fns(modprobe_path, argv, envp,
		wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
free_argv:
	kfree(argv);
out:
	return -ENOMEM;
}
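
/*
 * Editorial note (sketch): once call_usermodehelper_fns() is reached,
 * ownership of the kmalloc'ed argv and the kstrdup'ed name passes to
 * the helper machinery; free_modprobe_argv() runs from
 * call_usermodehelper_freeinfo() whether or not the exec succeeds.
 * A caller-supplied cleanup follows the same pattern; example_cleanup()
 * and the buffer are hypothetical.
 */
static void example_cleanup(struct subprocess_info *info)
{
	kfree(info->data);	/* buffer handed over via the data pointer */
}

static int example_spawn(char *path, char **argv, char **envp, void *buf)
{
	/* buf is released by example_cleanup() on every exit path. */
	return call_usermodehelper_fns(path, argv, envp, UMH_WAIT_EXEC,
				       NULL, example_cleanup, buf);
}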

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available, not blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop.  Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked.  That would mean accessing the internals of the
	 * process tables to get the command line; proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 * KAO.
	 *
	 * "trace the ppid" is simple, but will fail if someone's
	 * parent exits.  I think this is as good as it gets. --RR
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg < 5) {
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
			kmod_loop_msg++;
		}
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(__request_module);
#endif /* CONFIG_MODULES */

/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/*
	 * Our parent is keventd, which runs with elevated scheduling priority.
	 * Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto fail;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto fail;
		}
	}

	commit_creds(new);

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	return 0;
}

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/* If SIGCLD is ignored sys_wait4 won't populate the status. */
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
	spin_unlock_irq(&current->sighand->siglock);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to their
		 * having an mm_segment_t which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and the
		 * real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	umh_complete(sub_info);
	return 0;
}

/* This is run by the khelper thread */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	int wait = sub_info->wait & ~UMH_KILLABLE;
	pid_t pid;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully. We need the data structures to stay around
	 * until that is done. */
	if (wait == UMH_WAIT_PROC)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else
		pid = kernel_thread(____call_usermodehelper, sub_info,
				    CLONE_VFORK | SIGCHLD);

	switch (wait) {
	case UMH_NO_WAIT:
		call_usermodehelper_freeinfo(sub_info);
		break;

	case UMH_WAIT_PROC:
		if (pid > 0)
			break;
		/* FALLTHROUGH */
	case UMH_WAIT_EXEC:
		if (pid < 0)
			sub_info->retval = pid;
		umh_complete(sub_info);
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
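
/*
 * Usage sketch (editorial addition): modeled on the firmware loader,
 * which takes the read side before spawning its helper so the spawn
 * cannot race a suspend-time disable. example_spawn_helper() is a
 * hypothetical stand-in for the actual helper launch.
 */
static int example_guarded_spawn(void)
{
	int ret = usermodehelper_read_trylock();

	if (ret)
		return ret;	/* -EAGAIN: helpers are disabled */

	ret = example_spawn_helper();
	usermodehelper_read_unlock();
	return ret;
}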

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wakeup tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
					atomic_read(&running_helpers) == 0,
					RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
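
/*
 * Usage sketch (editorial addition, simplified): roughly how the PM
 * core of this era steps through the disable depths around freezing
 * tasks. example_freeze_tasks() is a hypothetical stand-in for the
 * freezer; the real call sites live in kernel/power/.
 */
static int example_freeze(void)
{
	int error = __usermodehelper_disable(UMH_FREEZING);

	if (error)
		return error;

	error = example_freeze_tasks();
	if (error)
		__usermodehelper_set_disable_depth(UMH_ENABLED);
	else
		__usermodehelper_set_disable_depth(UMH_DISABLED);
	return error;
}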

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic_inc();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 */
static
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
						  char **envp, gfp_t gfp_mask)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, __call_usermodehelper);
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;
  out:
	return sub_info;
}

/**
 * call_usermodehelper_setfns - set a cleanup/init function
 * @info: a subprocess_info returned by call_usermodehelper_setup
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * freed.  This can be used for freeing the argv and envp.  The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
static
void call_usermodehelper_setfns(struct subprocess_info *info,
		    int (*init)(struct subprocess_info *info, struct cred *new),
		    void (*cleanup)(struct subprocess_info *info),
		    void *data)
{
	info->cleanup = cleanup;
	info->init = init;
	info->data = data;
}

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (ie. it runs with full root capabilities).
 */
static
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	helper_lock();
	if (sub_info->path[0] == '\0')
		goto out;

	if (!khelper_wq || usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}

int call_usermodehelper_fns(
	char *path, char **argv, char **envp, int wait,
	int (*init)(struct subprocess_info *info, struct cred *new),
	void (*cleanup)(struct subprocess_info *), void *data)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask);

	if (info == NULL)
		return -ENOMEM;

	call_usermodehelper_setfns(info, init, cleanup, data);

	return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper_fns);
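
/*
 * Usage sketch (editorial addition): with setup/setfns/exec now
 * static, call_usermodehelper_fns() is the exported entry point.
 * UMH_KILLABLE lets a fatal signal abort the wait; umh_complete()
 * then disposes of sub_info. The helper path and arguments are
 * hypothetical.
 */
static int example_run_agent(void)
{
	char *argv[] = { "/sbin/example-agent", "--oneshot", NULL };
	static char *envp[] = { "HOME=/",
				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	return call_usermodehelper_fns(argv[0], argv, envp,
				       UMH_WAIT_PROC | UMH_KILLABLE,
				       NULL, NULL, NULL);
}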

static int proc_cap_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace.  Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};

void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");
	BUG_ON(!khelper_wq);
}