/*
 * kernel userspace event delivery
 *
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2004 Novell, Inc. All rights reserved.
 * Copyright (C) 2004 IBM, Inc. All rights reserved.
 *
 * Licensed under the GNU GPL v2.
 *
 * Authors:
 *	Robert Love		<rml@novell.com>
 *	Kay Sievers		<kay.sievers@vrfy.org>
 *	Arjan van de Ven	<arjanv@redhat.com>
 *	Greg Kroah-Hartman	<greg@kroah.com>
 */

#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <net/sock.h>
#include <net/net_namespace.h>


u64 uevent_seqnum;
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
#ifdef CONFIG_NET
struct uevent_sock {
	struct list_head list;
	struct sock *sk;
};
static LIST_HEAD(uevent_sock_list);
#endif

/* This lock protects uevent_seqnum and uevent_sock_list */
static DEFINE_MUTEX(uevent_sock_mutex);

/* the strings here must match the enum in include/linux/kobject.h */
static const char *kobject_actions[] = {
	[KOBJ_ADD] =		"add",
	[KOBJ_REMOVE] =		"remove",
	[KOBJ_CHANGE] =		"change",
	[KOBJ_MOVE] =		"move",
	[KOBJ_ONLINE] =		"online",
	[KOBJ_OFFLINE] =	"offline",
};

/**
 * kobject_action_type - translate action string to numeric type
 *
 * @buf: buffer containing the action string, newline is ignored
 * @count: length of buffer
 * @type: pointer to the location to store the action type
 *
 * Returns 0 if the action string was recognized.
 */
int kobject_action_type(const char *buf, size_t count,
			enum kobject_action *type)
{
	enum kobject_action action;
	int ret = -EINVAL;

	if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
		count--;

	if (!count)
		goto out;

	for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) {
		if (strncmp(kobject_actions[action], buf, count) != 0)
			continue;
		if (kobject_actions[action][count] != '\0')
			continue;
		*type = action;
		ret = 0;
		break;
	}
out:
	return ret;
}
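
/*
 * Example (illustrative sketch, not taken from this file): a sysfs store
 * method that lets userspace re-trigger an event could translate the
 * written string with kobject_action_type() and resend it.  The function
 * name below is hypothetical.
 *
 *	static ssize_t uevent_store(struct kobject *kobj,
 *				    struct kobj_attribute *attr,
 *				    const char *buf, size_t count)
 *	{
 *		enum kobject_action action;
 *
 *		if (kobject_action_type(buf, count, &action) == 0)
 *			kobject_uevent(kobj, action);
 *		return count;
 *	}
 */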

#ifdef CONFIG_NET
/* filter out netlink listeners in a different namespace than the kobject's */
static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
{
	struct kobject *kobj = data, *ksobj;
	const struct kobj_ns_type_operations *ops;

	ops = kobj_ns_ops(kobj);
	if (!ops && kobj->kset) {
		ksobj = &kobj->kset->kobj;
		if (ksobj->parent != NULL)
			ops = kobj_ns_ops(ksobj->parent);
	}

	if (ops && ops->netlink_ns && kobj->ktype->namespace) {
		const void *sock_ns, *ns;
		ns = kobj->ktype->namespace(kobj);
		sock_ns = ops->netlink_ns(dsk);
		return sock_ns != ns;
	}

	return 0;
}
#endif

/* skip the usermode helper for kobjects outside the initial namespace */
static int kobj_usermode_filter(struct kobject *kobj)
{
	const struct kobj_ns_type_operations *ops;

	ops = kobj_ns_ops(kobj);
	if (ops) {
		const void *init_ns, *ns;
		ns = kobj->ktype->namespace(kobj);
		init_ns = ops->initial_ns();
		return ns != init_ns;
	}

	return 0;
}

static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
{
	int len;

	len = strlcpy(&env->buf[env->buflen], subsystem,
		      sizeof(env->buf) - env->buflen);
	if (len >= (sizeof(env->buf) - env->buflen)) {
		WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
		return -ENOMEM;
	}

	env->argv[0] = uevent_helper;
	env->argv[1] = &env->buf[env->buflen];
	env->argv[2] = NULL;

	env->buflen += len + 1;
	return 0;
}

static void cleanup_uevent_env(struct subprocess_info *info)
{
	kfree(info->data);
}

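/*
 * Illustrative sketch (not part of this file): a minimal uevent helper
 * binary is invoked as "<uevent_helper> <subsystem>" and reads the event
 * from its environment, e.g. the ACTION, DEVPATH, SUBSYSTEM and SEQNUM
 * variables added by kobject_uevent_env() below.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(int argc, char *argv[])
 *	{
 *		const char *action = getenv("ACTION");
 *		const char *devpath = getenv("DEVPATH");
 *
 *		if (action && devpath)
 *			fprintf(stderr, "%s: %s@%s\n",
 *				argc > 1 ? argv[1] : "?", action, devpath);
 *		return 0;
 *	}
 */
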
/**
 * kobject_uevent_env - send an uevent with environmental data
 *
 * @kobj: struct kobject that the action is happening to
 * @action: action that is happening
 * @envp_ext: pointer to environmental data
 *
 * Returns 0 if kobject_uevent_env() is completed with success or the
 * corresponding error when it fails.
 */
int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
		       char *envp_ext[])
{
	struct kobj_uevent_env *env;
	const char *action_string = kobject_actions[action];
	const char *devpath = NULL;
	const char *subsystem;
	struct kobject *top_kobj;
	struct kset *kset;
	const struct kset_uevent_ops *uevent_ops;
	int i = 0;
	int retval = 0;
#ifdef CONFIG_NET
	struct uevent_sock *ue_sk;
#endif

	pr_debug("kobject: '%s' (%p): %s\n",
		 kobject_name(kobj), kobj, __func__);

	/* search the kset we belong to */
	top_kobj = kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;

	if (!top_kobj->kset) {
		pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
			 "without kset!\n", kobject_name(kobj), kobj,
			 __func__);
		return -EINVAL;
	}

	kset = top_kobj->kset;
	uevent_ops = kset->uevent_ops;

	/* skip the event, if uevent_suppress is set */
	if (kobj->uevent_suppress) {
		pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
			 "caused the event to drop!\n",
			 kobject_name(kobj), kobj, __func__);
		return 0;
	}
	/* skip the event, if the filter returns zero. */
	if (uevent_ops && uevent_ops->filter)
		if (!uevent_ops->filter(kset, kobj)) {
			pr_debug("kobject: '%s' (%p): %s: filter function "
				 "caused the event to drop!\n",
				 kobject_name(kobj), kobj, __func__);
			return 0;
		}

	/* originating subsystem */
	if (uevent_ops && uevent_ops->name)
		subsystem = uevent_ops->name(kset, kobj);
	else
		subsystem = kobject_name(&kset->kobj);
	if (!subsystem) {
		pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
			 "event to drop!\n", kobject_name(kobj), kobj,
			 __func__);
		return 0;
	}

	/* environment buffer */
	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* complete object path */
	devpath = kobject_get_path(kobj, GFP_KERNEL);
	if (!devpath) {
		retval = -ENOENT;
		goto exit;
	}

	/* default keys */
	retval = add_uevent_var(env, "ACTION=%s", action_string);
	if (retval)
		goto exit;
	retval = add_uevent_var(env, "DEVPATH=%s", devpath);
	if (retval)
		goto exit;
	retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
	if (retval)
		goto exit;

	/* keys passed in from the caller */
	if (envp_ext) {
		for (i = 0; envp_ext[i]; i++) {
			retval = add_uevent_var(env, "%s", envp_ext[i]);
			if (retval)
				goto exit;
		}
	}

	/* let the kset specific function add its stuff */
	if (uevent_ops && uevent_ops->uevent) {
		retval = uevent_ops->uevent(kset, kobj, env);
		if (retval) {
			pr_debug("kobject: '%s' (%p): %s: uevent() returned "
				 "%d\n", kobject_name(kobj), kobj,
				 __func__, retval);
			goto exit;
		}
	}

	/*
	 * Mark "add" and "remove" events in the object to ensure proper
	 * events to userspace during automatic cleanup. If the object did
	 * send an "add" event, "remove" will automatically be generated by
	 * the core, if not already done by the caller.
	 */
	if (action == KOBJ_ADD)
		kobj->state_add_uevent_sent = 1;
	else if (action == KOBJ_REMOVE)
		kobj->state_remove_uevent_sent = 1;

	mutex_lock(&uevent_sock_mutex);
	/* we will send an event, so request a new sequence number */
	retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
	if (retval) {
		mutex_unlock(&uevent_sock_mutex);
		goto exit;
	}

#if defined(CONFIG_NET)
	/* send netlink message */
	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
		struct sock *uevent_sock = ue_sk->sk;
		struct sk_buff *skb;
		size_t len;

		if (!netlink_has_listeners(uevent_sock, 1))
			continue;

		/* allocate message with the maximum possible size */
		len = strlen(action_string) + strlen(devpath) + 2;
		skb = alloc_skb(len + env->buflen, GFP_KERNEL);
		if (skb) {
			char *scratch;

			/* add header */
			scratch = skb_put(skb, len);
			sprintf(scratch, "%s@%s", action_string, devpath);

			/* copy keys to our continuous event payload buffer */
			for (i = 0; i < env->envp_idx; i++) {
				len = strlen(env->envp[i]) + 1;
				scratch = skb_put(skb, len);
				strcpy(scratch, env->envp[i]);
			}

			NETLINK_CB(skb).dst_group = 1;
			retval = netlink_broadcast_filtered(uevent_sock, skb,
							    0, 1, GFP_KERNEL,
							    kobj_bcast_filter,
							    kobj);
			/* ENOBUFS should be handled in userspace */
			if (retval == -ENOBUFS || retval == -ESRCH)
				retval = 0;
		} else
			retval = -ENOMEM;
	}
#endif
	mutex_unlock(&uevent_sock_mutex);

	/* call uevent_helper, usually only enabled during early boot */
	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
		struct subprocess_info *info;

		retval = add_uevent_var(env, "HOME=/");
		if (retval)
			goto exit;
		retval = add_uevent_var(env,
					"PATH=/sbin:/bin:/usr/sbin:/usr/bin");
		if (retval)
			goto exit;
		retval = init_uevent_argv(env, subsystem);
		if (retval)
			goto exit;

		retval = -ENOMEM;
		info = call_usermodehelper_setup(env->argv[0], env->argv,
						 env->envp, GFP_KERNEL,
						 NULL, cleanup_uevent_env, env);
		if (info) {
			retval = call_usermodehelper_exec(info, UMH_NO_WAIT);
			env = NULL;	/* freed by cleanup_uevent_env */
		}
	}

exit:
	kfree(devpath);
	kfree(env);
	return retval;
}
EXPORT_SYMBOL_GPL(kobject_uevent_env);
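
/*
 * Example (illustrative sketch, not taken from this file): a driver that
 * wants to notify userspace of a state change can pass extra environment
 * variables along with the event.  The device and variable names below
 * are hypothetical.
 *
 *	char *envp[] = { "EVENT=overheat", "TEMP=95", NULL };
 *
 *	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
 */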

/**
 * kobject_uevent - notify userspace by sending an uevent
 *
 * @kobj: struct kobject that the action is happening to
 * @action: action that is happening
 *
 * Returns 0 if kobject_uevent() is completed with success or the
 * corresponding error when it fails.
 */
int kobject_uevent(struct kobject *kobj, enum kobject_action action)
{
	return kobject_uevent_env(kobj, action, NULL);
}
EXPORT_SYMBOL_GPL(kobject_uevent);

/**
 * add_uevent_var - add key value string to the environment buffer
 * @env: environment buffer structure
 * @format: printf format for the key=value pair
 *
 * Returns 0 if environment variable was added successfully or -ENOMEM
 * if no space was available.
 */
int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
{
	va_list args;
	int len;

	if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
		WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
		return -ENOMEM;
	}

	va_start(args, format);
	len = vsnprintf(&env->buf[env->buflen],
			sizeof(env->buf) - env->buflen,
			format, args);
	va_end(args);

	if (len >= (sizeof(env->buf) - env->buflen)) {
		WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
		return -ENOMEM;
	}

	env->envp[env->envp_idx++] = &env->buf[env->buflen];
	env->buflen += len + 1;
	return 0;
}
EXPORT_SYMBOL_GPL(add_uevent_var);
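
/*
 * Example (illustrative sketch, not taken from this file): a kset's
 * uevent callback typically uses add_uevent_var() to append its own
 * KEY=value pairs before the event is sent.  The names below are
 * hypothetical.
 *
 *	static int example_uevent(struct kset *kset, struct kobject *kobj,
 *				  struct kobj_uevent_env *env)
 *	{
 *		return add_uevent_var(env, "EXAMPLE_NAME=%s",
 *				      kobject_name(kobj));
 *	}
 *
 *	static const struct kset_uevent_ops example_uevent_ops = {
 *		.uevent = example_uevent,
 *	};
 */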

#if defined(CONFIG_NET)
static int uevent_net_init(struct net *net)
{
	struct uevent_sock *ue_sk;
	struct netlink_kernel_cfg cfg = {
		.groups = 1,
		.flags = NL_CFG_F_NONROOT_RECV,
	};

	ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
	if (!ue_sk)
		return -ENOMEM;

	ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
	if (!ue_sk->sk) {
		printk(KERN_ERR
		       "kobject_uevent: unable to create netlink socket!\n");
		kfree(ue_sk);
		return -ENODEV;
	}
	mutex_lock(&uevent_sock_mutex);
	list_add_tail(&ue_sk->list, &uevent_sock_list);
	mutex_unlock(&uevent_sock_mutex);
	return 0;
}

static void uevent_net_exit(struct net *net)
{
	struct uevent_sock *ue_sk;

	mutex_lock(&uevent_sock_mutex);
	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
		if (sock_net(ue_sk->sk) == net)
			goto found;
	}
	mutex_unlock(&uevent_sock_mutex);
	return;

found:
	list_del(&ue_sk->list);
	mutex_unlock(&uevent_sock_mutex);

	netlink_kernel_release(ue_sk->sk);
	kfree(ue_sk);
}

static struct pernet_operations uevent_net_ops = {
	.init = uevent_net_init,
	.exit = uevent_net_exit,
};

static int __init kobject_uevent_init(void)
{
	return register_pernet_subsys(&uevent_net_ops);
}

postcore_initcall(kobject_uevent_init);
#endif
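
/*
 * Example (illustrative sketch, not taken from this file): userspace can
 * receive these events by joining group 1 of a NETLINK_KOBJECT_UEVENT
 * socket.  Each datagram starts with the "action@devpath" header built in
 * kobject_uevent_env() above, followed by the NUL-separated KEY=value
 * strings.  The program below only prints the header of each event.
 *
 *	#include <linux/netlink.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_nl addr = { .nl_family = AF_NETLINK,
 *					    .nl_groups = 1 };
 *		char buf[4096];
 *		int fd = socket(AF_NETLINK, SOCK_DGRAM,
 *				NETLINK_KOBJECT_UEVENT);
 *
 *		if (fd < 0 || bind(fd, (struct sockaddr *)&addr,
 *				   sizeof(addr)) < 0)
 *			return 1;
 *		for (;;) {
 *			ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
 *
 *			if (len <= 0)
 *				break;
 *			buf[len] = '\0';
 *			printf("%s\n", buf);
 *		}
 *		return 0;
 *	}
 */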