// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Device Mapper Uevent Support (dm-uevent)
 *
 * Copyright IBM Corporation, 2007
 * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>
#include <linux/export.h>

#include "dm.h"
#include "dm-uevent.h"

#define DM_MSG_PREFIX "uevent"

static const struct {
        enum dm_uevent_type type;
        enum kobject_action action;
        char *name;
} _dm_uevent_type_names[] = {
        {DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
        {DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
};

static struct kmem_cache *_dm_event_cache;

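/*
 * One queued uevent (descriptive note added for clarity): the environment
 * built up via add_uevent_var(), the owning mapped_device, and the device's
 * name/uuid, linked through @elist into a pending list until
 * dm_send_uevents() emits it.
 */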
struct dm_uevent {
        struct mapped_device *md;
        enum kobject_action action;
        struct kobj_uevent_env ku_env;
        struct list_head elist;
        char name[DM_NAME_LEN];
        char uuid[DM_UUID_LEN];
};

static void dm_uevent_free(struct dm_uevent *event)
{
        kmem_cache_free(_dm_event_cache, event);
}

static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
{
        struct dm_uevent *event;

        event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
        if (!event)
                return NULL;

        INIT_LIST_HEAD(&event->elist);
        event->md = md;

        return event;
}

static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
                                              struct dm_target *ti,
                                              enum kobject_action action,
                                              const char *dm_action,
                                              const char *path,
                                              unsigned int nr_valid_paths)
{
        struct dm_uevent *event;

        event = dm_uevent_alloc(md);
        if (!event) {
                DMERR("%s: dm_uevent_alloc() failed", __func__);
                goto err_nomem;
        }

        event->action = action;

        if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
                DMERR("%s: add_uevent_var() for DM_TARGET failed",
                      __func__);
                goto err_add;
        }

        if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
                DMERR("%s: add_uevent_var() for DM_ACTION failed",
                      __func__);
                goto err_add;
        }

        if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
                           dm_next_uevent_seq(md))) {
                DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
                      __func__);
                goto err_add;
        }

        if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
                DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
                goto err_add;
        }

        if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
                           nr_valid_paths)) {
                DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
                      __func__);
                goto err_add;
        }

        return event;

err_add:
        dm_uevent_free(event);
err_nomem:
        return ERR_PTR(-ENOMEM);
}

/**
 * dm_send_uevents - send uevents for given list
 *
 * @events: list of events to send
 * @kobj: kobject generating event
 *
 */
void dm_send_uevents(struct list_head *events, struct kobject *kobj)
{
        int r;
        struct dm_uevent *event, *next;

        list_for_each_entry_safe(event, next, events, elist) {
                list_del_init(&event->elist);

                /*
                 * When a device is being removed this copy fails and we
                 * discard these unsent events.
                 */
                if (dm_copy_name_and_uuid(event->md, event->name,
                                          event->uuid)) {
                        DMINFO("%s: skipping sending uevent for lost device",
                               __func__);
                        goto uevent_free;
                }

                if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
                        DMERR("%s: add_uevent_var() for DM_NAME failed",
                              __func__);
                        goto uevent_free;
                }

                if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
                        DMERR("%s: add_uevent_var() for DM_UUID failed",
                              __func__);
                        goto uevent_free;
                }

                r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
                if (r)
                        DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free:
                dm_uevent_free(event);
        }
}
EXPORT_SYMBOL_GPL(dm_send_uevents);
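
/*
 * Illustrative use (a sketch, not part of this file): a mapped device's
 * event handler is expected to splice its queued events onto a local list
 * and pass that list here together with the kobject the uevents should be
 * emitted against, e.g. the device's gendisk. The names "md" and "uevents"
 * are assumptions for this example.
 *
 *      LIST_HEAD(uevents);
 *
 *      list_splice_init(&md->uevent_list, &uevents);
 *      dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 */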

/**
 * dm_path_uevent - called to create a new path event and queue it
 *
 * @event_type: path event type enum
 * @ti: pointer to a dm_target
 * @path: string containing pathname
 * @nr_valid_paths: number of valid paths remaining
 *
 */
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
                    const char *path, unsigned int nr_valid_paths)
{
        struct mapped_device *md = dm_table_get_md(ti->table);
        struct dm_uevent *event;

        if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
                DMERR("%s: Invalid event_type %d", __func__, event_type);
                return;
        }

        event = dm_build_path_uevent(md, ti,
                                     _dm_uevent_type_names[event_type].action,
                                     _dm_uevent_type_names[event_type].name,
                                     path, nr_valid_paths);
        if (IS_ERR(event))
                return;

        dm_uevent_add(md, &event->elist);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);
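
/*
 * Illustrative use (a sketch, not part of this file): a multipath target
 * reporting a failed path could queue the corresponding uevent like this;
 * "m" and "pgpath" are assumed names for the example.
 *
 *      dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 *                     pgpath->path.dev->name, nr_valid_paths);
 */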

int dm_uevent_init(void)
{
        _dm_event_cache = KMEM_CACHE(dm_uevent, 0);
        if (!_dm_event_cache)
                return -ENOMEM;

        DMINFO("version 1.0.3");

        return 0;
}

void dm_uevent_exit(void)
{
        kmem_cache_destroy(_dm_event_cache);
}