Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
 
 
 
 
  3 * Copyright(c) 2014 Intel Mobile Communications GmbH
  4 * Copyright(c) 2015 Intel Deutschland GmbH
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  5 *
  6 * Author: Johannes Berg <johannes@sipsolutions.net>
  7 */
  8#include <linux/module.h>
  9#include <linux/device.h>
 10#include <linux/devcoredump.h>
 11#include <linux/list.h>
 12#include <linux/slab.h>
 13#include <linux/fs.h>
 14#include <linux/workqueue.h>
 15
/* forward declaration: the class itself is defined below its attribute groups */
static struct class devcd_class;

/* global disable flag, for security purposes */
static bool devcd_disabled;
 20
 
 
 
/* one instance per pending device coredump; freed in devcd_dev_release() */
struct devcd_entry {
	struct device devcd_dev;	/* embedded device exposing the dump in sysfs */
	void *data;			/* opaque dump payload, owned by this entry */
	size_t datalen;			/* length of @data in bytes */
	/*
	 * Here, mutex is required to serialize the calls to del_wk work between
	 * user/kernel space which happens when devcd is added with device_add()
	 * and that sends uevent to user space. User space reads the uevents,
	 * and calls to devcd_data_write() which try to modify the work which is
	 * not even initialized/queued from devcoredump.
	 *
	 *
	 *
	 *        cpu0(X)                                 cpu1(Y)
	 *
	 *        dev_coredump() uevent sent to user space
	 *        device_add()  ======================> user space process Y reads the
	 *                                              uevents writes to devcd fd
	 *                                              which results into writes to
	 *
	 *                                             devcd_data_write()
	 *                                               mod_delayed_work()
	 *                                                 try_to_grab_pending()
	 *                                                   del_timer()
	 *                                                     debug_assert_init()
	 *       INIT_DELAYED_WORK()
	 *       schedule_delayed_work()
	 *
	 *
	 * Also, mutex alone would not be enough to avoid scheduling of
	 * del_wk work after it get flush from a call to devcd_free()
	 * mentioned as below.
	 *
	 *	disabled_store()
	 *        devcd_free()
	 *          mutex_lock()             devcd_data_write()
	 *          flush_delayed_work()
	 *          mutex_unlock()
	 *                                   mutex_lock()
	 *                                   mod_delayed_work()
	 *                                   mutex_unlock()
	 * So, delete_work flag is required.
	 */
	struct mutex mutex;
	bool delete_work;	/* set once deletion was requested/forced; never cleared */
	struct module *owner;	/* module providing @read/@free, pinned via try_module_get() */
	ssize_t (*read)(char *buffer, loff_t offset, size_t count,
			void *data, size_t datalen);
	void (*free)(void *data);	/* releases @data when the entry dies */
	struct delayed_work del_wk;	/* delayed device_del()/put_device() */
	struct device *failing_dev;	/* the crashed device this dump belongs to */
};
 73
/* map the embedded struct device back to its containing devcd_entry */
static struct devcd_entry *dev_to_devcd(struct device *dev)
{
	return container_of(dev, struct devcd_entry, devcd_dev);
}
 78
/*
 * Final release for the embedded device: frees the dump payload, drops the
 * module pin, removes the "devcoredump" symlink from the failing device (if
 * its sysfs node still exists) and frees the entry itself.
 */
static void devcd_dev_release(struct device *dev)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	/* release the dump payload via the creator-supplied callback */
	devcd->free(devcd->data);
	module_put(devcd->owner);

	/*
	 * this seems racy, but I don't see a notifier or such on
	 * a struct device to know when it goes away?
	 */
	if (devcd->failing_dev->kobj.sd)
		sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj,
				  "devcoredump");

	put_device(devcd->failing_dev);
	kfree(devcd);
}
 97
/*
 * Delayed-work handler: tear the dump device down. The put_device() drops
 * the reference taken at creation and normally triggers devcd_dev_release().
 */
static void devcd_del(struct work_struct *wk)
{
	struct devcd_entry *devcd;

	devcd = container_of(wk, struct devcd_entry, del_wk.work);

	device_del(&devcd->devcd_dev);
	put_device(&devcd->devcd_dev);
}
107
/*
 * sysfs "data" read handler: delegate to the creator-supplied ->read().
 *
 * NOTE(review): this path takes no devcd->mutex; it appears to rely on sysfs
 * tearing the attribute down before release — confirm against upstream.
 */
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct devcd_entry *devcd = dev_to_devcd(dev);

	return devcd->read(buffer, offset, count, devcd->data, devcd->datalen);
}
117
/*
 * sysfs "data" write handler: any write dismisses the dump by pushing the
 * delayed deletion work to "now". The mutex + delete_work flag serialize
 * against creation and against devcd_free() (see struct devcd_entry).
 */
static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr,
				char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct devcd_entry *devcd = dev_to_devcd(dev);

	mutex_lock(&devcd->mutex);
	if (!devcd->delete_work) {
		devcd->delete_work = true;
		/* re-arm del_wk to run immediately instead of at the timeout */
		mod_delayed_work(system_wq, &devcd->del_wk, 0);
	}
	mutex_unlock(&devcd->mutex);

	return count;
}
134
/* "data" attribute: reading returns the dump, any write dismisses it */
static struct bin_attribute devcd_attr_data = {
	.attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
	.size = 0,	/* size not known up front; the read callback bounds it */
	.read = devcd_data_read,
	.write = devcd_data_write,
};

static struct bin_attribute *devcd_dev_bin_attrs[] = {
	&devcd_attr_data, NULL,
};

static const struct attribute_group devcd_dev_group = {
	.bin_attrs = devcd_dev_bin_attrs,
};

/* attribute groups attached to every devcd device of this class */
static const struct attribute_group *devcd_dev_groups[] = {
	&devcd_dev_group, NULL,
};
153
/*
 * class_for_each_device() callback: mark the entry deleted and wait for the
 * delayed work to run. Setting delete_work under the mutex prevents
 * devcd_data_write() from re-arming del_wk after the flush (see the second
 * diagram on struct devcd_entry).
 */
static int devcd_free(struct device *dev, void *data)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	mutex_lock(&devcd->mutex);
	if (!devcd->delete_work)
		devcd->delete_work = true;

	flush_delayed_work(&devcd->del_wk);
	mutex_unlock(&devcd->mutex);
	return 0;
}
166
/* sysfs read of /sys/class/devcoredump/disabled: "0" or "1" */
static ssize_t disabled_show(const struct class *class, const struct class_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, "%d\n", devcd_disabled);
}
172
173/*
174 *
175 *	disabled_store()                                	worker()
176 *	 class_for_each_device(&devcd_class,
177 *		NULL, NULL, devcd_free)
178 *         ...
179 *         ...
180 *	   while ((dev = class_dev_iter_next(&iter))
181 *                                                             devcd_del()
182 *                                                               device_del()
183 *                                                                 put_device() <- last reference
184 *             error = fn(dev, data)                           devcd_dev_release()
185 *             devcd_free(dev, data)                           kfree(devcd)
186 *             mutex_lock(&devcd->mutex);
187 *
188 *
 * In the above diagram, it looks like disabled_store() could race with a
 * concurrently running devcd_del() and cause a memory abort while acquiring
 * devcd->mutex, since the mutex lives inside the devcd memory that is
 * kfree'd once the last reference is dropped by put_device(). However, this
 * cannot happen: fn(dev, data) runs with its own reference to the device,
 * taken via the klist_node iterator, so the put_device() shown above is
 * never the last reference and the situation described does not occur.
195 */
196
197static ssize_t disabled_store(const struct class *class, const struct class_attribute *attr,
198			      const char *buf, size_t count)
199{
200	long tmp = simple_strtol(buf, NULL, 10);
201
202	/*
203	 * This essentially makes the attribute write-once, since you can't
204	 * go back to not having it disabled. This is intentional, it serves
205	 * as a system lockdown feature.
206	 */
207	if (tmp != 1)
208		return -EINVAL;
209
210	devcd_disabled = true;
211
212	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
213
214	return count;
215}
static CLASS_ATTR_RW(disabled);

/* class-level attributes: just the write-once "disabled" switch */
static struct attribute *devcd_class_attrs[] = {
	&class_attr_disabled.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devcd_class);

/* the /sys/class/devcoredump class under which all dumps appear */
static struct class devcd_class = {
	.name		= "devcoredump",
	.dev_release	= devcd_dev_release,
	.dev_groups	= devcd_dev_groups,
	.class_groups	= devcd_class_groups,
};
230
/* default ->read for dev_coredumpv(): bounded copy out of the vmalloc'ed dump */
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
			   void *data, size_t datalen)
{
	return memory_read_from_buffer(buffer, count, &offset, data, datalen);
}
236
/* default ->free for dev_coredumpv(): the payload was vmalloc'ed by the caller */
static void devcd_freev(void *data)
{
	vfree(data);
}
241
/**
 * dev_coredumpv - create device coredump with vmalloc data
 * @dev: the struct device for the crashed device
 * @data: vmalloc data containing the device coredump
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * This function takes ownership of the vmalloc'ed data and will free
 * it when it is no longer used. See dev_coredumpm() for more information.
 */
void dev_coredumpv(struct device *dev, void *data, size_t datalen,
		   gfp_t gfp)
{
	/* convenience wrapper: plug in the vmalloc read/free helpers */
	dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev);
}
EXPORT_SYMBOL_GPL(dev_coredumpv);
258
/* class_find_device() match callback: does this dump belong to @failing? */
static int devcd_match_failing(struct device *dev, const void *failing)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	return devcd->failing_dev == failing;
}
265
/**
 * devcd_free_sgtable - free all the memory of the given scatterlist table
 * (i.e. both pages and scatterlist instances)
 * NOTE: if two tables allocated with devcd_alloc_sgtable and then chained
 * using the sg_chain function then that function should be called only once
 * on the chained table
 * @data: pointer to sg_table to free
 */
static void devcd_free_sgtable(void *data)
{
	/* _devcd_free_sgtable() is defined elsewhere in this driver */
	_devcd_free_sgtable(data);
}
278
/**
 * devcd_read_from_sgtable - copy data from sg_table to a given buffer
 * and return the number of bytes read
 * @buffer: the buffer to copy the data to it
 * @buf_len: the length of the buffer
 * @data: the scatterlist table to copy from
 * @offset: start copy from @offset@ bytes from the head of the data
 *	in the given scatterlist
 * @data_len: the length of the data in the sg_table
 */
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
				       size_t buf_len, void *data,
				       size_t data_len)
{
	struct scatterlist *table = data;

	if (offset > data_len)
		return -EINVAL;

	/* clamp the copy to the tail of the data past @offset */
	if (offset + buf_len > data_len)
		buf_len = data_len - offset;
	return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len,
				  offset);
}
303
/**
 * dev_coredump_put - remove device coredump
 * @dev: the struct device for the crashed device
 *
 * dev_coredump_put() removes coredump, if exists, for a given device from
 * the file system and free its associated data otherwise, does nothing.
 *
 * It is useful for modules that do not want to keep coredump
 * available after its unload.
 */
void dev_coredump_put(struct device *dev)
{
	struct device *existing;

	existing = class_find_device(&devcd_class, NULL, dev,
				     devcd_match_failing);
	if (existing) {
		/* force immediate deletion, then drop the lookup reference */
		devcd_free(existing, NULL);
		put_device(existing);
	}
}
EXPORT_SYMBOL_GPL(dev_coredump_put);
326
/**
 * dev_coredumpm_timeout - create device coredump with read/free methods with a
 * custom timeout.
 * @dev: the struct device for the crashed device
 * @owner: the module that contains the read/free functions, use %THIS_MODULE
 * @data: data cookie for the @read/@free functions
 * @datalen: length of the data
 * @gfp: allocation flags
 * @read: function to read from the given buffer
 * @free: function to free the given buffer
 * @timeout: time in jiffies to remove coredump
 *
 * Creates a new device coredump for the given device. If a previous one hasn't
 * been read yet, the new coredump is discarded. The data lifetime is determined
 * by the device coredump framework and when it is no longer needed the @free
 * function will be called to free the data.
 */
void dev_coredumpm_timeout(struct device *dev, struct module *owner,
			   void *data, size_t datalen, gfp_t gfp,
			   ssize_t (*read)(char *buffer, loff_t offset,
					   size_t count, void *data,
					   size_t datalen),
			   void (*free)(void *data),
			   unsigned long timeout)
{
	static atomic_t devcd_count = ATOMIC_INIT(0);	/* unique devcd%d names */
	struct devcd_entry *devcd;
	struct device *existing;

	/* dumps are disabled system-wide (write-once lockdown): drop the data */
	if (devcd_disabled)
		goto free;

	/* at most one outstanding dump per failing device; keep the older one */
	existing = class_find_device(&devcd_class, NULL, dev,
				     devcd_match_failing);
	if (existing) {
		put_device(existing);
		goto free;
	}

	/* pin the module providing @read/@free for the dump's lifetime */
	if (!try_module_get(owner))
		goto free;

	devcd = kzalloc(sizeof(*devcd), gfp);
	if (!devcd)
		goto put_module;

	devcd->owner = owner;
	devcd->data = data;
	devcd->datalen = datalen;
	devcd->read = read;
	devcd->free = free;
	devcd->failing_dev = get_device(dev);
	devcd->delete_work = false;

	mutex_init(&devcd->mutex);
	device_initialize(&devcd->devcd_dev);

	dev_set_name(&devcd->devcd_dev, "devcd%d",
		     atomic_inc_return(&devcd_count));
	devcd->devcd_dev.class = &devcd_class;

	/*
	 * Hold the mutex across device_add() so a racing devcd_data_write()
	 * cannot touch del_wk before INIT_DELAYED_WORK() below has run
	 * (see the diagram on struct devcd_entry).
	 */
	mutex_lock(&devcd->mutex);
	/* suppress KOBJ_ADD until the links and del_wk are fully set up */
	dev_set_uevent_suppress(&devcd->devcd_dev, true);
	if (device_add(&devcd->devcd_dev))
		goto put_device;

	/*
	 * These should normally not fail, but there is no problem
	 * continuing without the links, so just warn instead of
	 * failing.
	 */
	if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
			      "failing_device") ||
	    sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
		              "devcoredump"))
		dev_warn(dev, "devcoredump create_link failed\n");

	dev_set_uevent_suppress(&devcd->devcd_dev, false);
	kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
	/* arm the auto-delete; user space can trigger it earlier via "data" */
	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
	schedule_delayed_work(&devcd->del_wk, timeout);
	mutex_unlock(&devcd->mutex);
	return;
 put_device:
	put_device(&devcd->devcd_dev);
	mutex_unlock(&devcd->mutex);
 put_module:
	module_put(owner);
 free:
	free(data);
}
EXPORT_SYMBOL_GPL(dev_coredumpm_timeout);
419
/**
 * dev_coredumpsg - create device coredump that uses scatterlist as data
 * parameter
 * @dev: the struct device for the crashed device
 * @table: the dump data
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * Creates a new device coredump for the given device. If a previous one hasn't
 * been read yet, the new coredump is discarded. The data lifetime is determined
 * by the device coredump framework and when it is no longer needed
 * it will free the data.
 */
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
		    size_t datalen, gfp_t gfp)
{
	/* convenience wrapper: plug in the scatterlist read/free helpers */
	dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
		      devcd_free_sgtable);
}
EXPORT_SYMBOL_GPL(dev_coredumpsg);
440
/* register the devcoredump class at boot */
static int __init devcoredump_init(void)
{
	return class_register(&devcd_class);
}
__initcall(devcoredump_init);
446
/* purge every outstanding dump, then unregister the class */
static void __exit devcoredump_exit(void)
{
	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
	class_unregister(&devcd_class);
}
__exitcall(devcoredump_exit);
v4.6
 
  1/*
  2 * This file is provided under the GPLv2 license.
  3 *
  4 * GPL LICENSE SUMMARY
  5 *
  6 * Copyright(c) 2014 Intel Mobile Communications GmbH
  7 *
  8 * This program is free software; you can redistribute it and/or modify
  9 * it under the terms of version 2 of the GNU General Public License as
 10 * published by the Free Software Foundation.
 11 *
 12 * This program is distributed in the hope that it will be useful, but
 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 15 * General Public License for more details.
 16 *
 17 * The full GNU General Public License is included in this distribution
 18 * in the file called COPYING.
 19 *
 20 * Contact Information:
 21 *  Intel Linux Wireless <ilw@linux.intel.com>
 22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 23 *
 24 * Author: Johannes Berg <johannes@sipsolutions.net>
 25 */
 26#include <linux/module.h>
 27#include <linux/device.h>
 28#include <linux/devcoredump.h>
 29#include <linux/list.h>
 30#include <linux/slab.h>
 31#include <linux/fs.h>
 32#include <linux/workqueue.h>
 33
/* forward declaration: the class itself is defined further down */
static struct class devcd_class;

/* global disable flag, for security purposes */
static bool devcd_disabled;

/* if data isn't read by userspace after 5 minutes then delete it */
#define DEVCD_TIMEOUT	(HZ * 60 * 5)
 41
/* one instance per pending device coredump; freed in devcd_dev_release() */
struct devcd_entry {
	struct device devcd_dev;	/* embedded device exposing the dump in sysfs */
	const void *data;		/* opaque dump payload, owned by this entry */
	size_t datalen;			/* length of @data in bytes */
	struct module *owner;		/* module providing @read/@free */
	ssize_t (*read)(char *buffer, loff_t offset, size_t count,
			const void *data, size_t datalen);
	void (*free)(const void *data);	/* releases @data on destruction */
	struct delayed_work del_wk;	/* delayed removal after DEVCD_TIMEOUT */
	struct device *failing_dev;	/* the crashed device this dump belongs to */
};
 53
/* map the embedded struct device back to its containing devcd_entry */
static struct devcd_entry *dev_to_devcd(struct device *dev)
{
	return container_of(dev, struct devcd_entry, devcd_dev);
}
 58
/*
 * Final release for the embedded device: frees the dump payload, drops the
 * module pin, removes the "devcoredump" symlink from the failing device (if
 * its sysfs node still exists) and frees the entry itself.
 */
static void devcd_dev_release(struct device *dev)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	devcd->free(devcd->data);
	module_put(devcd->owner);

	/*
	 * this seems racy, but I don't see a notifier or such on
	 * a struct device to know when it goes away?
	 */
	if (devcd->failing_dev->kobj.sd)
		sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj,
				  "devcoredump");

	put_device(devcd->failing_dev);
	kfree(devcd);
}
 77
/*
 * Delayed-work handler: tear the dump device down; the put_device() drops
 * the creation reference and normally triggers devcd_dev_release().
 */
static void devcd_del(struct work_struct *wk)
{
	struct devcd_entry *devcd;

	devcd = container_of(wk, struct devcd_entry, del_wk.work);

	device_del(&devcd->devcd_dev);
	put_device(&devcd->devcd_dev);
}
 87
/* sysfs "data" read handler: delegate to the creator-supplied ->read() */
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct devcd_entry *devcd = dev_to_devcd(dev);

	return devcd->read(buffer, offset, count, devcd->data, devcd->datalen);
}
 97
/*
 * sysfs "data" write handler: any write dismisses the dump by pushing the
 * delayed deletion work to "now".
 *
 * NOTE(review): del_wk is modified here with no serialization against the
 * creator in dev_coredumpm(); later kernels guard this with a mutex and a
 * delete_work flag — verify against current upstream before relying on it.
 */
static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr,
				char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct devcd_entry *devcd = dev_to_devcd(dev);

	mod_delayed_work(system_wq, &devcd->del_wk, 0);

	return count;
}
109
/* "data" attribute: reading returns the dump, any write dismisses it */
static struct bin_attribute devcd_attr_data = {
	.attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
	.size = 0,	/* size not known up front; the read callback bounds it */
	.read = devcd_data_read,
	.write = devcd_data_write,
};

static struct bin_attribute *devcd_dev_bin_attrs[] = {
	&devcd_attr_data, NULL,
};

static const struct attribute_group devcd_dev_group = {
	.bin_attrs = devcd_dev_bin_attrs,
};

/* attribute groups attached to every devcd device of this class */
static const struct attribute_group *devcd_dev_groups[] = {
	&devcd_dev_group, NULL,
};
128
/*
 * class_for_each_device() callback: run the pending deletion work now and
 * wait for it, removing the dump immediately.
 */
static int devcd_free(struct device *dev, void *data)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	flush_delayed_work(&devcd->del_wk);
	return 0;
}
136
/* sysfs read of /sys/class/devcoredump/disabled: "0" or "1" */
static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", devcd_disabled);
}
142
/*
 * sysfs write of /sys/class/devcoredump/disabled. Only "1" is accepted;
 * once set, devcoredump stays disabled and all existing dumps are purged
 * via devcd_free().
 *
 * NOTE(review): simple_strtol() ignores parse errors (garbage reads as 0,
 * which is then rejected by the tmp != 1 check) — kstrtol() would report
 * them properly; confirm against current upstream.
 */
static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
			      const char *buf, size_t count)
{
	long tmp = simple_strtol(buf, NULL, 10);

	/*
	 * This essentially makes the attribute write-once, since you can't
	 * go back to not having it disabled. This is intentional, it serves
	 * as a system lockdown feature.
	 */
	if (tmp != 1)
		return -EINVAL;

	devcd_disabled = true;

	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);

	return count;
}
 
162
/* class-level attributes: just the write-once "disabled" switch */
static struct class_attribute devcd_class_attrs[] = {
	__ATTR_RW(disabled),
	__ATTR_NULL
};

/* the /sys/class/devcoredump class under which all dumps appear */
static struct class devcd_class = {
	.name		= "devcoredump",
	.owner		= THIS_MODULE,
	.dev_release	= devcd_dev_release,
	.dev_groups	= devcd_dev_groups,
	.class_attrs	= devcd_class_attrs,
};
175
176static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
177			   const void *data, size_t datalen)
178{
179	if (offset > datalen)
180		return -EINVAL;
181
182	if (offset + count > datalen)
183		count = datalen - offset;
184
185	if (count)
186		memcpy(buffer, ((u8 *)data) + offset, count);
187
188	return count;
189}
190
/**
 * dev_coredumpv - create device coredump with vmalloc data
 * @dev: the struct device for the crashed device
 * @data: vmalloc data containing the device coredump
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * This function takes ownership of the vmalloc'ed data and will free
 * it when it is no longer used. See dev_coredumpm() for more information.
 */
void dev_coredumpv(struct device *dev, const void *data, size_t datalen,
		   gfp_t gfp)
{
	/* convenience wrapper: read from the flat buffer, free with vfree() */
	dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, vfree);
}
EXPORT_SYMBOL_GPL(dev_coredumpv);
207
/* class_find_device() match callback: does this dump belong to @failing? */
static int devcd_match_failing(struct device *dev, const void *failing)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	return devcd->failing_dev == failing;
}
214
/**
 * dev_coredumpm - create device coredump with read/free methods
 * @dev: the struct device for the crashed device
 * @owner: the module that contains the read/free functions, use %THIS_MODULE
 * @data: data cookie for the @read/@free functions
 * @datalen: length of the data
 * @gfp: allocation flags
 * @read: function to read from the given buffer
 * @free: function to free the given buffer
 *
 * Creates a new device coredump for the given device. If a previous one hasn't
 * been read yet, the new coredump is discarded. The data lifetime is determined
 * by the device coredump framework and when it is no longer needed the @free
 * function will be called to free the data.
 */
void dev_coredumpm(struct device *dev, struct module *owner,
		   const void *data, size_t datalen, gfp_t gfp,
		   ssize_t (*read)(char *buffer, loff_t offset, size_t count,
				   const void *data, size_t datalen),
		   void (*free)(const void *data))
{
	static atomic_t devcd_count = ATOMIC_INIT(0);	/* unique devcd%d names */
	struct devcd_entry *devcd;
	struct device *existing;

	/* dumps are disabled system-wide (write-once lockdown): drop the data */
	if (devcd_disabled)
		goto free;

	/* at most one outstanding dump per failing device; keep the older one */
	existing = class_find_device(&devcd_class, NULL, dev,
				     devcd_match_failing);
	if (existing) {
		put_device(existing);
		goto free;
	}

	/* pin the module providing @read/@free for the dump's lifetime */
	if (!try_module_get(owner))
		goto free;

	devcd = kzalloc(sizeof(*devcd), gfp);
	if (!devcd)
		goto put_module;

	devcd->owner = owner;
	devcd->data = data;
	devcd->datalen = datalen;
	devcd->read = read;
	devcd->free = free;
	devcd->failing_dev = get_device(dev);

	device_initialize(&devcd->devcd_dev);

	dev_set_name(&devcd->devcd_dev, "devcd%d",
		     atomic_inc_return(&devcd_count));
	devcd->devcd_dev.class = &devcd_class;

	if (device_add(&devcd->devcd_dev))
		goto put_device;

	/* symlink failures are deliberately tolerated: the dump still works */
	if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
			      "failing_device"))
		/* nothing - symlink will be missing */;

	if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
			      "devcoredump"))
		/* nothing - symlink will be missing */;

	/* auto-delete after DEVCD_TIMEOUT; user space can trigger it earlier */
	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
	schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);

	return;
 put_device:
	put_device(&devcd->devcd_dev);
 put_module:
	module_put(owner);
 free:
	free(data);
}
EXPORT_SYMBOL_GPL(dev_coredumpm);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
293
/* register the devcoredump class at boot */
static int __init devcoredump_init(void)
{
	return class_register(&devcd_class);
}
__initcall(devcoredump_init);
299
/* purge every outstanding dump, then unregister the class */
static void __exit devcoredump_exit(void)
{
	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
	class_unregister(&devcd_class);
}
__exitcall(devcoredump_exit);
305__exitcall(devcoredump_exit);