/*
 * drivers/misc/logger.c
 *
 * A Logging Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "logger: " fmt

#include <linux/sched.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/aio.h>
#include "logger.h"

#include <asm/ioctls.h>

/**
 * struct logger_log - represents a specific log, such as 'main' or 'radio'
 * @buffer:	The actual ring buffer
 * @misc:	The "misc" device representing the log
 * @wq:		The wait queue for @readers
 * @readers:	This log's readers
 * @mutex:	The mutex that protects the @buffer
 * @w_off:	The current write head offset
 * @head:	The head, or location that readers start reading at.
 * @size:	The size of the log
 * @logs:	The list of log channels
 *
 * This structure lives from module insertion until module removal, so it does
 * not need additional reference counting. The structure is protected by the
 * mutex 'mutex'.
 */
struct logger_log {
	unsigned char		*buffer;
	struct miscdevice	misc;
	wait_queue_head_t	wq;
	struct list_head	readers;
	struct mutex		mutex;
	size_t			w_off;
	size_t			head;
	size_t			size;
	struct list_head	logs;
};

static LIST_HEAD(log_list);


/**
 * struct logger_reader - a logging device open for reading
 * @log:	The associated log
 * @list:	The associated entry in @logger_log's list
 * @r_off:	The current read head offset.
 * @r_all:	Reader can read all entries
 * @r_ver:	Reader ABI version
 *
 * This object lives from open to release, so we don't need additional
 * reference counting. The structure is protected by log->mutex.
 */
struct logger_reader {
	struct logger_log	*log;
	struct list_head	list;
	size_t			r_off;
	bool			r_all;
	int			r_ver;
};

/* logger_offset - returns index 'n' into the log via (optimized) modulus */
static size_t logger_offset(struct logger_log *log, size_t n)
{
	return n & (log->size - 1);
}
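
/*
 * Illustrative note (added for clarity; not in the original source): because
 * create_log() below requires the log size to be a power of two, the bitwise
 * AND above is equivalent to 'n % log->size'. For a 256 KB log, for example,
 * logger_offset(log, 262150) == 262150 & 262143 == 6.
 */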


/*
 * file_get_log - Given a file structure, return the associated log
 *
 * This isn't aesthetic. We have several goals:
 *
 *	1) Need to quickly obtain the associated log during an I/O operation
 *	2) Readers need to maintain state (logger_reader)
 *	3) Writers need to be very fast (open() should be a near no-op)
 *
 * In the reader case, we can trivially go file->logger_reader->logger_log.
 * For a writer, we don't want to maintain a logger_reader, so we just go
 * file->logger_log. Thus what file->private_data points at depends on whether
 * or not the file was opened for reading. This function hides that dirtiness.
 */
static inline struct logger_log *file_get_log(struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader = file->private_data;
		return reader->log;
	} else
		return file->private_data;
}

/*
 * get_entry_header - returns a pointer to the logger_entry header within
 * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
 * be provided. Typically the return value will be a pointer within
 * 'log->buffer'.  However, a pointer to 'scratch' may be returned if
 * the log entry spans the end and beginning of the circular buffer.
 */
static struct logger_entry *get_entry_header(struct logger_log *log,
		size_t off, struct logger_entry *scratch)
{
	size_t len = min(sizeof(struct logger_entry), log->size - off);
	if (len != sizeof(struct logger_entry)) {
		memcpy(((void *) scratch), log->buffer + off, len);
		memcpy(((void *) scratch) + len, log->buffer,
			sizeof(struct logger_entry) - len);
		return scratch;
	}

	return (struct logger_entry *) (log->buffer + off);
}
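
/*
 * Illustrative note (added for clarity; not in the original source): the
 * scratch path above handles a header that wraps around the end of the ring.
 * For example, with a hypothetical 4096-byte buffer and off == 4090, only the
 * first 6 bytes of the header are contiguous; the remainder is copied from
 * the start of the buffer into 'scratch', and 'scratch' is returned instead
 * of a pointer into log->buffer.
 */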

/*
 * get_entry_msg_len - Grabs the length of the message of the entry
 * starting from 'off'.
 *
 * An entry length is 2 bytes (16 bits) in host endian order.
 * In the log, the length does not include the size of the log entry structure.
 * This function likewise returns the message length only, not including the
 * log entry structure.
 *
 * Caller needs to hold log->mutex.
 */
static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
{
	struct logger_entry scratch;
	struct logger_entry *entry;

	entry = get_entry_header(log, off, &scratch);
	return entry->len;
}

static size_t get_user_hdr_len(int ver)
{
	if (ver < 2)
		return sizeof(struct user_logger_entry_compat);
	else
		return sizeof(struct logger_entry);
}

static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
					 char __user *buf)
{
	void *hdr;
	size_t hdr_len;
	struct user_logger_entry_compat v1;

	if (ver < 2) {
		v1.len      = entry->len;
		v1.__pad    = 0;
		v1.pid      = entry->pid;
		v1.tid      = entry->tid;
		v1.sec      = entry->sec;
		v1.nsec     = entry->nsec;
		hdr         = &v1;
		hdr_len     = sizeof(struct user_logger_entry_compat);
	} else {
		hdr         = entry;
		hdr_len     = sizeof(struct logger_entry);
	}

	return copy_to_user(buf, hdr, hdr_len);
}

/*
 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
 * user-space buffer 'buf'. Returns 'count' on success.
 *
 * Caller must hold log->mutex.
 */
static ssize_t do_read_log_to_user(struct logger_log *log,
				   struct logger_reader *reader,
				   char __user *buf,
				   size_t count)
{
	struct logger_entry scratch;
	struct logger_entry *entry;
	size_t len;
	size_t msg_start;

	/*
	 * First, copy the header to userspace, using the version of
	 * the header requested
	 */
	entry = get_entry_header(log, reader->r_off, &scratch);
	if (copy_header_to_user(reader->r_ver, entry, buf))
		return -EFAULT;

	count -= get_user_hdr_len(reader->r_ver);
	buf += get_user_hdr_len(reader->r_ver);
	msg_start = logger_offset(log,
		reader->r_off + sizeof(struct logger_entry));

	/*
	 * We read from the msg in two disjoint operations. First, we read from
	 * the current msg head offset up to 'count' bytes or to the end of
	 * the log, whichever comes first.
	 */
	len = min(count, log->size - msg_start);
	if (copy_to_user(buf, log->buffer + msg_start, len))
		return -EFAULT;

	/*
	 * Second, we read any remaining bytes, starting back at the head of
	 * the log.
	 */
	if (count != len)
		if (copy_to_user(buf + len, log->buffer, count - len))
			return -EFAULT;

	reader->r_off = logger_offset(log, reader->r_off +
		sizeof(struct logger_entry) + count);

	return count + get_user_hdr_len(reader->r_ver);
}

/*
 * get_next_entry_by_uid - Starting at 'off', returns an offset into
 * 'log->buffer' which contains the first entry readable by 'euid'
 */
static size_t get_next_entry_by_uid(struct logger_log *log,
		size_t off, kuid_t euid)
{
	while (off != log->w_off) {
		struct logger_entry *entry;
		struct logger_entry scratch;
		size_t next_len;

		entry = get_entry_header(log, off, &scratch);

		if (uid_eq(entry->euid, euid))
			return off;

		next_len = sizeof(struct logger_entry) + entry->len;
		off = logger_offset(log, off + next_len);
	}

	return off;
}

/*
 * logger_read - our log's read() method
 *
 * Behavior:
 *
 *	- O_NONBLOCK works
 *	- If there are no log entries to read, blocks until log is written to
 *	- Atomically reads exactly one log entry
 *
 * Will set errno to EINVAL if the read buffer is insufficient to hold the
 * next entry.
 */
static ssize_t logger_read(struct file *file, char __user *buf,
			   size_t count, loff_t *pos)
{
	struct logger_reader *reader = file->private_data;
	struct logger_log *log = reader->log;
	ssize_t ret;
	DEFINE_WAIT(wait);

start:
	while (1) {
		mutex_lock(&log->mutex);

		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);

		ret = (log->w_off == reader->r_off);
		mutex_unlock(&log->mutex);
		if (!ret)
			break;

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&log->wq, &wait);
	if (ret)
		return ret;

	mutex_lock(&log->mutex);

	if (!reader->r_all)
		reader->r_off = get_next_entry_by_uid(log,
			reader->r_off, current_euid());

	/* is there still something to read or did we race? */
	if (unlikely(log->w_off == reader->r_off)) {
		mutex_unlock(&log->mutex);
		goto start;
	}

	/* get the size of the next entry */
	ret = get_user_hdr_len(reader->r_ver) +
		get_entry_msg_len(log, reader->r_off);
	if (count < ret) {
		ret = -EINVAL;
		goto out;
	}

	/* get exactly one entry from the log */
	ret = do_read_log_to_user(log, reader, buf, ret);

out:
	mutex_unlock(&log->mutex);

	return ret;
}
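
/*
 * Illustrative reader usage (added for clarity; not in the original source).
 * Assuming the log is exposed as a character device node for one of the logs
 * created below (the exact /dev path depends on the platform's device setup)
 * and a buffer large enough for one full entry:
 *
 *	int fd = open("/dev/log_main", O_RDONLY);
 *	unsigned char buf[8192];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * On success, buf holds exactly one entry: a header in the reader's ABI
 * version (see get_user_hdr_len()) followed by the message payload. With
 * O_NONBLOCK, the read fails with EAGAIN instead of sleeping when the log
 * is empty.
 */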

/*
 * get_next_entry - return the offset of the first valid entry at least 'len'
 * bytes after 'off'.
 *
 * Caller must hold log->mutex.
 */
static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
{
	size_t count = 0;

	do {
		size_t nr = sizeof(struct logger_entry) +
			get_entry_msg_len(log, off);
		off = logger_offset(log, off + nr);
		count += nr;
	} while (count < len);

	return off;
}

/*
 * is_between - is a < c < b, accounting for wrapping of a, b, and c
 *    positions in the buffer
 *
 * That is, if a<b, check for c between a and b
 * and if a>b, check for c outside (not between) a and b
 *
 * |------- a xxxxxxxx b --------|
 *               c^
 *
 * |xxxxx b --------- a xxxxxxxxx|
 *    c^
 *  or                    c^
 */
static inline int is_between(size_t a, size_t b, size_t c)
{
	if (a < b) {
		/* is c between a and b? */
		if (a < c && c <= b)
			return 1;
	} else {
		/* is c outside of b through a? */
		if (c <= b || a < c)
			return 1;
	}

	return 0;
}
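
/*
 * Worked examples (added for clarity; not in the original source):
 * is_between(10, 50, 30) == 1 and is_between(10, 50, 60) == 0. If the range
 * wraps, say a == 200 and b == 40 in a 256-byte buffer, then both c == 20
 * (c <= b) and c == 220 (a < c) count as "between", while c == 100 does not.
 */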

/*
 * fix_up_readers - walk the list of all readers and "fix up" any who were
 * lapped by the writer; also do the same for the default "start head".
 * We do this by "pulling forward" the readers and start head to the first
 * entry after the new write head.
 *
 * The caller needs to hold log->mutex.
 */
static void fix_up_readers(struct logger_log *log, size_t len)
{
	size_t old = log->w_off;
	size_t new = logger_offset(log, old + len);
	struct logger_reader *reader;

	if (is_between(old, new, log->head))
		log->head = get_next_entry(log, log->head, len);

	list_for_each_entry(reader, &log->readers, list)
		if (is_between(old, new, reader->r_off))
			reader->r_off = get_next_entry(log, reader->r_off, len);
}
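
/*
 * Illustrative note (added for clarity; not in the original source): if the
 * writer is about to advance w_off from 100 to 400, any reader whose r_off
 * lies in that range would have its unread data overwritten, so it is pulled
 * forward by get_next_entry() to the next entry boundary at least 'len' bytes
 * past its current position, which necessarily lies beyond the new write
 * head.
 */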

/*
 * do_write_log - writes 'count' bytes from 'buf' to 'log'
 *
 * The caller needs to hold log->mutex.
 */
static void do_write_log(struct logger_log *log, const void *buf, size_t count)
{
	size_t len;

	len = min(count, log->size - log->w_off);
	memcpy(log->buffer + log->w_off, buf, len);

	if (count != len)
		memcpy(log->buffer, buf + len, count - len);

	log->w_off = logger_offset(log, log->w_off + count);

}

/*
 * do_write_log_from_user - writes 'count' bytes from the user-space buffer
 * 'buf' to the log 'log'
 *
 * The caller needs to hold log->mutex.
 *
 * Returns 'count' on success, negative error code on failure.
 */
static ssize_t do_write_log_from_user(struct logger_log *log,
				      const void __user *buf, size_t count)
{
	size_t len;

	len = min(count, log->size - log->w_off);
	if (len && copy_from_user(log->buffer + log->w_off, buf, len))
		return -EFAULT;

	if (count != len)
		if (copy_from_user(log->buffer, buf + len, count - len))
			/*
			 * Note that by not updating w_off, this abandons the
			 * portion of the new entry that *was* successfully
			 * copied, just above.  This is intentional to avoid
			 * message corruption from missing fragments.
			 */
			return -EFAULT;

	log->w_off = logger_offset(log, log->w_off + count);

	return count;
}

/*
 * logger_aio_write - our write method, implementing support for write(),
 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
 * them above all else.
 */
static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t ppos)
{
	struct logger_log *log = file_get_log(iocb->ki_filp);
	size_t orig;
	struct logger_entry header;
	struct timespec now;
	ssize_t ret = 0;

	now = current_kernel_time();

	header.pid = current->tgid;
	header.tid = current->pid;
	header.sec = now.tv_sec;
	header.nsec = now.tv_nsec;
	header.euid = current_euid();
	header.len = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
	header.hdr_size = sizeof(struct logger_entry);

	/* null writes succeed, return zero */
	if (unlikely(!header.len))
		return 0;

	mutex_lock(&log->mutex);

	orig = log->w_off;

	/*
	 * Fix up any readers, pulling them forward to the first readable
	 * entry after (what will be) the new write offset. We do this now
	 * because if we partially fail, we can end up with clobbered log
	 * entries that encroach on readable buffer.
	 */
	fix_up_readers(log, sizeof(struct logger_entry) + header.len);

	do_write_log(log, &header, sizeof(struct logger_entry));

	while (nr_segs-- > 0) {
		size_t len;
		ssize_t nr;

		/* figure out how much of this vector we can keep */
		len = min_t(size_t, iov->iov_len, header.len - ret);

		/* write out this segment's payload */
		nr = do_write_log_from_user(log, iov->iov_base, len);
		if (unlikely(nr < 0)) {
			log->w_off = orig;
			mutex_unlock(&log->mutex);
			return nr;
		}

		iov++;
		ret += nr;
	}

	mutex_unlock(&log->mutex);

	/* wake up any blocked readers */
	wake_up_interruptible(&log->wq);

	return ret;
}
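
/*
 * Illustrative writer usage (added for clarity; not in the original source):
 * one entry is logged per write()/writev() call, and the payload may be split
 * across iovecs however the caller likes, e.g.:
 *
 *	struct iovec vec[2] = {
 *		{ .iov_base = tag, .iov_len = strlen(tag) + 1 },
 *		{ .iov_base = msg, .iov_len = strlen(msg) + 1 },
 *	};
 *	writev(fd, vec, 2);
 *
 * with fd opened O_WRONLY on the log device. Anything beyond
 * LOGGER_ENTRY_MAX_PAYLOAD bytes is truncated (see header.len above).
 */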

static struct logger_log *get_log_from_minor(int minor)
{
	struct logger_log *log;

	list_for_each_entry(log, &log_list, logs)
		if (log->misc.minor == minor)
			return log;
	return NULL;
}

/*
 * logger_open - the log's open() file operation
 *
 * Note how near a no-op this is in the write-only case. Keep it that way!
 */
static int logger_open(struct inode *inode, struct file *file)
{
	struct logger_log *log;
	int ret;

	ret = nonseekable_open(inode, file);
	if (ret)
		return ret;

	log = get_log_from_minor(MINOR(inode->i_rdev));
	if (!log)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader;

		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
		if (!reader)
			return -ENOMEM;

		reader->log = log;
		reader->r_ver = 1;
		reader->r_all = in_egroup_p(inode->i_gid) ||
			capable(CAP_SYSLOG);

		INIT_LIST_HEAD(&reader->list);

		mutex_lock(&log->mutex);
		reader->r_off = log->head;
		list_add_tail(&reader->list, &log->readers);
		mutex_unlock(&log->mutex);

		file->private_data = reader;
	} else
		file->private_data = log;

	return 0;
}

/*
 * logger_release - the log's release file operation
 *
 * Note this is a total no-op in the write-only case. Keep it that way!
 */
static int logger_release(struct inode *ignored, struct file *file)
{
	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader = file->private_data;
		struct logger_log *log = reader->log;

		mutex_lock(&log->mutex);
		list_del(&reader->list);
		mutex_unlock(&log->mutex);

		kfree(reader);
	}

	return 0;
}

/*
 * logger_poll - the log's poll file operation, for poll/select/epoll
 *
 * Note we always return POLLOUT, because you can always write() to the log.
 * Note also that, strictly speaking, a return value of POLLIN does not
 * guarantee that the log is readable without blocking, as there is a small
 * chance that the writer can lap the reader in the interim between poll()
 * returning and the read() request.
 */
static unsigned int logger_poll(struct file *file, poll_table *wait)
{
	struct logger_reader *reader;
	struct logger_log *log;
	unsigned int ret = POLLOUT | POLLWRNORM;

	if (!(file->f_mode & FMODE_READ))
		return ret;

	reader = file->private_data;
	log = reader->log;

	poll_wait(file, &log->wq, wait);

	mutex_lock(&log->mutex);
	if (!reader->r_all)
		reader->r_off = get_next_entry_by_uid(log,
			reader->r_off, current_euid());

	if (log->w_off != reader->r_off)
		ret |= POLLIN | POLLRDNORM;
	mutex_unlock(&log->mutex);

	return ret;
}
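
/*
 * Illustrative poll usage (added for clarity; not in the original source):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, buf, sizeof(buf));
 *
 * As noted above, the read() may still block if the writer laps the reader
 * between poll() returning and the read() call.
 */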

static long logger_set_version(struct logger_reader *reader, void __user *arg)
{
	int version;
	if (copy_from_user(&version, arg, sizeof(int)))
		return -EFAULT;

	if ((version < 1) || (version > 2))
		return -EINVAL;

	reader->r_ver = version;
	return 0;
}

static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct logger_log *log = file_get_log(file);
	struct logger_reader *reader;
	long ret = -EINVAL;
	void __user *argp = (void __user *) arg;

	mutex_lock(&log->mutex);

	switch (cmd) {
	case LOGGER_GET_LOG_BUF_SIZE:
		ret = log->size;
		break;
	case LOGGER_GET_LOG_LEN:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		if (log->w_off >= reader->r_off)
			ret = log->w_off - reader->r_off;
		else
			ret = (log->size - reader->r_off) + log->w_off;
		break;
	case LOGGER_GET_NEXT_ENTRY_LEN:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;

		if (!reader->r_all)
			reader->r_off = get_next_entry_by_uid(log,
				reader->r_off, current_euid());

		if (log->w_off != reader->r_off)
			ret = get_user_hdr_len(reader->r_ver) +
				get_entry_msg_len(log, reader->r_off);
		else
			ret = 0;
		break;
	case LOGGER_FLUSH_LOG:
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EBADF;
			break;
		}
		if (!(in_egroup_p(file_inode(file)->i_gid) ||
				capable(CAP_SYSLOG))) {
			ret = -EPERM;
			break;
		}
		list_for_each_entry(reader, &log->readers, list)
			reader->r_off = log->w_off;
		log->head = log->w_off;
		ret = 0;
		break;
	case LOGGER_GET_VERSION:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		ret = reader->r_ver;
		break;
	case LOGGER_SET_VERSION:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		ret = logger_set_version(reader, argp);
		break;
	}

	mutex_unlock(&log->mutex);

	return ret;
}
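
/*
 * Illustrative ioctl usage (added for clarity; not in the original source),
 * assuming the LOGGER_* request codes from logger.h:
 *
 *	long buf_size = ioctl(fd, LOGGER_GET_LOG_BUF_SIZE);
 *	long unread   = ioctl(fd, LOGGER_GET_LOG_LEN);
 *	int ver = 2;
 *	ioctl(fd, LOGGER_SET_VERSION, &ver);
 *
 * Switching to ABI version 2 makes subsequent read()s return the full
 * struct logger_entry header (including euid and hdr_size) rather than the
 * smaller user_logger_entry_compat layout.
 */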

static const struct file_operations logger_fops = {
	.owner = THIS_MODULE,
	.read = logger_read,
	.aio_write = logger_aio_write,
	.poll = logger_poll,
	.unlocked_ioctl = logger_ioctl,
	.compat_ioctl = logger_ioctl,
	.open = logger_open,
	.release = logger_release,
};

/*
 * Log size must be a power of two, and greater than
 * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
 */
static int __init create_log(char *log_name, int size)
{
	int ret = 0;
	struct logger_log *log;
	unsigned char *buffer;

	buffer = vmalloc(size);
	if (buffer == NULL)
		return -ENOMEM;

	log = kzalloc(sizeof(struct logger_log), GFP_KERNEL);
	if (log == NULL) {
		ret = -ENOMEM;
		goto out_free_buffer;
	}
	log->buffer = buffer;

	log->misc.minor = MISC_DYNAMIC_MINOR;
	log->misc.name = kstrdup(log_name, GFP_KERNEL);
	if (log->misc.name == NULL) {
		ret = -ENOMEM;
		goto out_free_log;
	}

	log->misc.fops = &logger_fops;
	log->misc.parent = NULL;

	init_waitqueue_head(&log->wq);
	INIT_LIST_HEAD(&log->readers);
	mutex_init(&log->mutex);
	log->w_off = 0;
	log->head = 0;
	log->size = size;

	INIT_LIST_HEAD(&log->logs);
	list_add_tail(&log->logs, &log_list);

	/* finally, initialize the misc device for this log */
	ret = misc_register(&log->misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device for log '%s'!\n",
				log->misc.name);
		goto out_free_log;
	}

	pr_info("created %luK log '%s'\n",
		(unsigned long) log->size >> 10, log->misc.name);

	return 0;

out_free_log:
	kfree(log);

out_free_buffer:
	vfree(buffer);
	return ret;
}

static int __init logger_init(void)
{
	int ret;

	ret = create_log(LOGGER_LOG_MAIN, 256*1024);
	if (unlikely(ret))
		goto out;

	ret = create_log(LOGGER_LOG_EVENTS, 256*1024);
	if (unlikely(ret))
		goto out;

	ret = create_log(LOGGER_LOG_RADIO, 256*1024);
	if (unlikely(ret))
		goto out;

	ret = create_log(LOGGER_LOG_SYSTEM, 256*1024);
	if (unlikely(ret))
		goto out;

out:
	return ret;
}

static void __exit logger_exit(void)
{
	struct logger_log *current_log, *next_log;

	list_for_each_entry_safe(current_log, next_log, &log_list, logs) {
		/* we have to delete every entry in log_list */
		misc_deregister(&current_log->misc);
		vfree(current_log->buffer);
		kfree(current_log->misc.name);
		list_del(&current_log->logs);
		kfree(current_log);
	}
}


device_initcall(logger_init);
module_exit(logger_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Love, <rlove@google.com>");
MODULE_DESCRIPTION("Android Logger");