/* mm/ashmem.c
**
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
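
/*
 * Editor's illustration (not driver code): a region named "foo" is
 * stored here as "dev/ashmem/foo".  The prefix deliberately omits its
 * leading '/', since d_path() supplies one when the backing shmem
 * file's name is rendered, e.g. as "/dev/ashmem/foo" in
 * /proc/<pid>/maps.
 */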

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
	struct list_head unpinned_list;	 /* this area's unpinned ranges */
	struct file *file;		 /* the shmem-based backing file */
	size_t size;			 /* size of the mapping, in bytes */
	unsigned long prot_mask;	 /* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;		/* entry in LRU list */
	struct list_head unpinned;	/* entry in its area's unpinned list */
	struct ashmem_area *asma;	/* associated area */
	size_t pgstart;			/* starting page, inclusive */
	size_t pgend;			/* ending page, inclusive */
	unsigned int purged;		/* ASHMEM_NOT or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
		page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))
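
/*
 * Worked example (editor's illustration, not driver code): for a
 * range covering pages [4, 9],
 *
 *	page_range_subsumes_range(range, 2, 12)   -> true (inside [2, 12])
 *	page_range_subsumed_by_range(range, 5, 7) -> true (contains [5, 7])
 *	page_in_range(range, 9)                   -> true (pgend inclusive)
 *	page_range_in_range(range, 8, 20)         -> true (partial overlap)
 *	range_before_page(range, 10)              -> true (ends before 10)
 */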

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)

static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
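
/*
 * Worked example (illustrative): shrinking a 10-page range [0, 9] down
 * to [0, 5] leaves 6 pages, so lru_count drops by 4 when the range is
 * on the LRU.  Note that callers only ever shrink to a sub-interval,
 * which is what the accounting above assumes.
 */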

static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}
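
/*
 * Hedged userspace sketch (editor's illustration; assumes the ashmem
 * ioctl definitions are visible to userspace and a 4096-byte region):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "my-region");	// optional, pre-mmap
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);		// required, pre-mmap
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 */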

static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	ssize_t ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out;

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0)
		goto out;

	/* Update the backing file's pos, since f_op->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from the backing file, since f_op->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
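
/*
 * Worked example (illustrative): if prot_mask has been reduced to
 * PROT_READ, calc_vm_may_flags(~prot_mask) contains VM_MAYWRITE and
 * VM_MAYEXEC; ashmem_mmap() below clears exactly those bits from the
 * vma, so a later mprotect() cannot upgrade the mapping to writable
 * or executable.
 */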

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;
	vma->vm_flags |= VM_CAN_NONLINEAR;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;
	if (!sc->nr_to_scan)
		return lru_count;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		do_fallocate(range->asma->file,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		sc->nr_to_scan -= range_size(range);
		if (sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	return lru_count;
}
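
/*
 * Worked example (illustrative): with two unpinned ranges of 3 and 5
 * pages on the LRU, a query (nr_to_scan == 0) reports 8.  A scan with
 * nr_to_scan == 8 punches a hole through each range in LRU order,
 * marks both ASHMEM_WAS_PURGED, and returns the new lru_count of 0.
 */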

static struct shrinker ashmem_shrinker = {
	.shrink = ashmem_shrink,
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
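
/*
 * Hedged example (illustrative): the mask can only ever shrink.
 *
 *	ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ);	// ok, removes bits
 *	ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_WRITE);	// -EINVAL, adds one
 *
 * After the first call, mmap() with PROT_WRITE fails in ashmem_mmap()
 * with -EPERM.
 */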

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
				    name, ASHMEM_NAME_LEN)))
		ret = -EFAULT;
	asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
	mutex_unlock(&ashmem_mutex);

	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		size_t len;

		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		if (unlikely(copy_to_user(name,
				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
			ret = -EFAULT;
	} else {
		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
					  sizeof(ASHMEM_NAME_DEF))))
			ret = -EFAULT;
	}
	mutex_unlock(&ashmem_mutex);

	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart, pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
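
/*
 * Worked example (illustrative): with [10, 19] unpinned, pinning
 * [12, 15] is case #4 above: the existing range is shrunk to [10, 11]
 * and a new range [16, 19] is allocated, both inheriting the old
 * range's purged state.
 */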

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
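
/*
 * Worked example (illustrative): unpinning [8, 12] while [10, 19] is
 * already unpinned deletes the old range and restarts the walk, so a
 * single coalesced range [8, 19] is finally inserted; its purged flag
 * is the OR of every range absorbed along the way.
 */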

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
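
/*
 * Hedged userspace sketch (illustrative): offset and len must be
 * page-aligned, and len == 0 means "through the end of the region".
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	// page is now evictable
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		;			// contents were discarded; reinit
 */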

static long ashmem_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *) arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *) arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = 0,
			};
			ret = ashmem_shrink(&ashmem_shrinker, &sc);
			sc.nr_to_scan = ret;
			ashmem_shrink(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
	.compat_ioctl = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					  sizeof(struct ashmem_area),
					  0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
					  sizeof(struct ashmem_range),
					  0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		printk(KERN_ERR "ashmem: failed to create slab cache\n");
		return -ENOMEM;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		printk(KERN_ERR "ashmem: failed to register misc device!\n");
		return ret;
	}

	register_shrinker(&ashmem_shrinker);

	printk(KERN_INFO "ashmem: initialized\n");

	return 0;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		printk(KERN_ERR "ashmem: failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	printk(KERN_INFO "ashmem: unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");