/*
 * proc/fs/generic.c --- generic routines for the proc-fs
 *
 * This file contains generic proc-fs routines for handling
 * directories and files.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds.
 * Copyright (C) 1997 Theodore Ts'o
 */

#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/namei.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/uaccess.h>

#include "internal.h"

DEFINE_SPINLOCK(proc_subdir_lock);

static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
{
	if (de->namelen != len)
		return 0;
	return !memcmp(name, de->name, len);
}

/* buffer size is one page but our output routines use some slack for overruns */
#define PROC_BLOCK_SIZE	(PAGE_SIZE - 1024)

static ssize_t
__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
	       loff_t *ppos)
{
	struct inode * inode = file->f_path.dentry->d_inode;
	char	*page;
	ssize_t	retval=0;
	int	eof=0;
	ssize_t	n, count;
	char	*start;
	struct proc_dir_entry * dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	if (!(page = (char*) __get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL.  (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer.  Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data.  If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed.  This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer.  Return the number of
			 *    bytes of data placed there.  If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start.  This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)   /* end of file */
			break;
		if (n < 0) {  /* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				printk(KERN_WARNING
				       "proc_file_read: Read count exceeded\n");
			}
		} else /* start >= page */ {
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff)) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE - startoff;
			}
			if (n > count)
				n = count;
		}

		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}
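
/*
 * Illustrative sketch, not part of the original file: a minimal read_proc
 * handler using method 0 described in the comment inside __proc_file_read()
 * above.  For output that fits in one page, format everything at the start
 * of the buffer, return the total length and set *eof.  The handler name
 * and message are hypothetical.
 */
#if 0
static int example_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	/* All of the output fits comfortably within PROC_BLOCK_SIZE. */
	int len = sprintf(page, "hello from the example proc file\n");

	*eof = 1;	/* nothing more to supply */
	return len;	/* bytes from the start of the buffer */
}
#endif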

static ssize_t
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
	       loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	spin_unlock(&pde->pde_unload_lock);

	rv = __proc_file_read(file, buf, nbytes, ppos);

	pde_users_dec(pde);
	return rv;
}

static ssize_t
proc_file_write(struct file *file, const char __user *buffer,
		size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;

	if (pde->write_proc) {
		spin_lock(&pde->pde_unload_lock);
		if (!pde->proc_fops) {
			spin_unlock(&pde->pde_unload_lock);
			return rv;
		}
		pde->pde_users++;
		spin_unlock(&pde->pde_unload_lock);

		/* FIXME: does this routine need ppos?  probably... */
		rv = pde->write_proc(file, buffer, count, pde->data);
		pde_users_dec(pde);
	}
	return rv;
}


static loff_t
proc_file_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t retval = -EINVAL;
	switch (orig) {
	case 1:
		offset += file->f_pos;
	/* fallthrough */
	case 0:
		if (offset < 0 || offset > MAX_NON_LFS)
			break;
		file->f_pos = retval = offset;
	}
	return retval;
}

static const struct file_operations proc_file_operations = {
	.llseek		= proc_file_lseek,
	.read		= proc_file_read,
	.write		= proc_file_write,
};
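
/*
 * Sketch of the seq_file interface recommended in the comment in
 * __proc_file_read() above; not part of the original file.  A single_open()
 * based file avoids the offset quirks of the legacy read_proc path.  The
 * names example_seq_show, example_seq_open and example_seq_fops are
 * hypothetical; the helpers come from <linux/seq_file.h>.
 */
#if 0
#include <linux/seq_file.h>

static int example_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello from seq_file\n");
	return 0;
}

static int example_seq_open(struct inode *inode, struct file *file)
{
	/* hand the entry's private data through to the show routine */
	return single_open(file, example_seq_show, PDE(inode)->data);
}

static const struct file_operations example_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= example_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif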

static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PDE(inode);
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		error = vmtruncate(inode, iattr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	de->uid = inode->i_uid;
	de->gid = inode->i_gid;
	de->mode = inode->i_mode;
	return 0;
}

static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
			struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct proc_dir_entry *de = PROC_I(inode)->pde;
	if (de && de->nlink)
		set_nlink(inode, de->nlink);

	generic_fillattr(inode, stat);
	return 0;
}

static const struct inode_operations proc_file_inode_operations = {
	.setattr	= proc_notify_change,
};

/*
 * This function parses a name such as "tty/driver/serial", and
 * returns the struct proc_dir_entry for "/proc/tty/driver", and
 * returns "serial" in residual.
 */
static int __xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			     const char **residual)
{
	const char		*cp = name, *next;
	struct proc_dir_entry	*de;
	unsigned int		len;

	de = *ret;
	if (!de)
		de = &proc_root;

	while (1) {
		next = strchr(cp, '/');
		if (!next)
			break;

		len = next - cp;
		for (de = de->subdir; de ; de = de->next) {
			if (proc_match(len, cp, de))
				break;
		}
		if (!de) {
			WARN(1, "name '%s'\n", name);
			return -ENOENT;
		}
		cp += len + 1;
	}
	*residual = cp;
	*ret = de;
	return 0;
}

static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
			   const char **residual)
{
	int rv;

	spin_lock(&proc_subdir_lock);
	rv = __xlate_proc_name(name, ret, residual);
	spin_unlock(&proc_subdir_lock);
	return rv;
}

static DEFINE_IDA(proc_inum_ida);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

#define PROC_DYNAMIC_FIRST 0xF0000000U

/*
 * Return an inode number between PROC_DYNAMIC_FIRST and
 * 0xffffffff, or zero on failure.
 */
static unsigned int get_inode_number(void)
{
	unsigned int i;
	int error;

retry:
	if (ida_pre_get(&proc_inum_ida, GFP_KERNEL) == 0)
		return 0;

	spin_lock(&proc_inum_lock);
	error = ida_get_new(&proc_inum_ida, &i);
	spin_unlock(&proc_inum_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return 0;

	if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
		spin_lock(&proc_inum_lock);
		ida_remove(&proc_inum_ida, i);
		spin_unlock(&proc_inum_lock);
		return 0;
	}
	return PROC_DYNAMIC_FIRST + i;
}

static void release_inode_number(unsigned int inum)
{
	spin_lock(&proc_inum_lock);
	ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
	spin_unlock(&proc_inum_lock);
}

static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, PDE(dentry->d_inode)->data);
	return NULL;
}

static const struct inode_operations proc_link_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= proc_follow_link,
};

/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries.  This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(const struct dentry * dentry)
{
	return 1;
}

static const struct dentry_operations proc_dentry_operations =
{
	.d_delete	= proc_delete_dentry,
};

/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode = NULL;
	int error = -ENOENT;

	spin_lock(&proc_subdir_lock);
	for (de = de->subdir; de ; de = de->next) {
		if (de->namelen != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
			pde_get(de);
			spin_unlock(&proc_subdir_lock);
			error = -EINVAL;
			inode = proc_get_inode(dir->i_sb, de);
			goto out_unlock;
		}
	}
	spin_unlock(&proc_subdir_lock);
out_unlock:

	if (inode) {
		d_set_d_op(dentry, &proc_dentry_operations);
		d_add(dentry, inode);
		return NULL;
	}
	if (de)
		pde_put(de);
	return ERR_PTR(error);
}

struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
		struct nameidata *nd)
{
	return proc_lookup_de(PDE(dir), dir, dentry);
}

/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
		filldir_t filldir)
{
	unsigned int ino;
	int i;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int ret = 0;

	ino = inode->i_ino;
	i = filp->f_pos;
	switch (i) {
		case 0:
			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		case 1:
			if (filldir(dirent, "..", 2, i,
				    parent_ino(filp->f_path.dentry),
				    DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		default:
			spin_lock(&proc_subdir_lock);
			de = de->subdir;
			i -= 2;
			for (;;) {
				if (!de) {
					ret = 1;
					spin_unlock(&proc_subdir_lock);
					goto out;
				}
				if (!i)
					break;
				de = de->next;
				i--;
			}

			do {
				struct proc_dir_entry *next;

				/* filldir passes info to user space */
				pde_get(de);
				spin_unlock(&proc_subdir_lock);
				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
					    de->low_ino, de->mode >> 12) < 0) {
					pde_put(de);
					goto out;
				}
				spin_lock(&proc_subdir_lock);
				filp->f_pos++;
				next = de->next;
				pde_put(de);
				de = next;
			} while (de);
			spin_unlock(&proc_subdir_lock);
	}
	ret = 1;
out:
	return ret;
}

int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;

	return proc_readdir_de(PDE(inode), filp, dirent, filldir);
}

/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
	.llseek			= generic_file_llseek,
	.read			= generic_read_dir,
	.readdir		= proc_readdir,
};

/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
	.lookup		= proc_lookup,
	.getattr	= proc_getattr,
	.setattr	= proc_notify_change,
};

static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
{
	unsigned int i;
	struct proc_dir_entry *tmp;

	i = get_inode_number();
	if (i == 0)
		return -EAGAIN;
	dp->low_ino = i;

	if (S_ISDIR(dp->mode)) {
		if (dp->proc_iops == NULL) {
			dp->proc_fops = &proc_dir_operations;
			dp->proc_iops = &proc_dir_inode_operations;
		}
		dir->nlink++;
	} else if (S_ISLNK(dp->mode)) {
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_link_inode_operations;
	} else if (S_ISREG(dp->mode)) {
		if (dp->proc_fops == NULL)
			dp->proc_fops = &proc_file_operations;
		if (dp->proc_iops == NULL)
			dp->proc_iops = &proc_file_inode_operations;
	}

	spin_lock(&proc_subdir_lock);

	for (tmp = dir->subdir; tmp; tmp = tmp->next)
		if (strcmp(tmp->name, dp->name) == 0) {
			WARN(1, KERN_WARNING "proc_dir_entry '%s/%s' already registered\n",
				dir->name, dp->name);
			break;
		}

	dp->next = dir->subdir;
	dp->parent = dir;
	dir->subdir = dp;
	spin_unlock(&proc_subdir_lock);

	return 0;
}

static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
					  const char *name,
					  umode_t mode,
					  nlink_t nlink)
{
	struct proc_dir_entry *ent = NULL;
	const char *fn = name;
	unsigned int len;

	/* make sure name is valid */
	if (!name || !strlen(name)) goto out;

	if (xlate_proc_name(name, parent, &fn) != 0)
		goto out;

	/* At this point there must not be any '/' characters beyond *fn */
	if (strchr(fn, '/'))
		goto out;

	len = strlen(fn);

	ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
	if (!ent) goto out;

	memset(ent, 0, sizeof(struct proc_dir_entry));
	memcpy(ent->name, fn, len + 1);
	ent->namelen = len;
	ent->mode = mode;
	ent->nlink = nlink;
	atomic_set(&ent->count, 1);
	ent->pde_users = 0;
	spin_lock_init(&ent->pde_unload_lock);
	ent->pde_unload_completion = NULL;
	INIT_LIST_HEAD(&ent->pde_openers);
 out:
	return ent;
}

struct proc_dir_entry *proc_symlink(const char *name,
		struct proc_dir_entry *parent, const char *dest)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name,
			  (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1);

	if (ent) {
		ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
		if (ent->data) {
			strcpy((char*)ent->data,dest);
			if (proc_register(parent, ent) < 0) {
				kfree(ent->data);
				kfree(ent);
				ent = NULL;
			}
		} else {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(proc_symlink);
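
/*
 * Illustrative use of proc_symlink(), not part of the original file; the
 * entry name and target below are hypothetical.  The destination string is
 * copied into ent->data above, so the caller need not keep it alive.
 */
#if 0
	/* from module init code: creates /proc/example_link -> self/mounts */
	if (!proc_symlink("example_link", NULL, "self/mounts"))
		return -ENOMEM;
#endif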

struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(proc_mkdir_mode);

struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
		struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;

	ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
	if (ent) {
		ent->data = net;
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL_GPL(proc_net_mkdir);

struct proc_dir_entry *proc_mkdir(const char *name,
		struct proc_dir_entry *parent)
{
	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
EXPORT_SYMBOL(proc_mkdir);
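
/*
 * Illustrative use of proc_mkdir(), not part of the original file; the
 * directory name is hypothetical.  A NULL parent places the directory
 * directly under /proc, and the returned entry can be passed as the
 * parent when creating further entries.
 */
#if 0
	struct proc_dir_entry *example_dir;

	example_dir = proc_mkdir("example_dir", NULL);	/* /proc/example_dir */
	if (!example_dir)
		return -ENOMEM;
#endif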

struct proc_dir_entry *create_proc_entry(const char *name, umode_t mode,
					 struct proc_dir_entry *parent)
{
	struct proc_dir_entry *ent;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	ent = __proc_create(&parent, name, mode, nlink);
	if (ent) {
		if (proc_register(parent, ent) < 0) {
			kfree(ent);
			ent = NULL;
		}
	}
	return ent;
}
EXPORT_SYMBOL(create_proc_entry);
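
/*
 * Illustrative sketch of the legacy create_proc_entry() pattern, not part
 * of the original file: register the entry first, then hook up ->read_proc
 * and ->data by hand.  example_read_proc and example_dir are the
 * hypothetical names from the sketches above.
 */
#if 0
	struct proc_dir_entry *pde;

	pde = create_proc_entry("example_legacy", 0444, example_dir);
	if (!pde)
		return -ENOMEM;
	pde->read_proc = example_read_proc;
	pde->data = NULL;
#endif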

struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
					struct proc_dir_entry *parent,
					const struct file_operations *proc_fops,
					void *data)
{
	struct proc_dir_entry *pde;
	nlink_t nlink;

	if (S_ISDIR(mode)) {
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO | S_IXUGO;
		nlink = 2;
	} else {
		if ((mode & S_IFMT) == 0)
			mode |= S_IFREG;
		if ((mode & S_IALLUGO) == 0)
			mode |= S_IRUGO;
		nlink = 1;
	}

	pde = __proc_create(&parent, name, mode, nlink);
	if (!pde)
		goto out;
	pde->proc_fops = proc_fops;
	pde->data = data;
	if (proc_register(parent, pde) < 0)
		goto out_free;
	return pde;
out_free:
	kfree(pde);
out:
	return NULL;
}
EXPORT_SYMBOL(proc_create_data);
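
/*
 * Illustrative use of proc_create_data(), not part of the original file:
 * register a file backed by ordinary file_operations and stash a private
 * pointer in ->data, which the open routine can recover via PDE(inode).
 * example_seq_fops, example_dir and example_private are hypothetical names
 * from the sketches above.
 */
#if 0
	static int example_private;

	if (!proc_create_data("example_data", 0444, example_dir,
			      &example_seq_fops, &example_private))
		return -ENOMEM;
#endif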

static void free_proc_entry(struct proc_dir_entry *de)
{
	release_inode_number(de->low_ino);

	if (S_ISLNK(de->mode))
		kfree(de->data);
	kfree(de);
}

void pde_put(struct proc_dir_entry *pde)
{
	if (atomic_dec_and_test(&pde->count))
		free_proc_entry(pde);
}

/*
 * Remove a /proc entry and free it if it's not currently in use.
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *de = NULL;
	const char *fn = name;
	unsigned int len;

	spin_lock(&proc_subdir_lock);
	if (__xlate_proc_name(name, &parent, &fn) != 0) {
		spin_unlock(&proc_subdir_lock);
		return;
	}
	len = strlen(fn);

	for (p = &parent->subdir; *p; p=&(*p)->next ) {
		if (proc_match(len, fn, *p)) {
			de = *p;
			*p = de->next;
			de->next = NULL;
			break;
		}
	}
	spin_unlock(&proc_subdir_lock);
	if (!de) {
		WARN(1, "name '%s'\n", name);
		return;
	}

	spin_lock(&de->pde_unload_lock);
	/*
	 * Stop accepting new callers into module. If you're
	 * dynamically allocating ->proc_fops, save a pointer somewhere.
	 */
	de->proc_fops = NULL;
	/* Wait until all existing callers into module are done. */
	if (de->pde_users > 0) {
		DECLARE_COMPLETION_ONSTACK(c);

		if (!de->pde_unload_completion)
			de->pde_unload_completion = &c;

		spin_unlock(&de->pde_unload_lock);

		wait_for_completion(de->pde_unload_completion);

		spin_lock(&de->pde_unload_lock);
	}

	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;

		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		list_del(&pdeo->lh);
		spin_unlock(&de->pde_unload_lock);
		pdeo->release(pdeo->inode, pdeo->file);
		kfree(pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);

	if (S_ISDIR(de->mode))
		parent->nlink--;
	de->nlink = 0;
	WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
			"'%s/%s', leaking at least '%s'\n", __func__,
			de->parent->name, de->name, de->subdir->name);
	pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);
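
/*
 * Illustrative teardown, not part of the original file: entries are removed
 * by name relative to their parent, children before their directory.  The
 * names match the hypothetical sketches above.
 */
#if 0
	remove_proc_entry("example_data", example_dir);
	remove_proc_entry("example_legacy", example_dir);
	remove_proc_entry("example_dir", NULL);
#endif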