/*
 * linux/ipc/util.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Sep 1997 - Call suser() last after "normal" permission checks so we
 *            get BSD style process accounting right.
 *            Occurs in several places in the IPC code.
 *            Chris Evans, <chris@ferret.lmh.ox.ac.uk>
 * Nov 1999 - ipc helper functions, unified SMP locking
 *	      Manfred Spraul <manfred@colorfullife.com>
 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
 *            Mingming Cao <cmm@us.ibm.com>
 * Mar 2006 - support for audit of ipc object properties
 *            Dustin Kirkland <dustin.kirkland@us.ibm.com>
 * Jun 2006 - namespaces support
 *            OpenVZ, SWsoft Inc.
 *            Pavel Emelianov <xemul@openvz.org>
 */
 19
 20#include <linux/mm.h>
 21#include <linux/shm.h>
 22#include <linux/init.h>
 23#include <linux/msg.h>
 24#include <linux/vmalloc.h>
 25#include <linux/slab.h>
 26#include <linux/capability.h>
 27#include <linux/highuid.h>
 28#include <linux/security.h>
 29#include <linux/rcupdate.h>
 30#include <linux/workqueue.h>
 31#include <linux/seq_file.h>
 32#include <linux/proc_fs.h>
 33#include <linux/audit.h>
 34#include <linux/nsproxy.h>
 35#include <linux/rwsem.h>
 36#include <linux/memory.h>
 37#include <linux/ipc_namespace.h>
 38
 39#include <asm/unistd.h>
 40
 41#include "util.h"
 42
struct ipc_proc_iface {
	const char *path;
	const char *header;
	int ids;
	int (*show)(struct seq_file *, void *);
};

#ifdef CONFIG_MEMORY_HOTPLUG

static void ipc_memory_notifier(struct work_struct *work)
{
	ipcns_notify(IPCNS_MEMCHANGED);
}

static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);


static int ipc_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:    /* memory successfully brought online */
	case MEM_OFFLINE:   /* or offline: it's time to recompute msgmni */
		/*
		 * This is done by invoking the ipcns notifier chain with the
		 * IPCNS_MEMCHANGED event.
		 * In order not to keep the lock on the hotplug memory chain
		 * for too long, queue a work item that will, when woken up,
		 * activate the ipcns notification chain.
		 * No need to keep several ipc work items on the queue.
		 */
		if (!work_pending(&ipc_memory_wq))
			schedule_work(&ipc_memory_wq);
		break;
	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		break;
	}

	return NOTIFY_OK;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

/**
 *	ipc_init	-	initialise IPC subsystem
 *
 *	The various System V IPC resources (semaphores, messages and shared
 *	memory) are initialised.
 *	A callback routine is registered into the memory hotplug notifier
 *	chain: since msgmni scales to lowmem this callback routine will be
 *	called upon successful memory add / remove to recompute msgmni.
 */

static int __init ipc_init(void)
{
	sem_init();
	msg_init();
	shm_init();
	hotplug_memory_notifier(ipc_memory_callback, IPC_CALLBACK_PRI);
	register_ipcns_notifier(&init_ipc_ns);
	return 0;
}
__initcall(ipc_init);

/**
 *	ipc_init_ids		-	initialise IPC identifiers
 *	@ids: Identifier set
 *
 *	Set up the sequence range to use for the ipc identifier range (limited
 *	below IPCMNI) then initialise the ids idr.
 */

void ipc_init_ids(struct ipc_ids *ids)
{
	init_rwsem(&ids->rw_mutex);

	ids->in_use = 0;
	ids->seq = 0;
	{
		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
		if (seq_limit > USHRT_MAX)
			ids->seq_max = USHRT_MAX;
		else
			ids->seq_max = seq_limit;
	}

	idr_init(&ids->ipcs_idr);
}

#ifdef CONFIG_PROC_FS
static const struct file_operations sysvipc_proc_fops;
/**
 *	ipc_init_proc_interface	-  Create a proc interface for sysvipc types using a seq_file interface.
 *	@path: Path in procfs
 *	@header: Banner to be printed at the beginning of the file.
 *	@ids: ipc id table to iterate.
 *	@show: show routine.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
		int ids, int (*show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;
	struct ipc_proc_iface *iface;

	iface = kmalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return;
	iface->path	= path;
	iface->header	= header;
	iface->ids	= ids;
	iface->show	= show;

	pde = proc_create_data(path,
			       S_IRUGO,        /* world readable */
			       NULL,           /* parent dir */
			       &sysvipc_proc_fops,
			       iface);
	if (!pde)
		kfree(iface);
}
#endif
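
/*
 * Illustrative sketch (hypothetical caller, not part of this file): each
 * SysV IPC type registers its /proc/sysvipc/ file from its own init code
 * by calling ipc_init_proc_interface(), along the lines of:
 *
 *	ipc_init_proc_interface("sysvipc/shm",
 *				"       key      shmid ... column header ...\n",
 *				IPC_SHM_IDS, sysvipc_shm_proc_show);
 *
 * The header string and show routine names above are only meant to
 * illustrate the four parameters; the real callers supply their own.
 */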

/**
 *	ipc_findkey	-	find a key in an ipc identifier set
 *	@ids: Identifier set
 *	@key: The key to find
 *
 *	Requires ipc_ids.rw_mutex locked.
 *	Returns the LOCKED pointer to the ipc structure if found or NULL
 *	if not.
 *	If key is found ipc points to the owning ipc structure
 */

static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{
	struct kern_ipc_perm *ipc;
	int next_id;
	int total;

	for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
		ipc = idr_find(&ids->ipcs_idr, next_id);

		if (ipc == NULL)
			continue;

		if (ipc->key != key) {
			total++;
			continue;
		}

		ipc_lock_by_ptr(ipc);
		return ipc;
	}

	return NULL;
}

/**
 *	ipc_get_maxid	-	get the last assigned id
 *	@ids: IPC identifier set
 *
 *	Called with ipc_ids.rw_mutex held.
 */

int ipc_get_maxid(struct ipc_ids *ids)
{
	struct kern_ipc_perm *ipc;
	int max_id = -1;
	int total, id;

	if (ids->in_use == 0)
		return -1;

	if (ids->in_use == IPCMNI)
		return IPCMNI - 1;

	/* Look for the last assigned id */
	total = 0;
	for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
		ipc = idr_find(&ids->ipcs_idr, id);
		if (ipc != NULL) {
			max_id = id;
			total++;
		}
	}
	return max_id;
}

/**
 *	ipc_addid	-	add an IPC identifier
 *	@ids: IPC identifier set
 *	@new: new IPC permission set
 *	@size: limit for the number of used ids
 *
 *	Add an entry 'new' to the IPC ids idr. The permissions object is
 *	initialised and the first free entry is set up and the id assigned
 *	is returned. The 'new' entry is returned in a locked state on success.
 *	On failure the entry is not locked and a negative err-code is returned.
 *
 *	Called with ipc_ids.rw_mutex held as a writer.
 */

int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
	uid_t euid;
	gid_t egid;
	int id, err;

	if (size > IPCMNI)
		size = IPCMNI;

	if (ids->in_use >= size)
		return -ENOSPC;

	spin_lock_init(&new->lock);
	new->deleted = 0;
	rcu_read_lock();
	spin_lock(&new->lock);

	err = idr_get_new(&ids->ipcs_idr, new, &id);
	if (err) {
		spin_unlock(&new->lock);
		rcu_read_unlock();
		return err;
	}

	ids->in_use++;

	current_euid_egid(&euid, &egid);
	new->cuid = new->uid = euid;
	new->gid = new->cgid = egid;

	new->seq = ids->seq++;
	if (ids->seq > ids->seq_max)
		ids->seq = 0;

	new->id = ipc_buildid(id, new->seq);
	return id;
}
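
/*
 * Worked example of the id layout produced above (assuming the usual
 * definitions from util.h, where SEQ_MULTIPLIER == IPCMNI): the user
 * visible id combines the idr slot with the sequence counter as
 *
 *	id = seq * SEQ_MULTIPLIER + slot
 *
 * so slot 3 allocated while ids->seq was 2 yields id 2 * 32768 + 3.
 * ipcid_to_idx() recovers the slot (id % SEQ_MULTIPLIER) and
 * ipc_checkid() compares the remaining sequence part, which is how a
 * stale id that reuses a freed slot gets detected.
 */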

/**
 *	ipcget_new	-	create a new ipc object
 *	@ns: namespace
 *	@ids: IPC identifier set
 *	@ops: the actual creation routine to call
 *	@params: its parameters
 *
 *	This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 *	when the key is IPC_PRIVATE.
 */
static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
		struct ipc_ops *ops, struct ipc_params *params)
{
	int err;
retry:
	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);

	if (!err)
		return -ENOMEM;

	down_write(&ids->rw_mutex);
	err = ops->getnew(ns, params);
	up_write(&ids->rw_mutex);

	if (err == -EAGAIN)
		goto retry;

	return err;
}

/**
 *	ipc_check_perms	-	check security and permissions for an IPC object
 *	@ns: IPC namespace
 *	@ipcp: ipc permission set
 *	@ops: the actual security routine to call
 *	@params: its parameters
 *
 *	This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 *	when the key is not IPC_PRIVATE and that key already exists in the
 *	ids IDR.
 *
 *	On success, the IPC id is returned.
 *
 *	It is called with ipc_ids.rw_mutex and ipcp->lock held.
 */
static int ipc_check_perms(struct ipc_namespace *ns,
			   struct kern_ipc_perm *ipcp,
			   struct ipc_ops *ops,
			   struct ipc_params *params)
{
	int err;

	if (ipcperms(ns, ipcp, params->flg))
		err = -EACCES;
	else {
		err = ops->associate(ipcp, params->flg);
		if (!err)
			err = ipcp->id;
	}

	return err;
}

/**
 *	ipcget_public	-	get an ipc object or create a new one
 *	@ns: namespace
 *	@ids: IPC identifier set
 *	@ops: the actual creation routine to call
 *	@params: its parameters
 *
 *	This routine is called by sys_msgget(), sys_semget() and sys_shmget()
 *	when the key is not IPC_PRIVATE.
 *	It adds a new entry if the key is not found and does some permission
 *	/ security checks if the key is found.
 *
 *	On success, the ipc id is returned.
 */
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
		struct ipc_ops *ops, struct ipc_params *params)
{
	struct kern_ipc_perm *ipcp;
	int flg = params->flg;
	int err;
retry:
	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);

	/*
	 * Take the lock as a writer since we are potentially going to add
	 * a new entry + read locks are not "upgradable"
	 */
	down_write(&ids->rw_mutex);
	ipcp = ipc_findkey(ids, params->key);
	if (ipcp == NULL) {
		/* key not used */
		if (!(flg & IPC_CREAT))
			err = -ENOENT;
		else if (!err)
			err = -ENOMEM;
		else
			err = ops->getnew(ns, params);
	} else {
		/* ipc object has been locked by ipc_findkey() */

		if (flg & IPC_CREAT && flg & IPC_EXCL)
			err = -EEXIST;
		else {
			err = 0;
			if (ops->more_checks)
				err = ops->more_checks(ipcp, params);
			if (!err)
				/*
				 * ipc_check_perms returns the IPC id on
				 * success
				 */
				err = ipc_check_perms(ns, ipcp, ops, params);
		}
		ipc_unlock(ipcp);
	}
	up_write(&ids->rw_mutex);

	if (err == -EAGAIN)
		goto retry;

	return err;
}


/**
 *	ipc_rmid	-	remove an IPC identifier
 *	@ids: IPC identifier set
 *	@ipcp: ipc perm structure containing the identifier to remove
 *
 *	ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
 *	before this function is called, and remain locked on exit.
 */

void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
	int lid = ipcid_to_idx(ipcp->id);

	idr_remove(&ids->ipcs_idr, lid);

	ids->in_use--;

	ipcp->deleted = 1;

	return;
}

/**
 *	ipc_alloc	-	allocate ipc space
 *	@size: size desired
 *
 *	Allocate memory from the appropriate pools and return a pointer to it.
 *	NULL is returned if the allocation fails.
 */

void *ipc_alloc(int size)
{
	void *out;
	if (size > PAGE_SIZE)
		out = vmalloc(size);
	else
		out = kmalloc(size, GFP_KERNEL);
	return out;
}

/**
 *	ipc_free	-	free ipc space
 *	@ptr: pointer returned by ipc_alloc
 *	@size: size of block
 *
 *	Free a block created with ipc_alloc(). The caller must know the size
 *	used in the allocation call.
 */

void ipc_free(void *ptr, int size)
{
	if (size > PAGE_SIZE)
		vfree(ptr);
	else
		kfree(ptr);
}

/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
struct ipc_rcu_hdr {
	int refcount;
	int is_vmalloc;
	void *data[0];
};


struct ipc_rcu_grace {
	struct rcu_head rcu;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

struct ipc_rcu_sched {
	struct work_struct work;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

#define HDRLEN_KMALLOC		(sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
					sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC		(sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
					sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

static inline int rcu_use_vmalloc(int size)
{
	/* Too big for a single page? */
	if (HDRLEN_KMALLOC + size > PAGE_SIZE)
		return 1;
	return 0;
}
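
/*
 * Layout sketch of an rcu-managed allocation (derived from the
 * definitions above): the header is prepended and the pointer handed
 * back to the caller points at data[], i.e. just past the header:
 *
 *	kmalloc case:	| HDRLEN_KMALLOC prefix | object ...
 *	vmalloc case:	| HDRLEN_VMALLOC prefix | object ...
 *	                                         ^ returned pointer
 *
 * Because the headers are right-aligned inside that prefix,
 * container_of(ptr, struct ipc_rcu_hdr, data) and friends are used to
 * step back from the object to whichever header is currently live.
 */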

/**
 *	ipc_rcu_alloc	-	allocate ipc and rcu space
 *	@size: size desired
 *
 *	Allocate memory for the rcu header structure + the object.
 *	Returns the pointer to the object.
 *	NULL is returned if the allocation fails.
 */

void *ipc_rcu_alloc(int size)
{
	void *out;
	/*
	 * We prepend the allocation with the rcu struct, and
	 * workqueue if necessary (for vmalloc).
	 */
	if (rcu_use_vmalloc(size)) {
		out = vmalloc(HDRLEN_VMALLOC + size);
		if (out) {
			out += HDRLEN_VMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	} else {
		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
		if (out) {
			out += HDRLEN_KMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	}

	return out;
}

void ipc_rcu_getref(void *ptr)
{
	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}

static void ipc_do_vfree(struct work_struct *work)
{
	vfree(container_of(work, struct ipc_rcu_sched, work));
}

/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since the RCU callback is invoked in softirq (bh) context, the vfree()
 * has to be deferred to process context via schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *grace;
	struct ipc_rcu_sched *sched;

	grace = container_of(head, struct ipc_rcu_grace, rcu);
	sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
				data[0]);

	INIT_WORK(&sched->work, ipc_do_vfree);
	schedule_work(&sched->work);
}

void ipc_rcu_putref(void *ptr)
{
	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
		return;

	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_schedule_free);
	} else {
		kfree_rcu(container_of(ptr, struct ipc_rcu_grace, data), rcu);
	}
}

/**
 *	ipcperms	-	check IPC permissions
 *	@ns: IPC namespace
 *	@ipcp: IPC permission set
 *	@flag: desired permission set
 *
 *	Check user, group, other permissions for access
 *	to ipc resources. Returns 0 if allowed.
 *
 *	@flag will most probably be 0 or S_...UGO from <linux/stat.h>
 */

int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
{
	uid_t euid = current_euid();
	int requested_mode, granted_mode;

	audit_ipc_obj(ipcp);
	requested_mode = (flag >> 6) | (flag >> 3) | flag;
	granted_mode = ipcp->mode;
	if (euid == ipcp->cuid ||
	    euid == ipcp->uid)
		granted_mode >>= 6;
	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
		granted_mode >>= 3;
	/* is there some bit set in requested_mode but not in granted_mode? */
	if ((requested_mode & ~granted_mode & 0007) &&
	    !ns_capable(ns->user_ns, CAP_IPC_OWNER))
		return -1;

	return security_ipc_permission(ipcp, flag);
}
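
/*
 * Worked example for the mode folding above: a caller asking for read
 * access passes flag = S_IRUGO (0444).  Folding it with
 * (flag >> 6) | (flag >> 3) | flag collapses the owner/group/other
 * copies onto the low three bits, so requested_mode & 0007 == 04.
 * granted_mode is shifted so that the relevant class of ipcp->mode
 * also ends up in the low three bits before the two are compared.
 */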

/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 *	kernel_to_ipc64_perm	-	convert kernel ipc permissions to user
 *	@in: kernel permissions
 *	@out: new style IPC permissions
 *
 *	Turn the kernel object @in into a set of permissions descriptions
 *	for returning to userspace (@out).
 */

void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out)
{
	out->key	= in->key;
	out->uid	= in->uid;
	out->gid	= in->gid;
	out->cuid	= in->cuid;
	out->cgid	= in->cgid;
	out->mode	= in->mode;
	out->seq	= in->seq;
}

/**
 *	ipc64_perm_to_ipc_perm	-	convert new ipc permissions to old
 *	@in: new style IPC permissions
 *	@out: old style IPC permissions
 *
 *	Turn the new style permissions object @in into a compatibility
 *	object and store it into the @out pointer.
 */

void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
{
	out->key	= in->key;
	SET_UID(out->uid, in->uid);
	SET_GID(out->gid, in->gid);
	SET_UID(out->cuid, in->cuid);
	SET_GID(out->cgid, in->cgid);
	out->mode	= in->mode;
	out->seq	= in->seq;
}

/**
 * ipc_lock - Lock an ipc structure without rw_mutex held
 * @ids: IPC identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
 * The ipc object is locked on exit.
 */

struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;
	int lid = ipcid_to_idx(id);

	rcu_read_lock();
	out = idr_find(&ids->ipcs_idr, lid);
	if (out == NULL) {
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	spin_lock(&out->lock);

	/* ipc_rmid() may have already freed the ID while ipc_lock
	 * was spinning: here verify that the structure is still valid
	 */
	if (out->deleted) {
		spin_unlock(&out->lock);
		rcu_read_unlock();
		return ERR_PTR(-EINVAL);
	}

	return out;
}

struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;

	out = ipc_lock(ids, id);
	if (IS_ERR(out))
		return out;

	if (ipc_checkid(out, id)) {
		ipc_unlock(out);
		return ERR_PTR(-EIDRM);
	}

	return out;
}

/**
 * ipcget - Common sys_*get() code
 * @ns: namespace
 * @ids: IPC identifier set
 * @ops: operations to be called on ipc object creation, permission checks
 *       and further checks
 * @params: the parameters needed by the previous operations.
 *
 * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
 */
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
			struct ipc_ops *ops, struct ipc_params *params)
{
	if (params->key == IPC_PRIVATE)
		return ipcget_new(ns, ids, ops, params);
	else
		return ipcget_public(ns, ids, ops, params);
}
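
/*
 * Sketch of a typical caller (hypothetical, modelled on the sys_*get()
 * paths that use this helper): the syscall fills an ipc_ops with its
 * creation routine (.getnew), its check for an already existing key
 * (.associate) and any extra checks (.more_checks, which may be NULL),
 * plus an ipc_params carrying the user supplied key and flags:
 *
 *	struct ipc_ops ops = {
 *		.getnew		= my_newseg,
 *		.associate	= my_security_check,
 *		.more_checks	= my_extra_checks,
 *	};
 *	struct ipc_params params = { .key = key, .flg = flags };
 *
 *	return ipcget(ns, ids, &ops, &params);
 *
 * The my_* names are placeholders; only the ipc_ops and ipc_params
 * fields already used elsewhere in this file are assumed.
 */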

/**
 * ipc_update_perm - update the permissions of an IPC.
 * @in:  the permission given as input.
 * @out: the permission of the ipc to set.
 */
void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
{
	out->uid = in->uid;
	out->gid = in->gid;
	out->mode = (out->mode & ~S_IRWXUGO)
		| (in->mode & S_IRWXUGO);
}

/**
 * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
 * @ns:  the ipc namespace
 * @ids: the table of ids where to look for the ipc
 * @id:  the id of the ipc to retrieve
 * @cmd: the cmd to check
 * @perm: the permission to set
 * @extra_perm: one extra permission parameter used by msq
 *
 * This function does some common audit and permissions check for some IPC_XXX
 * cmd and is called from semctl_down, shmctl_down and msgctl_down.
 * It must be called without any lock held and
 *  - retrieves the ipc with the given id in the given table.
 *  - performs some audit and permission check, depending on the given cmd
 *  - returns the ipc with both ipc and rw_mutex locks held in case of success
 *    or an err-code without any lock held otherwise.
 */
struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
				      struct ipc_ids *ids, int id, int cmd,
				      struct ipc64_perm *perm, int extra_perm)
{
	struct kern_ipc_perm *ipcp;
	uid_t euid;
	int err;

	down_write(&ids->rw_mutex);
	ipcp = ipc_lock_check(ids, id);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_up;
	}

	audit_ipc_obj(ipcp);
	if (cmd == IPC_SET)
		audit_ipc_set_perm(extra_perm, perm->uid,
					 perm->gid, perm->mode);

	euid = current_euid();
	if (euid == ipcp->cuid || euid == ipcp->uid ||
	    ns_capable(ns->user_ns, CAP_SYS_ADMIN))
		return ipcp;

	err = -EPERM;
	ipc_unlock(ipcp);
out_up:
	up_write(&ids->rw_mutex);
	return ERR_PTR(err);
}

#ifdef __ARCH_WANT_IPC_PARSE_VERSION


/**
 *	ipc_parse_version	-	IPC call version
 *	@cmd: pointer to command
 *
 *	Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 *	The @cmd value is turned from an encoding command and version into
 *	just the command code.
 */

int ipc_parse_version(int *cmd)
{
	if (*cmd & IPC_64) {
		*cmd ^= IPC_64;
		return IPC_64;
	} else {
		return IPC_OLD;
	}
}
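
/*
 * Usage sketch (hypothetical caller): the *ctl() syscalls strip the
 * version bit before dispatching on the command, roughly:
 *
 *	int version = ipc_parse_version(&cmd);
 *
 *	switch (cmd) {
 *	case IPC_STAT:
 *		... copy out either an ipc64_perm or, for IPC_OLD,
 *		    a converted ipc_perm depending on "version" ...
 *	}
 *
 * After the call, cmd no longer carries IPC_64 and version records
 * which user structure layout was requested.
 */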

#endif /* __ARCH_WANT_IPC_PARSE_VERSION */

#ifdef CONFIG_PROC_FS
struct ipc_proc_iter {
	struct ipc_namespace *ns;
	struct ipc_proc_iface *iface;
};

/*
 * This routine locks the first ipc structure found at or after position pos.
 */
static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
					      loff_t *new_pos)
{
	struct kern_ipc_perm *ipc;
	int total, id;

	total = 0;
	for (id = 0; id < pos && total < ids->in_use; id++) {
		ipc = idr_find(&ids->ipcs_idr, id);
		if (ipc != NULL)
			total++;
	}

	if (total >= ids->in_use)
		return NULL;

	for ( ; pos < IPCMNI; pos++) {
		ipc = idr_find(&ids->ipcs_idr, pos);
		if (ipc != NULL) {
			*new_pos = pos + 1;
			ipc_lock_by_ptr(ipc);
			return ipc;
		}
	}

	/* Out of range - return NULL to terminate iteration */
	return NULL;
}

static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct kern_ipc_perm *ipc = it;

	/* If we had an ipc id locked before, unlock it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos);
}

/*
 * File positions: pos 0 -> header, pos n -> ipc id = n - 1.
 * SeqFile iterator: the iterator value is a locked ipc pointer or SEQ_START_TOKEN.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct ipc_ids *ids;

	ids = &iter->ns->ids[iface->ids];

	/*
	 * Take the lock - this will be released by the corresponding
	 * call to stop().
	 */
	down_read(&ids->rw_mutex);

	/* pos < 0 is invalid */
	if (*pos < 0)
		return NULL;

	/* pos == 0 means header */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* Find the (pos-1)th ipc */
	return sysvipc_find_ipc(ids, *pos - 1, pos);
}

static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
	struct kern_ipc_perm *ipc = it;
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;
	struct ipc_ids *ids;

	/* If we had a locked structure, release it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	ids = &iter->ns->ids[iface->ids];
	/* Release the lock we took in start() */
	up_read(&ids->rw_mutex);
}

static int sysvipc_proc_show(struct seq_file *s, void *it)
{
	struct ipc_proc_iter *iter = s->private;
	struct ipc_proc_iface *iface = iter->iface;

	if (it == SEQ_START_TOKEN)
		return seq_puts(s, iface->header);

	return iface->show(s, it);
}

static const struct seq_operations sysvipc_proc_seqops = {
	.start = sysvipc_proc_start,
	.stop  = sysvipc_proc_stop,
	.next  = sysvipc_proc_next,
	.show  = sysvipc_proc_show,
};

static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *seq;
	struct ipc_proc_iter *iter;

	ret = -ENOMEM;
	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		goto out;

	ret = seq_open(file, &sysvipc_proc_seqops);
	if (ret)
		goto out_kfree;

	seq = file->private_data;
	seq->private = iter;

	iter->iface = PDE(inode)->data;
	iter->ns    = get_ipc_ns(current->nsproxy->ipc_ns);
out:
	return ret;
out_kfree:
	kfree(iter);
	goto out;
}

static int sysvipc_proc_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ipc_proc_iter *iter = seq->private;
	put_ipc_ns(iter->ns);
	return seq_release_private(inode, file);
}

static const struct file_operations sysvipc_proc_fops = {
	.open    = sysvipc_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = sysvipc_proc_release,
};
#endif /* CONFIG_PROC_FS */