v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Implementation of the SID table type.
  4 *
  5 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
  6 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
  7 *
  8 * Copyright (C) 2018 Red Hat, Inc.
  9 */
 10#include <linux/errno.h>
 11#include <linux/kernel.h>
 12#include <linux/list.h>
 13#include <linux/rcupdate.h>
 14#include <linux/slab.h>
 15#include <linux/sched.h>
 16#include <linux/spinlock.h>
 17#include <asm/barrier.h>
 18#include "flask.h"
 19#include "security.h"
 20#include "sidtab.h"
 21#include "services.h"
 22
 23struct sidtab_str_cache {
 24	struct rcu_head rcu_member;
 25	struct list_head lru_member;
 26	struct sidtab_entry *parent;
 27	u32 len;
 28	char str[];
 29};
 30
 31#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
 32#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))
 33
 34int sidtab_init(struct sidtab *s)
 35{
 36	u32 i;
 37
 38	memset(s->roots, 0, sizeof(s->roots));
 39
 40	for (i = 0; i < SECINITSID_NUM; i++)
 41		s->isids[i].set = 0;
 42
 43	s->frozen = false;
 44	s->count = 0;
 45	s->convert = NULL;
 46	hash_init(s->context_to_sid);
 47
 48	spin_lock_init(&s->lock);
 49
 50#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
 51	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
 52	INIT_LIST_HEAD(&s->cache_lru_list);
 53	spin_lock_init(&s->cache_lock);
 54#endif
 55
 56	return 0;
 57}
 58
 59static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
 60{
 61	struct sidtab_entry *entry;
 62	u32 sid = 0;
 63
 64	rcu_read_lock();
 65	hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
 66		if (entry->hash != hash)
 67			continue;
 68		if (context_cmp(&entry->context, context)) {
 69			sid = entry->sid;
 70			break;
 71		}
 72	}
 73	rcu_read_unlock();
 74	return sid;
 75}
 76
 77int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
 78{
 79	struct sidtab_isid_entry *isid;
 80	u32 hash;
 81	int rc;
 82
 83	if (sid == 0 || sid > SECINITSID_NUM)
 84		return -EINVAL;
 85
 86	isid = &s->isids[sid - 1];
 87
 88	rc = context_cpy(&isid->entry.context, context);
 89	if (rc)
 90		return rc;
 91
 92#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
 93	isid->entry.cache = NULL;
 94#endif
 95	isid->set = 1;
 96
 97	hash = context_compute_hash(context);
 98
 99	/*
100	 * Multiple initial sids may map to the same context. Check that this
101	 * context is not already represented in the context_to_sid hashtable
102	 * to avoid duplicate entries and long linked lists upon hash
103	 * collision.
104	 */
105	if (!context_to_sid(s, context, hash)) {
106		isid->entry.sid = sid;
107		isid->entry.hash = hash;
108		hash_add(s->context_to_sid, &isid->entry.list, hash);
109	}
110
111	return 0;
112}
113
114int sidtab_hash_stats(struct sidtab *sidtab, char *page)
115{
116	int i;
117	int chain_len = 0;
118	int slots_used = 0;
119	int entries = 0;
120	int max_chain_len = 0;
121	int cur_bucket = 0;
122	struct sidtab_entry *entry;
123
124	rcu_read_lock();
125	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
126		entries++;
127		if (i == cur_bucket) {
128			chain_len++;
129			if (chain_len == 1)
130				slots_used++;
131		} else {
132			cur_bucket = i;
133			if (chain_len > max_chain_len)
134				max_chain_len = chain_len;
135			chain_len = 0;
136		}
137	}
138	rcu_read_unlock();
139
140	if (chain_len > max_chain_len)
141		max_chain_len = chain_len;
142
143	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
144			 "longest chain: %d\n", entries,
145			 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
146}
147
148static u32 sidtab_level_from_count(u32 count)
149{
150	u32 capacity = SIDTAB_LEAF_ENTRIES;
151	u32 level = 0;
152
153	while (count > capacity) {
154		capacity <<= SIDTAB_INNER_SHIFT;
155		++level;
156	}
157	return level;
158}
159
160static int sidtab_alloc_roots(struct sidtab *s, u32 level)
161{
162	u32 l;
163
164	if (!s->roots[0].ptr_leaf) {
165		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
166					       GFP_ATOMIC);
167		if (!s->roots[0].ptr_leaf)
168			return -ENOMEM;
169	}
170	for (l = 1; l <= level; ++l)
171		if (!s->roots[l].ptr_inner) {
172			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
173							GFP_ATOMIC);
174			if (!s->roots[l].ptr_inner)
175				return -ENOMEM;
176			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
177		}
178	return 0;
179}
180
181static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
182					     int alloc)
183{
184	union sidtab_entry_inner *entry;
185	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
186
187	/* find the level of the subtree we need */
188	level = sidtab_level_from_count(index + 1);
189	capacity_shift = level * SIDTAB_INNER_SHIFT;
190
191	/* allocate roots if needed */
192	if (alloc && sidtab_alloc_roots(s, level) != 0)
193		return NULL;
194
195	/* lookup inside the subtree */
196	entry = &s->roots[level];
197	while (level != 0) {
198		capacity_shift -= SIDTAB_INNER_SHIFT;
199		--level;
200
201		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
202		leaf_index &= ((u32)1 << capacity_shift) - 1;
203
204		if (!entry->ptr_inner) {
205			if (alloc)
206				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
207							   GFP_ATOMIC);
208			if (!entry->ptr_inner)
209				return NULL;
210		}
211	}
212	if (!entry->ptr_leaf) {
213		if (alloc)
214			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
215						  GFP_ATOMIC);
216		if (!entry->ptr_leaf)
217			return NULL;
218	}
219	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
220}
221
222static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
223{
224	/* read entries only after reading count */
225	u32 count = smp_load_acquire(&s->count);
226
227	if (index >= count)
228		return NULL;
229
230	return sidtab_do_lookup(s, index, 0);
231}
232
233static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
234{
235	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
236}
237
238static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
239					       int force)
240{
241	if (sid != 0) {
242		struct sidtab_entry *entry;
243
244		if (sid > SECINITSID_NUM)
245			entry = sidtab_lookup(s, sid_to_index(sid));
246		else
247			entry = sidtab_lookup_initial(s, sid);
248		if (entry && (!entry->context.len || force))
249			return entry;
250	}
251
252	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
253}
254
255struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
256{
257	return sidtab_search_core(s, sid, 0);
258}
259
260struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
261{
262	return sidtab_search_core(s, sid, 1);
263}
264
265int sidtab_context_to_sid(struct sidtab *s, struct context *context,
266			  u32 *sid)
267{
268	unsigned long flags;
269	u32 count, hash = context_compute_hash(context);
270	struct sidtab_convert_params *convert;
271	struct sidtab_entry *dst, *dst_convert;
272	int rc;
273
274	*sid = context_to_sid(s, context, hash);
275	if (*sid)
276		return 0;
277
278	/* lock-free search failed: lock, re-search, and insert if not found */
279	spin_lock_irqsave(&s->lock, flags);
280
281	rc = 0;
282	*sid = context_to_sid(s, context, hash);
283	if (*sid)
284		goto out_unlock;
285
286	if (unlikely(s->frozen)) {
287		/*
288		 * This sidtab is now frozen - tell the caller to abort and
289		 * get the new one.
290		 */
291		rc = -ESTALE;
292		goto out_unlock;
293	}
294
295	count = s->count;
296
297	/* bail out if we already reached max entries */
298	rc = -EOVERFLOW;
299	if (count >= SIDTAB_MAX)
300		goto out_unlock;
301
302	/* insert context into new entry */
303	rc = -ENOMEM;
304	dst = sidtab_do_lookup(s, count, 1);
305	if (!dst)
306		goto out_unlock;
307
308	dst->sid = index_to_sid(count);
309	dst->hash = hash;
310
311	rc = context_cpy(&dst->context, context);
312	if (rc)
313		goto out_unlock;
314
315	/*
316	 * if we are building a new sidtab, we need to convert the context
317	 * and insert it there as well
318	 */
319	convert = s->convert;
320	if (convert) {
321		struct sidtab *target = convert->target;
322
323		rc = -ENOMEM;
324		dst_convert = sidtab_do_lookup(target, count, 1);
325		if (!dst_convert) {
326			context_destroy(&dst->context);
327			goto out_unlock;
328		}
329
330		rc = services_convert_context(convert->args,
331					      context, &dst_convert->context,
332					      GFP_ATOMIC);
333		if (rc) {
334			context_destroy(&dst->context);
335			goto out_unlock;
336		}
337		dst_convert->sid = index_to_sid(count);
338		dst_convert->hash = context_compute_hash(&dst_convert->context);
339		target->count = count + 1;
340
341		hash_add_rcu(target->context_to_sid,
342			     &dst_convert->list, dst_convert->hash);
343	}
344
345	if (context->len)
346		pr_info("SELinux:  Context %s is not valid (left unmapped).\n",
347			context->str);
348
349	*sid = index_to_sid(count);
350
351	/* write entries before updating count */
352	smp_store_release(&s->count, count + 1);
353	hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);
354
355	rc = 0;
356out_unlock:
357	spin_unlock_irqrestore(&s->lock, flags);
358	return rc;
359}
360
361static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
362{
363	struct sidtab_entry *entry;
364	u32 i;
365
366	for (i = 0; i < count; i++) {
367		entry = sidtab_do_lookup(s, i, 0);
368		entry->sid = index_to_sid(i);
369		entry->hash = context_compute_hash(&entry->context);
370
371		hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
372	}
373}
374
375static int sidtab_convert_tree(union sidtab_entry_inner *edst,
376			       union sidtab_entry_inner *esrc,
377			       u32 *pos, u32 count, u32 level,
378			       struct sidtab_convert_params *convert)
379{
380	int rc;
381	u32 i;
382
383	if (level != 0) {
384		if (!edst->ptr_inner) {
385			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
386						  GFP_KERNEL);
387			if (!edst->ptr_inner)
388				return -ENOMEM;
389		}
390		i = 0;
391		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
392			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
393						 &esrc->ptr_inner->entries[i],
394						 pos, count, level - 1,
395						 convert);
396			if (rc)
397				return rc;
398			i++;
399		}
400	} else {
401		if (!edst->ptr_leaf) {
402			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
403						 GFP_KERNEL);
404			if (!edst->ptr_leaf)
405				return -ENOMEM;
406		}
407		i = 0;
408		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
409			rc = services_convert_context(convert->args,
410					&esrc->ptr_leaf->entries[i].context,
411					&edst->ptr_leaf->entries[i].context,
412					GFP_KERNEL);
413			if (rc)
414				return rc;
415			(*pos)++;
416			i++;
417		}
418		cond_resched();
419	}
420	return 0;
421}
422
423int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
424{
425	unsigned long flags;
426	u32 count, level, pos;
427	int rc;
428
429	spin_lock_irqsave(&s->lock, flags);
430
431	/* concurrent policy loads are not allowed */
432	if (s->convert) {
433		spin_unlock_irqrestore(&s->lock, flags);
434		return -EBUSY;
435	}
436
437	count = s->count;
438	level = sidtab_level_from_count(count);
439
440	/* allocate last leaf in the new sidtab (to avoid race with
441	 * live convert)
442	 */
443	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
444	if (rc) {
445		spin_unlock_irqrestore(&s->lock, flags);
446		return rc;
447	}
448
449	/* set count in case no new entries are added during conversion */
450	params->target->count = count;
451
452	/* enable live convert of new entries */
453	s->convert = params;
454
455	/* we can safely convert the tree outside the lock */
456	spin_unlock_irqrestore(&s->lock, flags);
457
458	pr_info("SELinux:  Converting %u SID table entries...\n", count);
459
460	/* convert all entries not covered by live convert */
461	pos = 0;
462	rc = sidtab_convert_tree(&params->target->roots[level],
463				 &s->roots[level], &pos, count, level, params);
464	if (rc) {
465		/* we need to keep the old table - disable live convert */
466		spin_lock_irqsave(&s->lock, flags);
467		s->convert = NULL;
468		spin_unlock_irqrestore(&s->lock, flags);
469		return rc;
470	}
471	/*
472	 * The hashtable can also be modified in sidtab_context_to_sid()
473	 * so we must re-acquire the lock here.
474	 */
475	spin_lock_irqsave(&s->lock, flags);
476	sidtab_convert_hashtable(params->target, count);
477	spin_unlock_irqrestore(&s->lock, flags);
478
479	return 0;
480}
481
482void sidtab_cancel_convert(struct sidtab *s)
483{
484	unsigned long flags;
485
486	/* cancelling policy load - disable live convert of sidtab */
487	spin_lock_irqsave(&s->lock, flags);
488	s->convert = NULL;
489	spin_unlock_irqrestore(&s->lock, flags);
490}
491
492void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
493{
494	spin_lock_irqsave(&s->lock, *flags);
495	s->frozen = true;
496	s->convert = NULL;
497}
498void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
499{
500	spin_unlock_irqrestore(&s->lock, *flags);
501}
502
503static void sidtab_destroy_entry(struct sidtab_entry *entry)
504{
505	context_destroy(&entry->context);
506#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
507	kfree(rcu_dereference_raw(entry->cache));
508#endif
509}
510
511static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
512{
513	u32 i;
514
515	if (level != 0) {
516		struct sidtab_node_inner *node = entry.ptr_inner;
517
518		if (!node)
519			return;
520
521		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
522			sidtab_destroy_tree(node->entries[i], level - 1);
523		kfree(node);
524	} else {
525		struct sidtab_node_leaf *node = entry.ptr_leaf;
526
527		if (!node)
528			return;
529
530		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
531			sidtab_destroy_entry(&node->entries[i]);
532		kfree(node);
533	}
534}
535
536void sidtab_destroy(struct sidtab *s)
537{
538	u32 i, level;
539
540	for (i = 0; i < SECINITSID_NUM; i++)
541		if (s->isids[i].set)
542			sidtab_destroy_entry(&s->isids[i].entry);
543
544	level = SIDTAB_MAX_LEVEL;
545	while (level && !s->roots[level].ptr_inner)
546		--level;
547
548	sidtab_destroy_tree(s->roots[level], level);
549	/*
550	 * The context_to_sid hashtable's objects are all shared
551	 * with the isids array and context tree, and so don't need
552	 * to be cleaned up here.
553	 */
554}
555
556#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
557
558void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
559			const char *str, u32 str_len)
560{
561	struct sidtab_str_cache *cache, *victim = NULL;
562	unsigned long flags;
563
564	/* do not cache invalid contexts */
565	if (entry->context.len)
566		return;
567
568	spin_lock_irqsave(&s->cache_lock, flags);
569
570	cache = rcu_dereference_protected(entry->cache,
571					  lockdep_is_held(&s->cache_lock));
572	if (cache) {
573		/* entry in cache - just bump to the head of LRU list */
574		list_move(&cache->lru_member, &s->cache_lru_list);
575		goto out_unlock;
576	}
577
578	cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);
579	if (!cache)
580		goto out_unlock;
581
582	if (s->cache_free_slots == 0) {
583		/* pop a cache entry from the tail and free it */
584		victim = container_of(s->cache_lru_list.prev,
585				      struct sidtab_str_cache, lru_member);
586		list_del(&victim->lru_member);
587		rcu_assign_pointer(victim->parent->cache, NULL);
588	} else {
589		s->cache_free_slots--;
590	}
591	cache->parent = entry;
592	cache->len = str_len;
593	memcpy(cache->str, str, str_len);
594	list_add(&cache->lru_member, &s->cache_lru_list);
595
596	rcu_assign_pointer(entry->cache, cache);
597
598out_unlock:
599	spin_unlock_irqrestore(&s->cache_lock, flags);
600	kfree_rcu(victim, rcu_member);
601}
602
603int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
604		       char **out, u32 *out_len)
605{
606	struct sidtab_str_cache *cache;
607	int rc = 0;
608
609	if (entry->context.len)
610		return -ENOENT; /* do not cache invalid contexts */
611
612	rcu_read_lock();
613
614	cache = rcu_dereference(entry->cache);
615	if (!cache) {
616		rc = -ENOENT;
617	} else {
618		*out_len = cache->len;
619		if (out) {
620			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
621			if (!*out)
622				rc = -ENOMEM;
623		}
624	}
625
626	rcu_read_unlock();
627
628	if (!rc && out)
629		sidtab_sid2str_put(s, entry, *out, *out_len);
630	return rc;
631}
632
633#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
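
The listing above (security/selinux/ss/sidtab.c) stores entries in a tree whose depth grows with the number of entries: sidtab_level_from_count() picks the smallest level whose subtree can hold the requested count, and sidtab_do_lookup() then peels off SIDTAB_INNER_SHIFT bits of the index per inner level until it reaches a leaf slot. The standalone userspace sketch below only mirrors that index arithmetic; the LEAF_ENTRIES and INNER_SHIFT values are illustrative stand-ins (the real constants are derived from SIDTAB_NODE_ALLOC_SIZE in sidtab.h), and nothing here is kernel code.

/*
 * Userspace sketch of the sidtab index arithmetic, for illustration only.
 * LEAF_ENTRIES and INNER_SHIFT are made-up stand-ins for the kernel's
 * SIDTAB_LEAF_ENTRIES and SIDTAB_INNER_SHIFT.
 */
#include <stdio.h>

#define LEAF_ENTRIES  32U   /* entries per leaf node (stand-in value) */
#define INNER_SHIFT   5U    /* each inner node has 2^5 children (stand-in) */

/* Smallest tree level whose subtree can hold `count` entries. */
static unsigned int level_from_count(unsigned int count)
{
	unsigned int capacity = LEAF_ENTRIES;
	unsigned int level = 0;

	while (count > capacity) {
		capacity <<= INNER_SHIFT;
		++level;
	}
	return level;
}

/* Print the child slot taken at each inner level on the way to `index`. */
static void walk(unsigned int index)
{
	unsigned int level = level_from_count(index + 1);
	unsigned int shift = level * INNER_SHIFT;
	unsigned int leaf_index = index / LEAF_ENTRIES;

	printf("index %u -> level %u", index, level);
	while (level != 0) {
		shift -= INNER_SHIFT;
		--level;
		printf(", inner slot %u", leaf_index >> shift);
		leaf_index &= (1U << shift) - 1;
	}
	printf(", leaf slot %u\n", index % LEAF_ENTRIES);
}

int main(void)
{
	walk(7);	/* fits in a single leaf (level 0) */
	walk(100);	/* needs one inner level */
	walk(5000);	/* needs two inner levels */
	return 0;
}

With these stand-in sizes, index 100 resolves to inner slot 3 and leaf slot 4, and index 5000 to inner slots 4 and 28 and leaf slot 8, matching how sidtab_do_lookup() would descend the tree.
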
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Implementation of the SID table type.
  4 *
  5 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
  6 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
  7 *
  8 * Copyright (C) 2018 Red Hat, Inc.
  9 */
 10#include <linux/errno.h>
 11#include <linux/kernel.h>
 12#include <linux/list.h>
 13#include <linux/rcupdate.h>
 14#include <linux/slab.h>
 15#include <linux/sched.h>
 16#include <linux/spinlock.h>
 17#include <asm/barrier.h>
 18#include "flask.h"
 19#include "security.h"
 20#include "sidtab.h"
 21
 22struct sidtab_str_cache {
 23	struct rcu_head rcu_member;
 24	struct list_head lru_member;
 25	struct sidtab_entry *parent;
 26	u32 len;
 27	char str[];
 28};
 29
 30#define index_to_sid(index) (index + SECINITSID_NUM + 1)
 31#define sid_to_index(sid) (sid - (SECINITSID_NUM + 1))
 32
 33int sidtab_init(struct sidtab *s)
 34{
 35	u32 i;
 36
 37	memset(s->roots, 0, sizeof(s->roots));
 38
 39	for (i = 0; i < SECINITSID_NUM; i++)
 40		s->isids[i].set = 0;
 41
 42	s->frozen = false;
 43	s->count = 0;
 44	s->convert = NULL;
 45	hash_init(s->context_to_sid);
 46
 47	spin_lock_init(&s->lock);
 48
 49#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
 50	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
 51	INIT_LIST_HEAD(&s->cache_lru_list);
 52	spin_lock_init(&s->cache_lock);
 53#endif
 54
 55	return 0;
 56}
 57
 58static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
 59{
 60	struct sidtab_entry *entry;
 61	u32 sid = 0;
 62
 63	rcu_read_lock();
 64	hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
 65		if (entry->hash != hash)
 66			continue;
 67		if (context_cmp(&entry->context, context)) {
 68			sid = entry->sid;
 69			break;
 70		}
 71	}
 72	rcu_read_unlock();
 73	return sid;
 74}
 75
 76int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
 77{
 78	struct sidtab_isid_entry *isid;
 79	u32 hash;
 80	int rc;
 81
 82	if (sid == 0 || sid > SECINITSID_NUM)
 83		return -EINVAL;
 84
 85	isid = &s->isids[sid - 1];
 86
 87	rc = context_cpy(&isid->entry.context, context);
 88	if (rc)
 89		return rc;
 90
 91#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
 92	isid->entry.cache = NULL;
 93#endif
 94	isid->set = 1;
 95
 96	hash = context_compute_hash(context);
 97
 98	/*
 99	 * Multiple initial sids may map to the same context. Check that this
100	 * context is not already represented in the context_to_sid hashtable
101	 * to avoid duplicate entries and long linked lists upon hash
102	 * collision.
103	 */
104	if (!context_to_sid(s, context, hash)) {
105		isid->entry.sid = sid;
106		isid->entry.hash = hash;
107		hash_add(s->context_to_sid, &isid->entry.list, hash);
108	}
109
110	return 0;
111}
112
113int sidtab_hash_stats(struct sidtab *sidtab, char *page)
114{
115	int i;
116	int chain_len = 0;
117	int slots_used = 0;
118	int entries = 0;
119	int max_chain_len = 0;
120	int cur_bucket = 0;
121	struct sidtab_entry *entry;
122
123	rcu_read_lock();
124	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
125		entries++;
126		if (i == cur_bucket) {
127			chain_len++;
128			if (chain_len == 1)
129				slots_used++;
130		} else {
131			cur_bucket = i;
132			if (chain_len > max_chain_len)
133				max_chain_len = chain_len;
134			chain_len = 0;
135		}
136	}
137	rcu_read_unlock();
138
139	if (chain_len > max_chain_len)
140		max_chain_len = chain_len;
141
142	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
143			 "longest chain: %d\n", entries,
144			 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
145}
146
147static u32 sidtab_level_from_count(u32 count)
148{
149	u32 capacity = SIDTAB_LEAF_ENTRIES;
150	u32 level = 0;
151
152	while (count > capacity) {
153		capacity <<= SIDTAB_INNER_SHIFT;
154		++level;
155	}
156	return level;
157}
158
159static int sidtab_alloc_roots(struct sidtab *s, u32 level)
160{
161	u32 l;
162
163	if (!s->roots[0].ptr_leaf) {
164		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
165					       GFP_ATOMIC);
166		if (!s->roots[0].ptr_leaf)
167			return -ENOMEM;
168	}
169	for (l = 1; l <= level; ++l)
170		if (!s->roots[l].ptr_inner) {
171			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
172							GFP_ATOMIC);
173			if (!s->roots[l].ptr_inner)
174				return -ENOMEM;
175			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
176		}
177	return 0;
178}
179
180static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
181					     int alloc)
182{
183	union sidtab_entry_inner *entry;
184	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
185
186	/* find the level of the subtree we need */
187	level = sidtab_level_from_count(index + 1);
188	capacity_shift = level * SIDTAB_INNER_SHIFT;
189
190	/* allocate roots if needed */
191	if (alloc && sidtab_alloc_roots(s, level) != 0)
192		return NULL;
193
194	/* lookup inside the subtree */
195	entry = &s->roots[level];
196	while (level != 0) {
197		capacity_shift -= SIDTAB_INNER_SHIFT;
198		--level;
199
200		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
201		leaf_index &= ((u32)1 << capacity_shift) - 1;
202
203		if (!entry->ptr_inner) {
204			if (alloc)
205				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
206							   GFP_ATOMIC);
207			if (!entry->ptr_inner)
208				return NULL;
209		}
210	}
211	if (!entry->ptr_leaf) {
212		if (alloc)
213			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
214						  GFP_ATOMIC);
215		if (!entry->ptr_leaf)
216			return NULL;
217	}
218	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
219}
220
221static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
222{
223	/* read entries only after reading count */
224	u32 count = smp_load_acquire(&s->count);
225
226	if (index >= count)
227		return NULL;
228
229	return sidtab_do_lookup(s, index, 0);
230}
231
232static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
233{
234	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
235}
236
237static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
238					       int force)
239{
240	if (sid != 0) {
241		struct sidtab_entry *entry;
242
243		if (sid > SECINITSID_NUM)
244			entry = sidtab_lookup(s, sid_to_index(sid));
245		else
246			entry = sidtab_lookup_initial(s, sid);
247		if (entry && (!entry->context.len || force))
248			return entry;
249	}
250
251	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
252}
253
254struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
255{
256	return sidtab_search_core(s, sid, 0);
257}
258
259struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
260{
261	return sidtab_search_core(s, sid, 1);
262}
263
264int sidtab_context_to_sid(struct sidtab *s, struct context *context,
265			  u32 *sid)
266{
267	unsigned long flags;
268	u32 count, hash = context_compute_hash(context);
269	struct sidtab_convert_params *convert;
270	struct sidtab_entry *dst, *dst_convert;
271	int rc;
272
273	*sid = context_to_sid(s, context, hash);
274	if (*sid)
275		return 0;
276
277	/* lock-free search failed: lock, re-search, and insert if not found */
278	spin_lock_irqsave(&s->lock, flags);
279
280	rc = 0;
281	*sid = context_to_sid(s, context, hash);
282	if (*sid)
283		goto out_unlock;
284
285	if (unlikely(s->frozen)) {
286		/*
287		 * This sidtab is now frozen - tell the caller to abort and
288		 * get the new one.
289		 */
290		rc = -ESTALE;
291		goto out_unlock;
292	}
293
294	count = s->count;
295	convert = s->convert;
296
297	/* bail out if we already reached max entries */
298	rc = -EOVERFLOW;
299	if (count >= SIDTAB_MAX)
300		goto out_unlock;
301
302	/* insert context into new entry */
303	rc = -ENOMEM;
304	dst = sidtab_do_lookup(s, count, 1);
305	if (!dst)
306		goto out_unlock;
307
308	dst->sid = index_to_sid(count);
309	dst->hash = hash;
310
311	rc = context_cpy(&dst->context, context);
312	if (rc)
313		goto out_unlock;
314
315	/*
316	 * if we are building a new sidtab, we need to convert the context
317	 * and insert it there as well
318	 */
319	if (convert) {
320		rc = -ENOMEM;
321		dst_convert = sidtab_do_lookup(convert->target, count, 1);
322		if (!dst_convert) {
323			context_destroy(&dst->context);
324			goto out_unlock;
325		}
326
327		rc = convert->func(context, &dst_convert->context,
328				   convert->args);
329		if (rc) {
330			context_destroy(&dst->context);
331			goto out_unlock;
332		}
333		dst_convert->sid = index_to_sid(count);
334		dst_convert->hash = context_compute_hash(&dst_convert->context);
335		convert->target->count = count + 1;
336
337		hash_add_rcu(convert->target->context_to_sid,
338			     &dst_convert->list, dst_convert->hash);
339	}
340
341	if (context->len)
342		pr_info("SELinux:  Context %s is not valid (left unmapped).\n",
343			context->str);
344
345	*sid = index_to_sid(count);
346
347	/* write entries before updating count */
348	smp_store_release(&s->count, count + 1);
349	hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);
350
351	rc = 0;
352out_unlock:
353	spin_unlock_irqrestore(&s->lock, flags);
354	return rc;
355}
356
357static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
358{
359	struct sidtab_entry *entry;
360	u32 i;
361
362	for (i = 0; i < count; i++) {
363		entry = sidtab_do_lookup(s, i, 0);
364		entry->sid = index_to_sid(i);
365		entry->hash = context_compute_hash(&entry->context);
366
367		hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
368	}
369}
370
371static int sidtab_convert_tree(union sidtab_entry_inner *edst,
372			       union sidtab_entry_inner *esrc,
373			       u32 *pos, u32 count, u32 level,
374			       struct sidtab_convert_params *convert)
375{
376	int rc;
377	u32 i;
378
379	if (level != 0) {
380		if (!edst->ptr_inner) {
381			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
382						  GFP_KERNEL);
383			if (!edst->ptr_inner)
384				return -ENOMEM;
385		}
386		i = 0;
387		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
388			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
389						 &esrc->ptr_inner->entries[i],
390						 pos, count, level - 1,
391						 convert);
392			if (rc)
393				return rc;
394			i++;
395		}
396	} else {
397		if (!edst->ptr_leaf) {
398			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
399						 GFP_KERNEL);
400			if (!edst->ptr_leaf)
401				return -ENOMEM;
402		}
403		i = 0;
404		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
405			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
406					   &edst->ptr_leaf->entries[i].context,
407					   convert->args);
408			if (rc)
409				return rc;
410			(*pos)++;
411			i++;
412		}
413		cond_resched();
414	}
415	return 0;
416}
417
418int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
419{
420	unsigned long flags;
421	u32 count, level, pos;
422	int rc;
423
424	spin_lock_irqsave(&s->lock, flags);
425
426	/* concurrent policy loads are not allowed */
427	if (s->convert) {
428		spin_unlock_irqrestore(&s->lock, flags);
429		return -EBUSY;
430	}
431
432	count = s->count;
433	level = sidtab_level_from_count(count);
434
435	/* allocate last leaf in the new sidtab (to avoid race with
436	 * live convert)
437	 */
438	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
439	if (rc) {
440		spin_unlock_irqrestore(&s->lock, flags);
441		return rc;
442	}
443
444	/* set count in case no new entries are added during conversion */
445	params->target->count = count;
446
447	/* enable live convert of new entries */
448	s->convert = params;
449
450	/* we can safely convert the tree outside the lock */
451	spin_unlock_irqrestore(&s->lock, flags);
452
453	pr_info("SELinux:  Converting %u SID table entries...\n", count);
454
455	/* convert all entries not covered by live convert */
456	pos = 0;
457	rc = sidtab_convert_tree(&params->target->roots[level],
458				 &s->roots[level], &pos, count, level, params);
459	if (rc) {
460		/* we need to keep the old table - disable live convert */
461		spin_lock_irqsave(&s->lock, flags);
462		s->convert = NULL;
463		spin_unlock_irqrestore(&s->lock, flags);
464		return rc;
465	}
466	/*
467	 * The hashtable can also be modified in sidtab_context_to_sid()
468	 * so we must re-acquire the lock here.
469	 */
470	spin_lock_irqsave(&s->lock, flags);
471	sidtab_convert_hashtable(params->target, count);
472	spin_unlock_irqrestore(&s->lock, flags);
473
474	return 0;
475}
476
477void sidtab_cancel_convert(struct sidtab *s)
478{
479	unsigned long flags;
480
481	/* cancelling policy load - disable live convert of sidtab */
482	spin_lock_irqsave(&s->lock, flags);
483	s->convert = NULL;
484	spin_unlock_irqrestore(&s->lock, flags);
485}
486
487void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
488{
489	spin_lock_irqsave(&s->lock, *flags);
490	s->frozen = true;
491	s->convert = NULL;
492}
493void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
494{
495	spin_unlock_irqrestore(&s->lock, *flags);
496}
497
498static void sidtab_destroy_entry(struct sidtab_entry *entry)
499{
500	context_destroy(&entry->context);
501#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
502	kfree(rcu_dereference_raw(entry->cache));
503#endif
504}
505
506static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
507{
508	u32 i;
509
510	if (level != 0) {
511		struct sidtab_node_inner *node = entry.ptr_inner;
512
513		if (!node)
514			return;
515
516		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
517			sidtab_destroy_tree(node->entries[i], level - 1);
518		kfree(node);
519	} else {
520		struct sidtab_node_leaf *node = entry.ptr_leaf;
521
522		if (!node)
523			return;
524
525		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
526			sidtab_destroy_entry(&node->entries[i]);
527		kfree(node);
528	}
529}
530
531void sidtab_destroy(struct sidtab *s)
532{
533	u32 i, level;
534
535	for (i = 0; i < SECINITSID_NUM; i++)
536		if (s->isids[i].set)
537			sidtab_destroy_entry(&s->isids[i].entry);
538
539	level = SIDTAB_MAX_LEVEL;
540	while (level && !s->roots[level].ptr_inner)
541		--level;
542
543	sidtab_destroy_tree(s->roots[level], level);
544	/*
545	 * The context_to_sid hashtable's objects are all shared
546	 * with the isids array and context tree, and so don't need
547	 * to be cleaned up here.
548	 */
549}
550
551#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
552
553void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
554			const char *str, u32 str_len)
555{
556	struct sidtab_str_cache *cache, *victim = NULL;
557	unsigned long flags;
558
559	/* do not cache invalid contexts */
560	if (entry->context.len)
561		return;
562
563	spin_lock_irqsave(&s->cache_lock, flags);
564
565	cache = rcu_dereference_protected(entry->cache,
566					  lockdep_is_held(&s->cache_lock));
567	if (cache) {
568		/* entry in cache - just bump to the head of LRU list */
569		list_move(&cache->lru_member, &s->cache_lru_list);
570		goto out_unlock;
571	}
572
573	cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
574	if (!cache)
575		goto out_unlock;
576
577	if (s->cache_free_slots == 0) {
578		/* pop a cache entry from the tail and free it */
579		victim = container_of(s->cache_lru_list.prev,
580				      struct sidtab_str_cache, lru_member);
581		list_del(&victim->lru_member);
582		rcu_assign_pointer(victim->parent->cache, NULL);
583	} else {
584		s->cache_free_slots--;
585	}
586	cache->parent = entry;
587	cache->len = str_len;
588	memcpy(cache->str, str, str_len);
589	list_add(&cache->lru_member, &s->cache_lru_list);
590
591	rcu_assign_pointer(entry->cache, cache);
592
593out_unlock:
594	spin_unlock_irqrestore(&s->cache_lock, flags);
595	kfree_rcu(victim, rcu_member);
596}
597
598int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
599		       char **out, u32 *out_len)
600{
601	struct sidtab_str_cache *cache;
602	int rc = 0;
603
604	if (entry->context.len)
605		return -ENOENT; /* do not cache invalid contexts */
606
607	rcu_read_lock();
608
609	cache = rcu_dereference(entry->cache);
610	if (!cache) {
611		rc = -ENOENT;
612	} else {
613		*out_len = cache->len;
614		if (out) {
615			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
616			if (!*out)
617				rc = -ENOMEM;
618		}
619	}
620
621	rcu_read_unlock();
622
623	if (!rc && out)
624		sidtab_sid2str_put(s, entry, *out, *out_len);
625	return rc;
626}
627
628#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
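
Both versions rely on the same lock-free publication pattern in sidtab_context_to_sid() and sidtab_lookup(): a new entry is fully initialised while holding s->lock, s->count is then bumped with smp_store_release(), and lock-free readers pair that with smp_load_acquire() on the count before dereferencing any entry. The sketch below is a rough userspace analogue using C11 atomics; the names and the fixed-size array are illustrative only, and the single-writer assumption stands in for the spinlock held on the kernel side.

/*
 * Userspace analogue (C11 atomics) of the publish/consume pattern used by
 * sidtab_context_to_sid()/sidtab_lookup(); names and sizes are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_ENTRIES 128

struct entry {
	int payload;
};

static struct entry entries[MAX_ENTRIES];
static atomic_uint count;	/* number of published entries */

/* Writer (assumed serialized, like the kernel's s->lock): init, then publish. */
static int publish(int payload)
{
	unsigned int idx = atomic_load_explicit(&count, memory_order_relaxed);

	if (idx >= MAX_ENTRIES)			/* ~ the SIDTAB_MAX check */
		return -1;
	entries[idx].payload = payload;		/* initialise before publishing */
	atomic_store_explicit(&count, idx + 1,
			      memory_order_release);	/* ~ smp_store_release() */
	return 0;
}

/* Reader: acquire-load the count, then it is safe to read entries below it. */
static const struct entry *lookup(unsigned int idx)
{
	/* ~ smp_load_acquire(): read entries only after reading count */
	unsigned int n = atomic_load_explicit(&count, memory_order_acquire);

	if (idx >= n)
		return NULL;
	return &entries[idx];
}

int main(void)
{
	const struct entry *e;

	publish(42);
	e = lookup(0);
	printf("%d\n", e ? e->payload : -1);
	return 0;
}

The release store orders the slot initialisation before the count update, so a reader that observes the new count is guaranteed to see a fully initialised entry, which is exactly the guarantee the comments "write entries before updating count" and "read entries only after reading count" describe in the listings above.
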