v6.2 (security/selinux/ss/sidtab.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
#include "services.h"

struct sidtab_str_cache {
	struct rcu_head rcu_member;
	struct list_head lru_member;
	struct sidtab_entry *parent;
	u32 len;
	char str[];
};

#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))

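These two macros pin the dynamic part of the table to the SID space immediately after the fixed block of initial SIDs. A minimal sketch of the round trip (illustrative only, not part of sidtab.c; SECINITSID_NUM is generated into flask.h):

	/* Example: dynamic index 0 maps to the first SID past the
	 * initial block, and the two macros invert each other. */
	u32 example_index = 0;
	u32 example_sid = index_to_sid(example_index); /* SECINITSID_NUM + 1 */
	WARN_ON(sid_to_index(example_sid) != example_index);
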
int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	s->frozen = false;
	s->count = 0;
	s->convert = NULL;
	hash_init(s->context_to_sid);

	spin_lock_init(&s->lock);

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
	INIT_LIST_HEAD(&s->cache_lru_list);
	spin_lock_init(&s->cache_lock);
#endif

	return 0;
}

static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
{
	struct sidtab_entry *entry;
	u32 sid = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
		if (entry->hash != hash)
			continue;
		if (context_cmp(&entry->context, context)) {
			sid = entry->sid;
			break;
		}
	}
	rcu_read_unlock();
	return sid;
}

int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *isid;
	u32 hash;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	isid = &s->isids[sid - 1];

	rc = context_cpy(&isid->entry.context, context);
	if (rc)
		return rc;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	isid->entry.cache = NULL;
#endif
	isid->set = 1;

	hash = context_compute_hash(context);

	/*
	 * Multiple initial sids may map to the same context. Check that this
	 * context is not already represented in the context_to_sid hashtable
	 * to avoid duplicate entries and long linked lists upon hash
	 * collision.
	 */
	if (!context_to_sid(s, context, hash)) {
		isid->entry.sid = sid;
		isid->entry.hash = hash;
		hash_add(s->context_to_sid, &isid->entry.list, hash);
	}

	return 0;
}

int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
	int i;
	int chain_len = 0;
	int slots_used = 0;
	int entries = 0;
	int max_chain_len = 0;
	int cur_bucket = 0;
	struct sidtab_entry *entry;

	rcu_read_lock();
	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
		entries++;
		if (i == cur_bucket) {
			chain_len++;
			if (chain_len == 1)
				slots_used++;
		} else {
			cur_bucket = i;
			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
			chain_len = 0;
		}
	}
	rcu_read_unlock();

	if (chain_len > max_chain_len)
		max_chain_len = chain_len;

	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
			 "longest chain: %d\n", entries,
			 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
}

static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}

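sidtab_level_from_count() computes how deep the radix tree must be before `count` entries fit: every additional level multiplies the capacity by 2^SIDTAB_INNER_SHIFT. A worked illustration with hypothetical sizes (the real SIDTAB_LEAF_ENTRIES and SIDTAB_INNER_SHIFT are derived from the page size in sidtab.h, so the actual values vary):

	/* Assuming SIDTAB_LEAF_ENTRIES == 512 and SIDTAB_INNER_SHIFT == 9:
	 *   count <= 512          -> level 0 (a single leaf node)
	 *   count <= 512 << 9     -> level 1 (262144 entries)
	 *   count <= 512 << 18    -> level 2, and so on
	 */
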
static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}
	return 0;
}

static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
					     int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}

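On the way down, sidtab_do_lookup() peels SIDTAB_INNER_SHIFT bits off the leaf index per inner level and resolves the final slot with index % SIDTAB_LEAF_ENTRIES. Tracing one hypothetical index under the same illustrative sizes as above:

	/* index == 70000, SIDTAB_LEAF_ENTRIES == 512, SIDTAB_INNER_SHIFT == 9:
	 *   leaf_index = 70000 / 512 = 136 -> level-1 subtree, capacity_shift = 9
	 *   inner slot = 136 >> 0 = 136    (after capacity_shift drops 9 -> 0)
	 *   leaf slot  = 70000 % 512 = 368
	 * i.e. the entry lives at roots[1]->entries[136]->entries[368].
	 */
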
static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
	/* read entries only after reading count */
	u32 count = smp_load_acquire(&s->count);

	if (index >= count)
		return NULL;

	return sidtab_do_lookup(s, index, 0);
}

static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}

static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
					       int force)
{
	if (sid != 0) {
		struct sidtab_entry *entry;

		if (sid > SECINITSID_NUM)
			entry = sidtab_lookup(s, sid_to_index(sid));
		else
			entry = sidtab_lookup_initial(s, sid);
		if (entry && (!entry->context.len || force))
			return entry;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}

int sidtab_context_to_sid(struct sidtab *s, struct context *context,
			  u32 *sid)
{
	unsigned long flags;
	u32 count, hash = context_compute_hash(context);
	struct sidtab_convert_params *convert;
	struct sidtab_entry *dst, *dst_convert;
	int rc;

	*sid = context_to_sid(s, context, hash);
	if (*sid)
		return 0;

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	rc = 0;
	*sid = context_to_sid(s, context, hash);
	if (*sid)
		goto out_unlock;

	if (unlikely(s->frozen)) {
		/*
		 * This sidtab is now frozen - tell the caller to abort and
		 * get the new one.
		 */
		rc = -ESTALE;
		goto out_unlock;
	}

	count = s->count;

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	dst->sid = index_to_sid(count);
	dst->hash = hash;

	rc = context_cpy(&dst->context, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	convert = s->convert;
	if (convert) {
		struct sidtab *target = convert->target;

		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(target, count, 1);
		if (!dst_convert) {
			context_destroy(&dst->context);
			goto out_unlock;
		}

		rc = services_convert_context(convert->args,
					      context, &dst_convert->context,
					      GFP_ATOMIC);
		if (rc) {
			context_destroy(&dst->context);
			goto out_unlock;
		}
		dst_convert->sid = index_to_sid(count);
		dst_convert->hash = context_compute_hash(&dst_convert->context);
		target->count = count + 1;

		hash_add_rcu(target->context_to_sid,
			     &dst_convert->list, dst_convert->hash);
	}

	if (context->len)
		pr_info("SELinux:  Context %s is not valid (left unmapped).\n",
			context->str);

	*sid = index_to_sid(count);

	/* write entries before updating count */
	smp_store_release(&s->count, count + 1);
	hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}

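Seen from a caller, sidtab_context_to_sid() is a find-or-insert: the lock-free hash lookup is retried under the spinlock before a new entry is allocated, and -ESTALE signals that a policy load froze this table. A hedged usage sketch (the variables and the retry are hypothetical; the in-tree callers in services.c handle -ESTALE by retrying against the freshly loaded policy's sidtab):

	u32 sid;
	int rc;

	rc = sidtab_context_to_sid(sidtab, &ctx, &sid);
	if (rc == -ESTALE) {
		/* table frozen by a policy load: re-fetch the live
		 * sidtab and retry the mapping against it */
	}
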
static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
	struct sidtab_entry *entry;
	u32 i;

	for (i = 0; i < count; i++) {
		entry = sidtab_do_lookup(s, i, 0);
		entry->sid = index_to_sid(i);
		entry->hash = context_compute_hash(&entry->context);

		hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
	}
}

static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = services_convert_context(convert->args,
					&esrc->ptr_leaf->entries[i].context,
					&edst->ptr_leaf->entries[i].context,
					GFP_KERNEL);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}

int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely convert the tree outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux:  Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}
	/*
	 * The hashtable can also be modified in sidtab_context_to_sid()
	 * so we must re-acquire the lock here.
	 */
	spin_lock_irqsave(&s->lock, flags);
	sidtab_convert_hashtable(params->target, count);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

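The conversion runs in two halves: sidtab_convert() walks the existing tree outside the lock, while a non-NULL s->convert makes sidtab_context_to_sid() mirror any concurrently added entry into the target ("live convert"). A hedged sketch of how a policy load might drive it (the surrounding variables are hypothetical; the field names follow the sidtab_convert_params usage visible above):

	struct sidtab_convert_params params = {
		.args	= &convert_args,	/* struct convert_context_args */
		.target	= new_sidtab,
	};

	rc = sidtab_convert(old_sidtab, &params);
	if (rc)	/* -EBUSY: concurrent load; -ENOMEM: keep the old table */
		goto abort_policy_load;
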
void sidtab_cancel_convert(struct sidtab *s)
{
	unsigned long flags;

	/* cancelling policy load - disable live convert of sidtab */
	spin_lock_irqsave(&s->lock, flags);
	s->convert = NULL;
	spin_unlock_irqrestore(&s->lock, flags);
}

void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
{
	spin_lock_irqsave(&s->lock, *flags);
	s->frozen = true;
	s->convert = NULL;
}

void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
{
	spin_unlock_irqrestore(&s->lock, *flags);
}

static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
	context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	kfree(rcu_dereference_raw(entry->cache));
#endif
}

static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			sidtab_destroy_entry(&node->entries[i]);
		kfree(node);
	}
}

void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			sidtab_destroy_entry(&s->isids[i].entry);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
	/*
	 * The context_to_sid hashtable's objects are all shared
	 * with the isids array and context tree, and so don't need
	 * to be cleaned up here.
	 */
}

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0

void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len)
{
	struct sidtab_str_cache *cache, *victim = NULL;
	unsigned long flags;

	/* do not cache invalid contexts */
	if (entry->context.len)
		return;

	spin_lock_irqsave(&s->cache_lock, flags);

	cache = rcu_dereference_protected(entry->cache,
					  lockdep_is_held(&s->cache_lock));
	if (cache) {
		/* entry in cache - just bump to the head of LRU list */
		list_move(&cache->lru_member, &s->cache_lru_list);
		goto out_unlock;
	}

	cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);
	if (!cache)
		goto out_unlock;

	if (s->cache_free_slots == 0) {
		/* pop a cache entry from the tail and free it */
		victim = container_of(s->cache_lru_list.prev,
				      struct sidtab_str_cache, lru_member);
		list_del(&victim->lru_member);
		rcu_assign_pointer(victim->parent->cache, NULL);
	} else {
		s->cache_free_slots--;
	}
	cache->parent = entry;
	cache->len = str_len;
	memcpy(cache->str, str, str_len);
	list_add(&cache->lru_member, &s->cache_lru_list);

	rcu_assign_pointer(entry->cache, cache);

out_unlock:
	spin_unlock_irqrestore(&s->cache_lock, flags);
	kfree_rcu(victim, rcu_member);
}

int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len)
{
	struct sidtab_str_cache *cache;
	int rc = 0;

	if (entry->context.len)
		return -ENOENT; /* do not cache invalid contexts */

	rcu_read_lock();

	cache = rcu_dereference(entry->cache);
	if (!cache) {
		rc = -ENOENT;
	} else {
		*out_len = cache->len;
		if (out) {
			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
			if (!*out)
				rc = -ENOMEM;
		}
	}

	rcu_read_unlock();

	if (!rc && out)
		sidtab_sid2str_put(s, entry, *out, *out_len);
	return rc;
}

#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
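
sidtab_sid2str_get() and sidtab_sid2str_put() together form a small LRU cache of rendered context strings, bounded by CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE slots. A hedged sketch of the intended pattern (caller code hypothetical; the real users sit in the SID-to-string paths of services.c):

	char *str = NULL;
	u32 len = 0;

	if (sidtab_sid2str_get(sidtab, entry, &str, &len) == 0) {
		/* hit: 'str' is a fresh kmemdup() copy and the cache
		 * entry was bumped to the head of the LRU list */
		kfree(str);
	} else {
		/* miss: render the context into str/len first (elided
		 * here), then publish it for later lookups:
		 * sidtab_sid2str_put(sidtab, entry, str, len); */
	}
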
v5.4 (security/selinux/ss/sidtab.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"

int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	/* max count is SIDTAB_MAX so valid index is always < SIDTAB_MAX */
	for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
		s->rcache[i] = SIDTAB_MAX;

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	s->count = 0;
	s->convert = NULL;

	spin_lock_init(&s->lock);
	return 0;
}

int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *entry;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	entry = &s->isids[sid - 1];

	rc = context_cpy(&entry->context, context);
	if (rc)
		return rc;

	entry->set = 1;
	return 0;
}

static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}

static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}
	return 0;
}

static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES].context;
}

static struct context *sidtab_lookup(struct sidtab *s, u32 index)
{
	/* read entries only after reading count */
	u32 count = smp_load_acquire(&s->count);

	if (index >= count)
		return NULL;

	return sidtab_do_lookup(s, index, 0);
}

static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].context : NULL;
}

static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
{
	struct context *context;

	if (sid != 0) {
		if (sid > SECINITSID_NUM)
			context = sidtab_lookup(s, sid - (SECINITSID_NUM + 1));
		else
			context = sidtab_lookup_initial(s, sid);
		if (context && (!context->len || force))
			return context;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct context *sidtab_search(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct context *sidtab_search_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}

static int sidtab_find_context(union sidtab_entry_inner entry,
			       u32 *pos, u32 count, u32 level,
			       struct context *context, u32 *index)
{
	int rc;
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_find_context(node->entries[i],
						 pos, count, level - 1,
						 context, index);
			if (rc == 0)
				return 0;
			i++;
		}
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			if (context_cmp(&node->entries[i].context, context)) {
				*index = *pos;
				return 0;
			}
			(*pos)++;
			i++;
		}
	}
	return -ENOENT;
}

static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
{
	while (pos > 0) {
		WRITE_ONCE(s->rcache[pos], READ_ONCE(s->rcache[pos - 1]));
		--pos;
	}
	WRITE_ONCE(s->rcache[0], index);
}

static void sidtab_rcache_push(struct sidtab *s, u32 index)
{
	sidtab_rcache_update(s, index, SIDTAB_RCACHE_SIZE - 1);
}

static int sidtab_rcache_search(struct sidtab *s, struct context *context,
				u32 *index)
{
	u32 i;

	for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
		u32 v = READ_ONCE(s->rcache[i]);

		if (v >= SIDTAB_MAX)
			continue;

		if (context_cmp(sidtab_do_lookup(s, v, 0), context)) {
			sidtab_rcache_update(s, v, i);
			*index = v;
			return 0;
		}
	}
	return -ENOENT;
}

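In place of the v6.2 hashtable, v5.4 keeps a small most-recently-used array of entry indices. sidtab_rcache_update() shifts the slots before `pos` down by one and installs the index at slot 0, so a hit migrates to the front and a push evicts the oldest slot. A worked illustration (the size of 3 is this example's assumption; SIDTAB_RCACHE_SIZE is defined in sidtab.h):

	/* rcache = { A, B, C }
	 *   hit on C at slot 2: sidtab_rcache_update(s, C, 2) -> { C, A, B }
	 *   push of new D:      sidtab_rcache_push(s, D)      -> { D, C, A }
	 * (B, the least recently used index, falls off the end)
	 */
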
static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
				 u32 *index)
{
	unsigned long flags;
	u32 count, count_locked, level, pos;
	struct sidtab_convert_params *convert;
	struct context *dst, *dst_convert;
	int rc;

	rc = sidtab_rcache_search(s, context, index);
	if (rc == 0)
		return 0;

	/* read entries only after reading count */
	count = smp_load_acquire(&s->count);
	level = sidtab_level_from_count(count);

	pos = 0;
	rc = sidtab_find_context(s->roots[level], &pos, count, level,
				 context, index);
	if (rc == 0) {
		sidtab_rcache_push(s, *index);
		return 0;
	}

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	convert = s->convert;
	count_locked = s->count;
	level = sidtab_level_from_count(count_locked);

	/* if count has changed before we acquired the lock, then catch up */
	while (count < count_locked) {
		if (context_cmp(sidtab_do_lookup(s, count, 0), context)) {
			sidtab_rcache_push(s, count);
			*index = count;
			rc = 0;
			goto out_unlock;
		}
		++count;
	}

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	rc = context_cpy(dst, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	if (convert) {
		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(convert->target, count, 1);
		if (!dst_convert) {
			context_destroy(dst);
			goto out_unlock;
		}

		rc = convert->func(context, dst_convert, convert->args);
		if (rc) {
			context_destroy(dst);
			goto out_unlock;
		}

		/* at this point we know the insert won't fail */
		convert->target->count = count + 1;
	}

	if (context->len)
		pr_info("SELinux:  Context %s is not valid (left unmapped).\n",
			context->str);

	sidtab_rcache_push(s, count);
	*index = count;

	/* write entries before writing new count */
	smp_store_release(&s->count, count + 1);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}

int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid)
{
	int rc;
	u32 i;

	for (i = 0; i < SECINITSID_NUM; i++) {
		struct sidtab_isid_entry *entry = &s->isids[i];

		if (entry->set && context_cmp(context, &entry->context)) {
			*sid = i + 1;
			return 0;
		}
	}

	rc = sidtab_reverse_lookup(s, context, sid);
	if (rc)
		return rc;
	*sid += SECINITSID_NUM + 1;
	return 0;
}

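Note the shape of the v5.4 slow path: initial SIDs are matched by a linear scan of isids, and any other context goes through sidtab_reverse_lookup(), which on a cache miss walks the entire tree, O(count) per unmapped context; that linear walk is what the context_to_sid hashtable in the v6.2 listing above replaces. A small illustration of the initial-SID short circuit (example only; SECINITSID_KERNEL is one of the constants generated into flask.h):

	/* If 'ctx' is context_cmp()-equal to the context installed for
	 * SECINITSID_KERNEL via sidtab_set_initial(), then
	 *	sidtab_context_to_sid(s, &ctx, &sid)
	 * returns 0 with sid == SECINITSID_KERNEL (isids[i] maps to
	 * SID i + 1) without ever entering the reverse lookup.
	 */
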
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
					   &edst->ptr_leaf->entries[i].context,
					   convert->args);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}

int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely do the rest of the conversion outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux:  Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
	}
	return rc;
}

static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			context_destroy(&node->entries[i].context);
		kfree(node);
	}
}

void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			context_destroy(&s->isids[i].context);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
}