1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * AppArmor security module
4 *
5 * This file contains AppArmor functions for unpacking policy loaded from
6 * userspace.
7 *
8 * Copyright (C) 1998-2008 Novell/SUSE
9 * Copyright 2009-2010 Canonical Ltd.
10 *
11 * AppArmor uses a serialized binary format for loading policy. To find
12 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
13 * All policy is validated before it is used.
14 */
15
16#include <linux/unaligned.h>
17#include <kunit/visibility.h>
18#include <linux/ctype.h>
19#include <linux/errno.h>
20#include <linux/zstd.h>
21
22#include "include/apparmor.h"
23#include "include/audit.h"
24#include "include/cred.h"
25#include "include/crypto.h"
26#include "include/file.h"
27#include "include/match.h"
28#include "include/path.h"
29#include "include/policy.h"
30#include "include/policy_unpack.h"
31#include "include/policy_compat.h"
32
33/* audit callback for unpack fields */
34static void audit_cb(struct audit_buffer *ab, void *va)
35{
36 struct common_audit_data *sa = va;
37 struct apparmor_audit_data *ad = aad(sa);
38
39 if (ad->iface.ns) {
40 audit_log_format(ab, " ns=");
41 audit_log_untrustedstring(ab, ad->iface.ns);
42 }
43 if (ad->name) {
44 audit_log_format(ab, " name=");
45 audit_log_untrustedstring(ab, ad->name);
46 }
47 if (ad->iface.pos)
48 audit_log_format(ab, " offset=%ld", ad->iface.pos);
49}
50
51/**
52 * audit_iface - do audit message for policy unpacking/load/replace/remove
53 * @new: profile if it has been allocated (MAYBE NULL)
54 * @ns_name: name of the ns the profile is to be loaded to (MAYBE NULL)
55 * @name: name of the profile being manipulated (MAYBE NULL)
56 * @info: any extra info about the failure (MAYBE NULL)
57 * @e: buffer position info
58 * @error: error code
59 *
60 * Returns: %0 or error
61 */
62static int audit_iface(struct aa_profile *new, const char *ns_name,
63 const char *name, const char *info, struct aa_ext *e,
64 int error)
65{
66 struct aa_profile *profile = labels_profile(aa_current_raw_label());
67 DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
68 if (e)
69 ad.iface.pos = e->pos - e->start;
70 ad.iface.ns = ns_name;
71 if (new)
72 ad.name = new->base.hname;
73 else
74 ad.name = name;
75 ad.info = info;
76 ad.error = error;
77
78 return aa_audit(AUDIT_APPARMOR_STATUS, profile, &ad, audit_cb);
79}
80
81void __aa_loaddata_update(struct aa_loaddata *data, long revision)
82{
83 AA_BUG(!data);
84 AA_BUG(!data->ns);
85 AA_BUG(!mutex_is_locked(&data->ns->lock));
86 AA_BUG(data->revision > revision);
87
88 data->revision = revision;
89 if ((data->dents[AAFS_LOADDATA_REVISION])) {
90 struct inode *inode;
91
92 inode = d_inode(data->dents[AAFS_LOADDATA_DIR]);
93 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
94
95 inode = d_inode(data->dents[AAFS_LOADDATA_REVISION]);
96 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
97 }
98}
99
100bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
101{
102 if (l->size != r->size)
103 return false;
104 if (l->compressed_size != r->compressed_size)
105 return false;
106 if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
107 return false;
108 return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
109}
110
111/*
112 * need to take the ns mutex lock, which is NOT safe in most places
113 * that put_loaddata is called from, so we have to delay freeing it
114 */
115static void do_loaddata_free(struct work_struct *work)
116{
117 struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
118 struct aa_ns *ns = aa_get_ns(d->ns);
119
120 if (ns) {
121 mutex_lock_nested(&ns->lock, ns->level);
122 __aa_fs_remove_rawdata(d);
123 mutex_unlock(&ns->lock);
124 aa_put_ns(ns);
125 }
126
127 kfree_sensitive(d->hash);
128 kfree_sensitive(d->name);
129 kvfree(d->data);
130 kfree_sensitive(d);
131}
132
133void aa_loaddata_kref(struct kref *kref)
134{
135 struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
136
137 if (d) {
138 INIT_WORK(&d->work, do_loaddata_free);
139 schedule_work(&d->work);
140 }
141}
142
143struct aa_loaddata *aa_loaddata_alloc(size_t size)
144{
145 struct aa_loaddata *d;
146
147 d = kzalloc(sizeof(*d), GFP_KERNEL);
148 if (d == NULL)
149 return ERR_PTR(-ENOMEM);
150 d->data = kvzalloc(size, GFP_KERNEL);
151 if (!d->data) {
152 kfree(d);
153 return ERR_PTR(-ENOMEM);
154 }
155 kref_init(&d->count);
156 INIT_LIST_HEAD(&d->list);
157
158 return d;
159}
160
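/*
 * All of the aa_unpack_*() primitives below follow the same convention:
 * e->pos is saved on entry and restored on any failure, so probing for an
 * optional element that turns out to be absent leaves the read head
 * untouched.  Only a successful unpack advances the stream.
 */
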
161/* test if read will be in packed data bounds */
162VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
163{
164 return (size <= e->end - e->pos);
165}
166EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);
167
168/**
169 * aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
170 * @e: serialized data read head (NOT NULL)
171 * @chunk: start address for chunk of data (NOT NULL)
172 *
173 * Returns: the size of chunk found with the read head at the end of the chunk.
174 */
175VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
176{
177 size_t size = 0;
178 void *pos = e->pos;
179
180 if (!aa_inbounds(e, sizeof(u16)))
181 goto fail;
182 size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
183 e->pos += sizeof(__le16);
184 if (!aa_inbounds(e, size))
185 goto fail;
186 *chunk = e->pos;
187 e->pos += size;
188 return size;
189
190fail:
191 e->pos = pos;
192 return 0;
193}
194EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);
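
/*
 * Illustrative byte layout of a u16 sized chunk as read above (example
 * bytes only, not taken from a real policy blob):
 *
 *	0x04 0x00  'f' 'o' 'o' '\0'
 *	(le16 len) (len bytes of payload)
 *
 * Here aa_unpack_u16_chunk() returns 4, points *chunk at the 'f', and
 * leaves e->pos just past the trailing NUL.  For AA_STRING/AA_NAME chunks
 * the payload includes the terminating NUL, so the length counts it too.
 */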
195
196/* unpack control byte */
197VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
198{
199 if (!aa_inbounds(e, 1))
200 return false;
201 if (*(u8 *) e->pos != code)
202 return false;
203 e->pos++;
204 return true;
205}
206EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);
207
208/**
209 * aa_unpack_nameX - check if the next element is of type X with a name of @name
210 * @e: serialized data extent information (NOT NULL)
211 * @code: type code
212 * @name: name to match to the serialized element. (MAYBE NULL)
213 *
214 * check that the next serialized data element is of type X and has a tag
215 * name @name. If @name is specified then there must be a matching
216 * name element in the stream. If @name is NULL any name element will be
217 * skipped and only the typecode will be tested.
218 *
219 * Returns true on success (both type code and name tests match) and the read
220 * head is advanced past the headers
221 *
222 * Returns: false if either match fails, the read head does not move
223 */
224VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
225{
226 /*
227 * May need to reset pos if name or type doesn't match
228 */
229 void *pos = e->pos;
230 /*
231	 * Check for the presence of a tagname; if present, the AA_NAME tag
232	 * is followed by a u16 sized name chunk.
233 */
234 if (aa_unpack_X(e, AA_NAME)) {
235 char *tag = NULL;
236 size_t size = aa_unpack_u16_chunk(e, &tag);
237 /* if a name is specified it must match. otherwise skip tag */
238 if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
239 goto fail;
240 } else if (name) {
241 /* if a name is specified and there is no name tag fail */
242 goto fail;
243 }
244
245 /* now check if type code matches */
246 if (aa_unpack_X(e, code))
247 return true;
248
249fail:
250 e->pos = pos;
251 return false;
252}
253EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);
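
/*
 * Illustrative wire encoding of a named u32 element, e.g. the "version"
 * element read by verify_header() (example bytes only):
 *
 *	AA_NAME  0x08 0x00  "version\0"  AA_U32  <4 byte le32 value>
 *
 * aa_unpack_nameX(e, AA_U32, "version") consumes everything up to and
 * including the AA_U32 type code; aa_unpack_u32() then reads the value.
 */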
254
255static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
256{
257 void *pos = e->pos;
258
259 if (aa_unpack_nameX(e, AA_U8, name)) {
260 if (!aa_inbounds(e, sizeof(u8)))
261 goto fail;
262 if (data)
263 *data = *((u8 *)e->pos);
264 e->pos += sizeof(u8);
265 return true;
266 }
267
268fail:
269 e->pos = pos;
270 return false;
271}
272
273VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
274{
275 void *pos = e->pos;
276
277 if (aa_unpack_nameX(e, AA_U32, name)) {
278 if (!aa_inbounds(e, sizeof(u32)))
279 goto fail;
280 if (data)
281 *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
282 e->pos += sizeof(u32);
283 return true;
284 }
285
286fail:
287 e->pos = pos;
288 return false;
289}
290EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);
291
292VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
293{
294 void *pos = e->pos;
295
296 if (aa_unpack_nameX(e, AA_U64, name)) {
297 if (!aa_inbounds(e, sizeof(u64)))
298 goto fail;
299 if (data)
300 *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
301 e->pos += sizeof(u64);
302 return true;
303 }
304
305fail:
306 e->pos = pos;
307 return false;
308}
309EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);
310
311static bool aa_unpack_cap_low(struct aa_ext *e, kernel_cap_t *data, const char *name)
312{
313 u32 val;
314
315 if (!aa_unpack_u32(e, &val, name))
316 return false;
317 data->val = val;
318 return true;
319}
320
321static bool aa_unpack_cap_high(struct aa_ext *e, kernel_cap_t *data, const char *name)
322{
323 u32 val;
324
325 if (!aa_unpack_u32(e, &val, name))
326 return false;
327 data->val = (u32)data->val | ((u64)val << 32);
328 return true;
329}
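
/*
 * The 64-bit kernel_cap_t is transported as two u32 words: the low word
 * first (aa_unpack_cap_low), then optionally the high word inside the
 * "caps64" struct (aa_unpack_cap_high).  For example CAP_SYS_ADMIN (21)
 * is carried in the low word, while capabilities numbered 32 and above
 * are only representable when the high word is supplied.
 */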
330
331VISIBLE_IF_KUNIT bool aa_unpack_array(struct aa_ext *e, const char *name, u16 *size)
332{
333 void *pos = e->pos;
334
335 if (aa_unpack_nameX(e, AA_ARRAY, name)) {
336 if (!aa_inbounds(e, sizeof(u16)))
337 goto fail;
338 *size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
339 e->pos += sizeof(u16);
340 return true;
341 }
342
343fail:
344 e->pos = pos;
345 return false;
346}
347EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);
348
349VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
350{
351 void *pos = e->pos;
352
353 if (aa_unpack_nameX(e, AA_BLOB, name)) {
354 u32 size;
355 if (!aa_inbounds(e, sizeof(u32)))
356 goto fail;
357 size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
358 e->pos += sizeof(u32);
359 if (aa_inbounds(e, (size_t) size)) {
360 *blob = e->pos;
361 e->pos += size;
362 return size;
363 }
364 }
365
366fail:
367 e->pos = pos;
368 return 0;
369}
370EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);
371
372VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
373{
374 char *src_str;
375 size_t size = 0;
376 void *pos = e->pos;
377 *string = NULL;
378 if (aa_unpack_nameX(e, AA_STRING, name)) {
379 size = aa_unpack_u16_chunk(e, &src_str);
380 if (size) {
381 /* strings are null terminated, length is size - 1 */
382 if (src_str[size - 1] != 0)
383 goto fail;
384 *string = src_str;
385
386 return size;
387 }
388 }
389
390fail:
391 e->pos = pos;
392 return 0;
393}
394EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);
395
396VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
397{
398 const char *tmp;
399 void *pos = e->pos;
400 int res = aa_unpack_str(e, &tmp, name);
401 *string = NULL;
402
403 if (!res)
404 return 0;
405
406 *string = kmemdup(tmp, res, GFP_KERNEL);
407 if (!*string) {
408 e->pos = pos;
409 return 0;
410 }
411
412 return res;
413}
414EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);
415
416
417/**
418 * unpack_dfa - unpack a file rule dfa
419 * @e: serialized data extent information (NOT NULL)
420 * @flags: dfa flags to check
421 *
422 * returns dfa or ERR_PTR or NULL if no dfa
423 */
424static struct aa_dfa *unpack_dfa(struct aa_ext *e, int flags)
425{
426 char *blob = NULL;
427 size_t size;
428 struct aa_dfa *dfa = NULL;
429
430 size = aa_unpack_blob(e, &blob, "aadfa");
431 if (size) {
432 /*
433		 * The dfa is aligned within the blob to 8 bytes
434		 * from the beginning of the stream.
435		 * An alignment adjustment is needed by the dfa unpack.
436 */
437 size_t sz = blob - (char *) e->start -
438 ((e->pos - e->start) & 7);
439 size_t pad = ALIGN(sz, 8) - sz;
440 if (aa_g_paranoid_load)
441 flags |= DFA_FLAG_VERIFY_STATES;
442 dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
443
444 if (IS_ERR(dfa))
445 return dfa;
446
447 }
448
449 return dfa;
450}
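
/*
 * Note: when aa_g_paranoid_load is set (the default), DFA_FLAG_VERIFY_STATES
 * is added above so that aa_dfa_unpack() verifies the state transition
 * tables instead of trusting the loader, at the cost of a slower policy
 * load.
 */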
451
452/**
453 * unpack_trans_table - unpack a profile transition table
454 * @e: serialized data extent information (NOT NULL)
455 * @strs: str table to unpack to (NOT NULL)
456 *
457 * Returns: true if table successfully unpacked or not present
458 */
459static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
460{
461 void *saved_pos = e->pos;
462 char **table = NULL;
463
464 /* exec table is optional */
465 if (aa_unpack_nameX(e, AA_STRUCT, "xtable")) {
466 u16 size;
467 int i;
468
469 if (!aa_unpack_array(e, NULL, &size))
470 /*
471			 * Note: an index into the trans table array can be
472			 * at most 2^24, but aa_unpack_array() can currently
473			 * only unpack arrays of up to 2^16 entries, so no
474			 * size check is needed here
475 */
476 goto fail;
477 table = kcalloc(size, sizeof(char *), GFP_KERNEL);
478 if (!table)
479 goto fail;
480
481 strs->table = table;
482 strs->size = size;
483 for (i = 0; i < size; i++) {
484 char *str;
485 int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
486 /* aa_unpack_strdup verifies that the last character is
487 * null termination byte.
488 */
489 if (!size2)
490 goto fail;
491 table[i] = str;
492 /* verify that name doesn't start with space */
493 if (isspace(*str))
494 goto fail;
495
496			/* count the number of embedded \0 characters */
497 for (c = j = 0; j < size2 - 1; j++) {
498 if (!str[j]) {
499 pos = j;
500 c++;
501 }
502 }
503 if (*str == ':') {
504 /* first character after : must be valid */
505 if (!str[1])
506 goto fail;
507 /* beginning with : requires an embedded \0,
508 * verify that exactly 1 internal \0 exists
509 * trailing \0 already verified by aa_unpack_strdup
510 *
511 * convert \0 back to : for label_parse
512 */
513 if (c == 1)
514 str[pos] = ':';
515 else if (c > 1)
516 goto fail;
517 } else if (c)
518 /* fail - all other cases with embedded \0 */
519 goto fail;
520 }
521 if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
522 goto fail;
523 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
524 goto fail;
525 }
526 return true;
527
528fail:
529 aa_free_str_table(strs);
530 e->pos = saved_pos;
531 return false;
532}
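
/*
 * Illustrative xtable entry: a transition target that names a label has
 * its separating ':' transmitted as an embedded NUL, e.g. the string
 * ":ns\0profile" (plus the usual trailing NUL).  The loop above checks
 * that exactly one embedded NUL is present and rewrites it back to ':',
 * yielding ":ns:profile" for label_parse().
 */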
533
534static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
535{
536 void *pos = e->pos;
537
538 if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
539 u16 size;
540 int i;
541
542 if (!aa_unpack_array(e, NULL, &size))
543 goto fail;
544 profile->attach.xattr_count = size;
545 profile->attach.xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
546 if (!profile->attach.xattrs)
547 goto fail;
548 for (i = 0; i < size; i++) {
549 if (!aa_unpack_strdup(e, &profile->attach.xattrs[i], NULL))
550 goto fail;
551 }
552 if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
553 goto fail;
554 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
555 goto fail;
556 }
557
558 return true;
559
560fail:
561 e->pos = pos;
562 return false;
563}
564
565static bool unpack_secmark(struct aa_ext *e, struct aa_ruleset *rules)
566{
567 void *pos = e->pos;
568 u16 size;
569 int i;
570
571 if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
572 if (!aa_unpack_array(e, NULL, &size))
573 goto fail;
574
575 rules->secmark = kcalloc(size, sizeof(struct aa_secmark),
576 GFP_KERNEL);
577 if (!rules->secmark)
578 goto fail;
579
580 rules->secmark_count = size;
581
582 for (i = 0; i < size; i++) {
583 if (!unpack_u8(e, &rules->secmark[i].audit, NULL))
584 goto fail;
585 if (!unpack_u8(e, &rules->secmark[i].deny, NULL))
586 goto fail;
587 if (!aa_unpack_strdup(e, &rules->secmark[i].label, NULL))
588 goto fail;
589 }
590 if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
591 goto fail;
592 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
593 goto fail;
594 }
595
596 return true;
597
598fail:
599 if (rules->secmark) {
600 for (i = 0; i < size; i++)
601 kfree(rules->secmark[i].label);
602 kfree(rules->secmark);
603 rules->secmark_count = 0;
604 rules->secmark = NULL;
605 }
606
607 e->pos = pos;
608 return false;
609}
610
611static bool unpack_rlimits(struct aa_ext *e, struct aa_ruleset *rules)
612{
613 void *pos = e->pos;
614
615 /* rlimits are optional */
616 if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
617 u16 size;
618 int i;
619 u32 tmp = 0;
620 if (!aa_unpack_u32(e, &tmp, NULL))
621 goto fail;
622 rules->rlimits.mask = tmp;
623
624 if (!aa_unpack_array(e, NULL, &size) ||
625 size > RLIM_NLIMITS)
626 goto fail;
627 for (i = 0; i < size; i++) {
628 u64 tmp2 = 0;
629 int a = aa_map_resource(i);
630 if (!aa_unpack_u64(e, &tmp2, NULL))
631 goto fail;
632 rules->rlimits.limits[a].rlim_max = tmp2;
633 }
634 if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
635 goto fail;
636 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
637 goto fail;
638 }
639 return true;
640
641fail:
642 e->pos = pos;
643 return false;
644}
645
646static bool unpack_perm(struct aa_ext *e, u32 version, struct aa_perms *perm)
647{
648 u32 reserved;
649
650 if (version != 1)
651 return false;
652
653 /* reserved entry is for later expansion, discard for now */
654 return aa_unpack_u32(e, &reserved, NULL) &&
655 aa_unpack_u32(e, &perm->allow, NULL) &&
656 aa_unpack_u32(e, &perm->deny, NULL) &&
657 aa_unpack_u32(e, &perm->subtree, NULL) &&
658 aa_unpack_u32(e, &perm->cond, NULL) &&
659 aa_unpack_u32(e, &perm->kill, NULL) &&
660 aa_unpack_u32(e, &perm->complain, NULL) &&
661 aa_unpack_u32(e, &perm->prompt, NULL) &&
662 aa_unpack_u32(e, &perm->audit, NULL) &&
663 aa_unpack_u32(e, &perm->quiet, NULL) &&
664 aa_unpack_u32(e, &perm->hide, NULL) &&
665 aa_unpack_u32(e, &perm->xindex, NULL) &&
666 aa_unpack_u32(e, &perm->tag, NULL) &&
667 aa_unpack_u32(e, &perm->label, NULL);
668}
669
670static ssize_t unpack_perms_table(struct aa_ext *e, struct aa_perms **perms)
671{
672 void *pos = e->pos;
673 u16 size = 0;
674
675 AA_BUG(!perms);
676 /*
677 * policy perms are optional, in which case perms are embedded
678 * in the dfa accept table
679 */
680 if (aa_unpack_nameX(e, AA_STRUCT, "perms")) {
681 int i;
682 u32 version;
683
684 if (!aa_unpack_u32(e, &version, "version"))
685 goto fail_reset;
686 if (!aa_unpack_array(e, NULL, &size))
687 goto fail_reset;
688 *perms = kcalloc(size, sizeof(struct aa_perms), GFP_KERNEL);
689 if (!*perms)
690 goto fail_reset;
691 for (i = 0; i < size; i++) {
692 if (!unpack_perm(e, version, &(*perms)[i]))
693 goto fail;
694 }
695 if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
696 goto fail;
697 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
698 goto fail;
699 } else
700 *perms = NULL;
701
702 return size;
703
704fail:
705 kfree(*perms);
706fail_reset:
707 e->pos = pos;
708 return -EPROTO;
709}
710
711static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy,
712 bool required_dfa, bool required_trans,
713 const char **info)
714{
715 struct aa_policydb *pdb;
716 void *pos = e->pos;
717 int i, flags, error = -EPROTO;
718 ssize_t size;
719
720 pdb = aa_alloc_pdb(GFP_KERNEL);
721 if (!pdb)
722 return -ENOMEM;
723
724 size = unpack_perms_table(e, &pdb->perms);
725 if (size < 0) {
726 error = size;
727 pdb->perms = NULL;
728 *info = "failed to unpack - perms";
729 goto fail;
730 }
731 pdb->size = size;
732
733 if (pdb->perms) {
734		/* perms table present, accept entries are indexes into it */
735 flags = TO_ACCEPT1_FLAG(YYTD_DATA32);
736 } else {
737 /* packed perms in accept1 and accept2 */
738 flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
739 TO_ACCEPT2_FLAG(YYTD_DATA32);
740 }
741
742 pdb->dfa = unpack_dfa(e, flags);
743 if (IS_ERR(pdb->dfa)) {
744 error = PTR_ERR(pdb->dfa);
745 pdb->dfa = NULL;
746 *info = "failed to unpack - dfa";
747 goto fail;
748 } else if (!pdb->dfa) {
749 if (required_dfa) {
750 *info = "missing required dfa";
751 goto fail;
752 }
753 } else {
754 /*
755 * only unpack the following if a dfa is present
756 *
757 * sadly start was given different names for file and policydb
758 * but since it is optional we can try both
759 */
760 if (!aa_unpack_u32(e, &pdb->start[0], "start"))
761 /* default start state */
762 pdb->start[0] = DFA_START;
763 if (!aa_unpack_u32(e, &pdb->start[AA_CLASS_FILE], "dfa_start")) {
764 /* default start state for xmatch and file dfa */
765 pdb->start[AA_CLASS_FILE] = DFA_START;
766 } /* setup class index */
767 for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) {
768 pdb->start[i] = aa_dfa_next(pdb->dfa, pdb->start[0],
769 i);
770 }
771 }
772
773 /*
774 * Unfortunately due to a bug in earlier userspaces, a
775 * transition table may be present even when the dfa is
776 * not. For compatibility reasons unpack and discard.
777 */
778 if (!unpack_trans_table(e, &pdb->trans) && required_trans) {
779 *info = "failed to unpack profile transition table";
780 goto fail;
781 }
782
783 if (!pdb->dfa && pdb->trans.table)
784 aa_free_str_table(&pdb->trans);
785
786 /* TODO: move compat mapping here, requires dfa merging first */
787 /* TODO: move verify here, it has to be done after compat mappings */
788
789 *policy = pdb;
790 return 0;
791
792fail:
793 aa_put_pdb(pdb);
794 e->pos = pos;
795 return error;
796}
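
/*
 * unpack_pdb() is called three times while unpacking a profile, with the
 * required_dfa/required_trans arguments reflecting what each policy db
 * may legitimately omit:
 *
 *	xmatch  : unpack_pdb(e, &profile->attach.xmatch, false, false, &info)
 *	policydb: unpack_pdb(e, &rules->policy, true, false, &info)
 *	file    : unpack_pdb(e, &rules->file, false, true, &info)
 */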
797
798static u32 strhash(const void *data, u32 len, u32 seed)
799{
800 const char * const *key = data;
801
802 return jhash(*key, strlen(*key), seed);
803}
804
805static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
806{
807 const struct aa_data *data = obj;
808 const char * const *key = arg->key;
809
810 return strcmp(data->key, *key);
811}
812
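/*
 * Rough order of the elements consumed by unpack_profile() below:
 * profile name (with optional ns prefix), rename, attach string, xmatch
 * policydb and xmatch_len, disconnected path, the "flags" struct
 * (hat/debug flags, mode, audit), path_flags, capability sets (low words,
 * then the optional "caps64" and "capsx" structs), xattrs, rlimits,
 * secmark, the generic policydb, the file policydb, and finally the
 * key/value "data" table.
 */
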
813/**
814 * unpack_profile - unpack a serialized profile
815 * @e: serialized data extent information (NOT NULL)
816 * @ns_name: Returns - newly allocated copy of the ns name, or %NULL if none or on error
817 *
818 * NOTE: unpack profile sets audit struct if there is a failure
819 */
820static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
821{
822 struct aa_ruleset *rules;
823 struct aa_profile *profile = NULL;
824 const char *tmpname, *tmpns = NULL, *name = NULL;
825 const char *info = "failed to unpack profile";
826 size_t ns_len;
827 struct rhashtable_params params = { 0 };
828 char *key = NULL, *disconnected = NULL;
829 struct aa_data *data;
830 int error = -EPROTO;
831 kernel_cap_t tmpcap;
832 u32 tmp;
833
834 *ns_name = NULL;
835
836 /* check that we have the right struct being passed */
837 if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
838 goto fail;
839 if (!aa_unpack_str(e, &name, NULL))
840 goto fail;
841 if (*name == '\0')
842 goto fail;
843
844 tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
845 if (tmpns) {
846 if (!tmpname) {
847 info = "empty profile name";
848 goto fail;
849 }
850 *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
851 if (!*ns_name) {
852 info = "out of memory";
853 error = -ENOMEM;
854 goto fail;
855 }
856 name = tmpname;
857 }
858
859 profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
860 if (!profile) {
861 info = "out of memory";
862 error = -ENOMEM;
863 goto fail;
864 }
865 rules = list_first_entry(&profile->rules, typeof(*rules), list);
866
867 /* profile renaming is optional */
868 (void) aa_unpack_str(e, &profile->rename, "rename");
869
870 /* attachment string is optional */
871 (void) aa_unpack_str(e, &profile->attach.xmatch_str, "attach");
872
873 /* xmatch is optional and may be NULL */
874 error = unpack_pdb(e, &profile->attach.xmatch, false, false, &info);
875 if (error) {
876 info = "bad xmatch";
877 goto fail;
878 }
879
880	/* neither xmatch_len nor xmatch_perms is optional if xmatch is set */
881 if (profile->attach.xmatch->dfa) {
882 if (!aa_unpack_u32(e, &tmp, NULL)) {
883 info = "missing xmatch len";
884 goto fail;
885 }
886 profile->attach.xmatch_len = tmp;
887 profile->attach.xmatch->start[AA_CLASS_XMATCH] = DFA_START;
888 if (!profile->attach.xmatch->perms) {
889 error = aa_compat_map_xmatch(profile->attach.xmatch);
890 if (error) {
891 info = "failed to convert xmatch permission table";
892 goto fail;
893 }
894 }
895 }
896
897 /* disconnected attachment string is optional */
898 (void) aa_unpack_strdup(e, &disconnected, "disconnected");
899 profile->disconnected = disconnected;
900
901 /* per profile debug flags (complain, audit) */
902 if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
903 info = "profile missing flags";
904 goto fail;
905 }
906 info = "failed to unpack profile flags";
907 if (!aa_unpack_u32(e, &tmp, NULL))
908 goto fail;
909 if (tmp & PACKED_FLAG_HAT)
910 profile->label.flags |= FLAG_HAT;
911 if (tmp & PACKED_FLAG_DEBUG1)
912 profile->label.flags |= FLAG_DEBUG1;
913 if (tmp & PACKED_FLAG_DEBUG2)
914 profile->label.flags |= FLAG_DEBUG2;
915 if (!aa_unpack_u32(e, &tmp, NULL))
916 goto fail;
917 if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
918 profile->mode = APPARMOR_COMPLAIN;
919 } else if (tmp == PACKED_MODE_ENFORCE) {
920 profile->mode = APPARMOR_ENFORCE;
921 } else if (tmp == PACKED_MODE_KILL) {
922 profile->mode = APPARMOR_KILL;
923 } else if (tmp == PACKED_MODE_UNCONFINED) {
924 profile->mode = APPARMOR_UNCONFINED;
925 profile->label.flags |= FLAG_UNCONFINED;
926 } else if (tmp == PACKED_MODE_USER) {
927 profile->mode = APPARMOR_USER;
928 } else {
929 goto fail;
930 }
931 if (!aa_unpack_u32(e, &tmp, NULL))
932 goto fail;
933 if (tmp)
934 profile->audit = AUDIT_ALL;
935
936 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
937 goto fail;
938
939 /* path_flags is optional */
940 if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
941 profile->path_flags |= profile->label.flags &
942 PATH_MEDIATE_DELETED;
943 else
944 /* set a default value if path_flags field is not present */
945 profile->path_flags = PATH_MEDIATE_DELETED;
946
947 info = "failed to unpack profile capabilities";
948 if (!aa_unpack_cap_low(e, &rules->caps.allow, NULL))
949 goto fail;
950 if (!aa_unpack_cap_low(e, &rules->caps.audit, NULL))
951 goto fail;
952 if (!aa_unpack_cap_low(e, &rules->caps.quiet, NULL))
953 goto fail;
954 if (!aa_unpack_cap_low(e, &tmpcap, NULL))
955 goto fail;
956
957 info = "failed to unpack upper profile capabilities";
958 if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
959 /* optional upper half of 64 bit caps */
960 if (!aa_unpack_cap_high(e, &rules->caps.allow, NULL))
961 goto fail;
962 if (!aa_unpack_cap_high(e, &rules->caps.audit, NULL))
963 goto fail;
964 if (!aa_unpack_cap_high(e, &rules->caps.quiet, NULL))
965 goto fail;
966 if (!aa_unpack_cap_high(e, &tmpcap, NULL))
967 goto fail;
968 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
969 goto fail;
970 }
971
972 info = "failed to unpack extended profile capabilities";
973 if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
974 /* optional extended caps mediation mask */
975 if (!aa_unpack_cap_low(e, &rules->caps.extended, NULL))
976 goto fail;
977 if (!aa_unpack_cap_high(e, &rules->caps.extended, NULL))
978 goto fail;
979 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
980 goto fail;
981 }
982
983 if (!unpack_xattrs(e, profile)) {
984 info = "failed to unpack profile xattrs";
985 goto fail;
986 }
987
988 if (!unpack_rlimits(e, rules)) {
989 info = "failed to unpack profile rlimits";
990 goto fail;
991 }
992
993 if (!unpack_secmark(e, rules)) {
994 info = "failed to unpack profile secmark rules";
995 goto fail;
996 }
997
998 if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
999 /* generic policy dfa - optional and may be NULL */
1000 info = "failed to unpack policydb";
1001 error = unpack_pdb(e, &rules->policy, true, false,
1002 &info);
1003 if (error)
1004 goto fail;
1005 /* Fixup: drop when we get rid of start array */
1006 if (aa_dfa_next(rules->policy->dfa, rules->policy->start[0],
1007 AA_CLASS_FILE))
1008 rules->policy->start[AA_CLASS_FILE] =
1009 aa_dfa_next(rules->policy->dfa,
1010 rules->policy->start[0],
1011 AA_CLASS_FILE);
1012 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
1013 goto fail;
1014 if (!rules->policy->perms) {
1015 error = aa_compat_map_policy(rules->policy,
1016 e->version);
1017 if (error) {
1018 info = "failed to remap policydb permission table";
1019 goto fail;
1020 }
1021 }
1022 } else {
1023 rules->policy = aa_get_pdb(nullpdb);
1024 }
1025 /* get file rules */
1026 error = unpack_pdb(e, &rules->file, false, true, &info);
1027 if (error) {
1028 goto fail;
1029 } else if (rules->file->dfa) {
1030 if (!rules->file->perms) {
1031 error = aa_compat_map_file(rules->file);
1032 if (error) {
1033 info = "failed to remap file permission table";
1034 goto fail;
1035 }
1036 }
1037 } else if (rules->policy->dfa &&
1038 rules->policy->start[AA_CLASS_FILE]) {
1039 aa_put_pdb(rules->file);
1040 rules->file = aa_get_pdb(rules->policy);
1041 } else {
1042 aa_put_pdb(rules->file);
1043 rules->file = aa_get_pdb(nullpdb);
1044 }
1045 error = -EPROTO;
1046 if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
1047 info = "out of memory";
1048 profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
1049 if (!profile->data) {
1050 error = -ENOMEM;
1051 goto fail;
1052 }
1053 params.nelem_hint = 3;
1054 params.key_len = sizeof(void *);
1055 params.key_offset = offsetof(struct aa_data, key);
1056 params.head_offset = offsetof(struct aa_data, head);
1057 params.hashfn = strhash;
1058 params.obj_cmpfn = datacmp;
1059
1060		if (rhashtable_init(profile->data, &params)) {
1061 info = "failed to init key, value hash table";
1062 goto fail;
1063 }
1064
1065 while (aa_unpack_strdup(e, &key, NULL)) {
1066 data = kzalloc(sizeof(*data), GFP_KERNEL);
1067 if (!data) {
1068 kfree_sensitive(key);
1069 error = -ENOMEM;
1070 goto fail;
1071 }
1072
1073 data->key = key;
1074 data->size = aa_unpack_blob(e, &data->data, NULL);
1075 data->data = kvmemdup(data->data, data->size, GFP_KERNEL);
1076 if (data->size && !data->data) {
1077 kfree_sensitive(data->key);
1078 kfree_sensitive(data);
1079 error = -ENOMEM;
1080 goto fail;
1081 }
1082
1083 if (rhashtable_insert_fast(profile->data, &data->head,
1084 profile->data->p)) {
1085 kvfree_sensitive(data->data, data->size);
1086 kfree_sensitive(data->key);
1087 kfree_sensitive(data);
1088 info = "failed to insert data to table";
1089 goto fail;
1090 }
1091 }
1092
1093 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
1094 info = "failed to unpack end of key, value data table";
1095 goto fail;
1096 }
1097 }
1098
1099 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
1100 info = "failed to unpack end of profile";
1101 goto fail;
1102 }
1103
1104 return profile;
1105
1106fail:
1107 if (error == 0)
1108 /* default error covers most cases */
1109 error = -EPROTO;
1110 if (*ns_name) {
1111 kfree(*ns_name);
1112 *ns_name = NULL;
1113 }
1114 if (profile)
1115 name = NULL;
1116 else if (!name)
1117 name = "unknown";
1118 audit_iface(profile, NULL, name, info, e, error);
1119 aa_free_profile(profile);
1120
1121 return ERR_PTR(error);
1122}
1123
1124/**
1125 * verify_header - unpack serialized stream header
1126 * @e: serialized data read head (NOT NULL)
1127 * @required: whether the header is required or optional
1128 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
1129 *
1130 * Returns: error or 0 if header is good
1131 */
1132static int verify_header(struct aa_ext *e, int required, const char **ns)
1133{
1134 int error = -EPROTONOSUPPORT;
1135 const char *name = NULL;
1136 *ns = NULL;
1137
1138 /* get the interface version */
1139 if (!aa_unpack_u32(e, &e->version, "version")) {
1140 if (required) {
1141 audit_iface(NULL, NULL, NULL, "invalid profile format",
1142 e, error);
1143 return error;
1144 }
1145 }
1146
1147	/* Check that the interface version is currently supported.
1148	 * If it is not specified, the previously read version is used.
1149	 * Mask off everything that is not the kernel abi version.
1150 */
1151 if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v9)) {
1152 audit_iface(NULL, NULL, NULL, "unsupported interface version",
1153 e, error);
1154 return error;
1155 }
1156
1157 /* read the namespace if present */
1158 if (aa_unpack_str(e, &name, "namespace")) {
1159 if (*name == '\0') {
1160 audit_iface(NULL, NULL, NULL, "invalid namespace name",
1161 e, error);
1162 return error;
1163 }
1164 if (*ns && strcmp(*ns, name)) {
1165 audit_iface(NULL, NULL, NULL, "invalid ns change", e,
1166 error);
1167 } else if (!*ns) {
1168 *ns = kstrdup(name, GFP_KERNEL);
1169 if (!*ns)
1170 return -ENOMEM;
1171 }
1172 }
1173
1174 return 0;
1175}
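
/*
 * The version word checked above packs more than the abi level: its low
 * bits (K_ABI_MASK) carry the kernel abi version compared by
 * VERSION_LT()/VERSION_GT(), while higher bits carry flags such as
 * FORCE_COMPLAIN_FLAG, which unpack_profile() honours when selecting the
 * profile mode.
 */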
1176
1177/**
1178 * verify_dfa_accept_index - verify accept indexes are in range of perms table
1179 * @dfa: the dfa to check accept indexes are in range
1180 * @table_size: the permission table size the indexes should be within
1181 */
1182static bool verify_dfa_accept_index(struct aa_dfa *dfa, int table_size)
1183{
1184 int i;
1185 for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
1186 if (ACCEPT_TABLE(dfa)[i] >= table_size)
1187 return false;
1188 }
1189 return true;
1190}
1191
1192static bool verify_perm(struct aa_perms *perm)
1193{
1194 /* TODO: allow option to just force the perms into a valid state */
1195 if (perm->allow & perm->deny)
1196 return false;
1197 if (perm->subtree & ~perm->allow)
1198 return false;
1199 if (perm->cond & (perm->allow | perm->deny))
1200 return false;
1201 if (perm->kill & perm->allow)
1202 return false;
1203 if (perm->complain & (perm->allow | perm->deny))
1204 return false;
1205 if (perm->prompt & (perm->allow | perm->deny))
1206 return false;
1207 if (perm->complain & perm->prompt)
1208 return false;
1209 if (perm->hide & perm->allow)
1210 return false;
1211
1212 return true;
1213}
1214
1215static bool verify_perms(struct aa_policydb *pdb)
1216{
1217 int i;
1218
1219 for (i = 0; i < pdb->size; i++) {
1220 if (!verify_perm(&pdb->perms[i]))
1221 return false;
1222 /* verify indexes into str table */
1223 if ((pdb->perms[i].xindex & AA_X_TYPE_MASK) == AA_X_TABLE &&
1224 (pdb->perms[i].xindex & AA_X_INDEX_MASK) >= pdb->trans.size)
1225 return false;
1226 if (pdb->perms[i].tag && pdb->perms[i].tag >= pdb->trans.size)
1227 return false;
1228 if (pdb->perms[i].label &&
1229 pdb->perms[i].label >= pdb->trans.size)
1230 return false;
1231 }
1232
1233 return true;
1234}
1235
1236/**
1237 * verify_profile - Do post unpack analysis to verify profile consistency
1238 * @profile: profile to verify (NOT NULL)
1239 *
1240 * Returns: 0 if passes verification else error
1241 *
1242 * This verification is post any unpack mapping or changes
1243 */
1244static int verify_profile(struct aa_profile *profile)
1245{
1246 struct aa_ruleset *rules = list_first_entry(&profile->rules,
1247 typeof(*rules), list);
1248 if (!rules)
1249 return 0;
1250
1251 if (rules->file->dfa && !verify_dfa_accept_index(rules->file->dfa,
1252 rules->file->size)) {
1253 audit_iface(profile, NULL, NULL,
1254 "Unpack: file Invalid named transition", NULL,
1255 -EPROTO);
1256 return -EPROTO;
1257 }
1258 if (rules->policy->dfa &&
1259 !verify_dfa_accept_index(rules->policy->dfa, rules->policy->size)) {
1260 audit_iface(profile, NULL, NULL,
1261 "Unpack: policy Invalid named transition", NULL,
1262 -EPROTO);
1263 return -EPROTO;
1264 }
1265
1266 if (!verify_perms(rules->file)) {
1267 audit_iface(profile, NULL, NULL,
1268 "Unpack: Invalid perm index", NULL, -EPROTO);
1269 return -EPROTO;
1270 }
1271 if (!verify_perms(rules->policy)) {
1272 audit_iface(profile, NULL, NULL,
1273 "Unpack: Invalid perm index", NULL, -EPROTO);
1274 return -EPROTO;
1275 }
1276 if (!verify_perms(profile->attach.xmatch)) {
1277 audit_iface(profile, NULL, NULL,
1278 "Unpack: Invalid perm index", NULL, -EPROTO);
1279 return -EPROTO;
1280 }
1281
1282 return 0;
1283}
1284
1285void aa_load_ent_free(struct aa_load_ent *ent)
1286{
1287 if (ent) {
1288 aa_put_profile(ent->rename);
1289 aa_put_profile(ent->old);
1290 aa_put_profile(ent->new);
1291 kfree(ent->ns_name);
1292 kfree_sensitive(ent);
1293 }
1294}
1295
1296struct aa_load_ent *aa_load_ent_alloc(void)
1297{
1298 struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
1299 if (ent)
1300 INIT_LIST_HEAD(&ent->list);
1301 return ent;
1302}
1303
1304static int compress_zstd(const char *src, size_t slen, char **dst, size_t *dlen)
1305{
1306#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
1307 const zstd_parameters params =
1308 zstd_get_params(aa_g_rawdata_compression_level, slen);
1309	const size_t wksp_len = zstd_cctx_workspace_bound(&params.cParams);
1310 void *wksp = NULL;
1311 zstd_cctx *ctx = NULL;
1312 size_t out_len = zstd_compress_bound(slen);
1313 void *out = NULL;
1314 int ret = 0;
1315
1316 out = kvzalloc(out_len, GFP_KERNEL);
1317 if (!out) {
1318 ret = -ENOMEM;
1319 goto cleanup;
1320 }
1321
1322 wksp = kvzalloc(wksp_len, GFP_KERNEL);
1323 if (!wksp) {
1324 ret = -ENOMEM;
1325 goto cleanup;
1326 }
1327
1328 ctx = zstd_init_cctx(wksp, wksp_len);
1329 if (!ctx) {
1330 ret = -EINVAL;
1331 goto cleanup;
1332 }
1333
1334	out_len = zstd_compress_cctx(ctx, out, out_len, src, slen, &params);
1335 if (zstd_is_error(out_len) || out_len >= slen) {
1336 ret = -EINVAL;
1337 goto cleanup;
1338 }
1339
1340 if (is_vmalloc_addr(out)) {
1341 *dst = kvzalloc(out_len, GFP_KERNEL);
1342 if (*dst) {
1343 memcpy(*dst, out, out_len);
1344 kvfree(out);
1345 out = NULL;
1346 }
1347 } else {
1348 /*
1349 * If the staging buffer was kmalloc'd, then using krealloc is
1350 * probably going to be faster. The destination buffer will
1351 * always be smaller, so it's just shrunk, avoiding a memcpy
1352 */
1353 *dst = krealloc(out, out_len, GFP_KERNEL);
1354 }
1355
1356 if (!*dst) {
1357 ret = -ENOMEM;
1358 goto cleanup;
1359 }
1360
1361 *dlen = out_len;
1362
1363cleanup:
1364 if (ret) {
1365 kvfree(out);
1366 *dst = NULL;
1367 }
1368
1369 kvfree(wksp);
1370 return ret;
1371#else
1372 *dlen = slen;
1373 return 0;
1374#endif
1375}
1376
1377static int compress_loaddata(struct aa_loaddata *data)
1378{
1379 AA_BUG(data->compressed_size > 0);
1380
1381 /*
1382 * Shortcut the no compression case, else we increase the amount of
1383 * storage required by a small amount
1384 */
1385 if (aa_g_rawdata_compression_level != 0) {
1386 void *udata = data->data;
1387 int error = compress_zstd(udata, data->size, &data->data,
1388 &data->compressed_size);
1389 if (error) {
1390 data->compressed_size = data->size;
1391 return error;
1392 }
1393 if (udata != data->data)
1394 kvfree(udata);
1395 } else
1396 data->compressed_size = data->size;
1397
1398 return 0;
1399}
1400
1401/**
1402 * aa_unpack - unpack packed binary profile(s) data loaded from user space
1403 * @udata: user data copied to kmem (NOT NULL)
1404 * @lh: list to place unpacked profiles in an aa_repl_ws
1405 * @ns: Returns - namespace the profile is in if specified, else NULL (NOT NULL)
1406 *
1407 * Unpack user data and return refcounted allocated profile(s) stored in
1408 * @lh in order of discovery, with the list chain stored in base.list,
1409 * or an error.
1410 *
1411 * Returns: profile(s) on @lh else error pointer if fails to unpack
1412 */
1413int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
1414 const char **ns)
1415{
1416 struct aa_load_ent *tmp, *ent;
1417 struct aa_profile *profile = NULL;
1418 char *ns_name = NULL;
1419 int error;
1420 struct aa_ext e = {
1421 .start = udata->data,
1422 .end = udata->data + udata->size,
1423 .pos = udata->data,
1424 };
1425
1426 *ns = NULL;
1427 while (e.pos < e.end) {
1428 void *start;
1429 error = verify_header(&e, e.pos == e.start, ns);
1430 if (error)
1431 goto fail;
1432
1433 start = e.pos;
1434 profile = unpack_profile(&e, &ns_name);
1435 if (IS_ERR(profile)) {
1436 error = PTR_ERR(profile);
1437 goto fail;
1438 }
1439
1440 error = verify_profile(profile);
1441 if (error)
1442 goto fail_profile;
1443
1444 if (aa_g_hash_policy)
1445 error = aa_calc_profile_hash(profile, e.version, start,
1446 e.pos - start);
1447 if (error)
1448 goto fail_profile;
1449
1450 ent = aa_load_ent_alloc();
1451 if (!ent) {
1452 error = -ENOMEM;
1453 goto fail_profile;
1454 }
1455
1456 ent->new = profile;
1457 ent->ns_name = ns_name;
1458 ns_name = NULL;
1459 list_add_tail(&ent->list, lh);
1460 }
1461 udata->abi = e.version & K_ABI_MASK;
1462 if (aa_g_hash_policy) {
1463 udata->hash = aa_calc_hash(udata->data, udata->size);
1464 if (IS_ERR(udata->hash)) {
1465 error = PTR_ERR(udata->hash);
1466 udata->hash = NULL;
1467 goto fail;
1468 }
1469 }
1470
1471 if (aa_g_export_binary) {
1472 error = compress_loaddata(udata);
1473 if (error)
1474 goto fail;
1475 }
1476 return 0;
1477
1478fail_profile:
1479 kfree(ns_name);
1480 aa_put_profile(profile);
1481
1482fail:
1483 list_for_each_entry_safe(ent, tmp, lh, list) {
1484 list_del_init(&ent->list);
1485 aa_load_ent_free(ent);
1486 }
1487
1488 return error;
1489}
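
/*
 * Sketch of the caller-side flow (see aa_replace_profiles() in policy.c
 * for the real thing):
 *
 *	LIST_HEAD(lh);
 *	struct aa_load_ent *ent, *tmp;
 *
 *	error = aa_unpack(udata, &lh, &ns_name);
 *	if (!error)
 *		list_for_each_entry_safe(ent, tmp, &lh, list) {
 *			// bind ent->new into its namespace, then
 *			// aa_load_ent_free(ent) once finished with it
 *		}
 */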
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * AppArmor security module
4 *
5 * This file contains AppArmor functions for unpacking policy loaded from
6 * userspace.
7 *
8 * Copyright (C) 1998-2008 Novell/SUSE
9 * Copyright 2009-2010 Canonical Ltd.
10 *
11 * AppArmor uses a serialized binary format for loading policy. To find
12 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
13 * All policy is validated before it is used.
14 */
15
16#include <asm/unaligned.h>
17#include <linux/ctype.h>
18#include <linux/errno.h>
19#include <linux/zlib.h>
20
21#include "include/apparmor.h"
22#include "include/audit.h"
23#include "include/cred.h"
24#include "include/crypto.h"
25#include "include/match.h"
26#include "include/path.h"
27#include "include/policy.h"
28#include "include/policy_unpack.h"
29
30#define K_ABI_MASK 0x3ff
31#define FORCE_COMPLAIN_FLAG 0x800
32#define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
33#define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))
34
35#define v5 5 /* base version */
36#define v6 6 /* per entry policydb mediation check */
37#define v7 7
38#define v8 8 /* full network masking */
39
40/*
41 * The AppArmor interface treats data as a type byte followed by the
42 * actual data. The interface has the notion of a a named entry
43 * which has a name (AA_NAME typecode followed by name string) followed by
44 * the entries typecode and data. Named types allow for optional
45 * elements and extensions to be added and tested for without breaking
46 * backwards compatibility.
47 */
48
49enum aa_code {
50 AA_U8,
51 AA_U16,
52 AA_U32,
53 AA_U64,
54 AA_NAME, /* same as string except it is items name */
55 AA_STRING,
56 AA_BLOB,
57 AA_STRUCT,
58 AA_STRUCTEND,
59 AA_LIST,
60 AA_LISTEND,
61 AA_ARRAY,
62 AA_ARRAYEND,
63};
64
65/*
66 * aa_ext is the read of the buffer containing the serialized profile. The
67 * data is copied into a kernel buffer in apparmorfs and then handed off to
68 * the unpack routines.
69 */
70struct aa_ext {
71 void *start;
72 void *end;
73 void *pos; /* pointer to current position in the buffer */
74 u32 version;
75};
76
77/* audit callback for unpack fields */
78static void audit_cb(struct audit_buffer *ab, void *va)
79{
80 struct common_audit_data *sa = va;
81
82 if (aad(sa)->iface.ns) {
83 audit_log_format(ab, " ns=");
84 audit_log_untrustedstring(ab, aad(sa)->iface.ns);
85 }
86 if (aad(sa)->name) {
87 audit_log_format(ab, " name=");
88 audit_log_untrustedstring(ab, aad(sa)->name);
89 }
90 if (aad(sa)->iface.pos)
91 audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
92}
93
94/**
95 * audit_iface - do audit message for policy unpacking/load/replace/remove
96 * @new: profile if it has been allocated (MAYBE NULL)
97 * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
98 * @name: name of the profile being manipulated (MAYBE NULL)
99 * @info: any extra info about the failure (MAYBE NULL)
100 * @e: buffer position info
101 * @error: error code
102 *
103 * Returns: %0 or error
104 */
105static int audit_iface(struct aa_profile *new, const char *ns_name,
106 const char *name, const char *info, struct aa_ext *e,
107 int error)
108{
109 struct aa_profile *profile = labels_profile(aa_current_raw_label());
110 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
111 if (e)
112 aad(&sa)->iface.pos = e->pos - e->start;
113 aad(&sa)->iface.ns = ns_name;
114 if (new)
115 aad(&sa)->name = new->base.hname;
116 else
117 aad(&sa)->name = name;
118 aad(&sa)->info = info;
119 aad(&sa)->error = error;
120
121 return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
122}
123
124void __aa_loaddata_update(struct aa_loaddata *data, long revision)
125{
126 AA_BUG(!data);
127 AA_BUG(!data->ns);
128 AA_BUG(!data->dents[AAFS_LOADDATA_REVISION]);
129 AA_BUG(!mutex_is_locked(&data->ns->lock));
130 AA_BUG(data->revision > revision);
131
132 data->revision = revision;
133 d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
134 current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
135 d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
136 current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
137}
138
139bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
140{
141 if (l->size != r->size)
142 return false;
143 if (l->compressed_size != r->compressed_size)
144 return false;
145 if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
146 return false;
147 return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
148}
149
150/*
151 * need to take the ns mutex lock which is NOT safe most places that
152 * put_loaddata is called, so we have to delay freeing it
153 */
154static void do_loaddata_free(struct work_struct *work)
155{
156 struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
157 struct aa_ns *ns = aa_get_ns(d->ns);
158
159 if (ns) {
160 mutex_lock_nested(&ns->lock, ns->level);
161 __aa_fs_remove_rawdata(d);
162 mutex_unlock(&ns->lock);
163 aa_put_ns(ns);
164 }
165
166 kfree_sensitive(d->hash);
167 kfree_sensitive(d->name);
168 kvfree(d->data);
169 kfree_sensitive(d);
170}
171
172void aa_loaddata_kref(struct kref *kref)
173{
174 struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
175
176 if (d) {
177 INIT_WORK(&d->work, do_loaddata_free);
178 schedule_work(&d->work);
179 }
180}
181
182struct aa_loaddata *aa_loaddata_alloc(size_t size)
183{
184 struct aa_loaddata *d;
185
186 d = kzalloc(sizeof(*d), GFP_KERNEL);
187 if (d == NULL)
188 return ERR_PTR(-ENOMEM);
189 d->data = kvzalloc(size, GFP_KERNEL);
190 if (!d->data) {
191 kfree(d);
192 return ERR_PTR(-ENOMEM);
193 }
194 kref_init(&d->count);
195 INIT_LIST_HEAD(&d->list);
196
197 return d;
198}
199
200/* test if read will be in packed data bounds */
201static bool inbounds(struct aa_ext *e, size_t size)
202{
203 return (size <= e->end - e->pos);
204}
205
206static void *kvmemdup(const void *src, size_t len)
207{
208 void *p = kvmalloc(len, GFP_KERNEL);
209
210 if (p)
211 memcpy(p, src, len);
212 return p;
213}
214
215/**
216 * aa_u16_chunck - test and do bounds checking for a u16 size based chunk
217 * @e: serialized data read head (NOT NULL)
218 * @chunk: start address for chunk of data (NOT NULL)
219 *
220 * Returns: the size of chunk found with the read head at the end of the chunk.
221 */
222static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
223{
224 size_t size = 0;
225 void *pos = e->pos;
226
227 if (!inbounds(e, sizeof(u16)))
228 goto fail;
229 size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
230 e->pos += sizeof(__le16);
231 if (!inbounds(e, size))
232 goto fail;
233 *chunk = e->pos;
234 e->pos += size;
235 return size;
236
237fail:
238 e->pos = pos;
239 return 0;
240}
241
242/* unpack control byte */
243static bool unpack_X(struct aa_ext *e, enum aa_code code)
244{
245 if (!inbounds(e, 1))
246 return false;
247 if (*(u8 *) e->pos != code)
248 return false;
249 e->pos++;
250 return true;
251}
252
253/**
254 * unpack_nameX - check is the next element is of type X with a name of @name
255 * @e: serialized data extent information (NOT NULL)
256 * @code: type code
257 * @name: name to match to the serialized element. (MAYBE NULL)
258 *
259 * check that the next serialized data element is of type X and has a tag
260 * name @name. If @name is specified then there must be a matching
261 * name element in the stream. If @name is NULL any name element will be
262 * skipped and only the typecode will be tested.
263 *
264 * Returns true on success (both type code and name tests match) and the read
265 * head is advanced past the headers
266 *
267 * Returns: false if either match fails, the read head does not move
268 */
269static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
270{
271 /*
272 * May need to reset pos if name or type doesn't match
273 */
274 void *pos = e->pos;
275 /*
276 * Check for presence of a tagname, and if present name size
277 * AA_NAME tag value is a u16.
278 */
279 if (unpack_X(e, AA_NAME)) {
280 char *tag = NULL;
281 size_t size = unpack_u16_chunk(e, &tag);
282 /* if a name is specified it must match. otherwise skip tag */
283 if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
284 goto fail;
285 } else if (name) {
286 /* if a name is specified and there is no name tag fail */
287 goto fail;
288 }
289
290 /* now check if type code matches */
291 if (unpack_X(e, code))
292 return true;
293
294fail:
295 e->pos = pos;
296 return false;
297}
298
299static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
300{
301 void *pos = e->pos;
302
303 if (unpack_nameX(e, AA_U8, name)) {
304 if (!inbounds(e, sizeof(u8)))
305 goto fail;
306 if (data)
307 *data = get_unaligned((u8 *)e->pos);
308 e->pos += sizeof(u8);
309 return true;
310 }
311
312fail:
313 e->pos = pos;
314 return false;
315}
316
317static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
318{
319 void *pos = e->pos;
320
321 if (unpack_nameX(e, AA_U32, name)) {
322 if (!inbounds(e, sizeof(u32)))
323 goto fail;
324 if (data)
325 *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
326 e->pos += sizeof(u32);
327 return true;
328 }
329
330fail:
331 e->pos = pos;
332 return false;
333}
334
335static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
336{
337 void *pos = e->pos;
338
339 if (unpack_nameX(e, AA_U64, name)) {
340 if (!inbounds(e, sizeof(u64)))
341 goto fail;
342 if (data)
343 *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
344 e->pos += sizeof(u64);
345 return true;
346 }
347
348fail:
349 e->pos = pos;
350 return false;
351}
352
353static size_t unpack_array(struct aa_ext *e, const char *name)
354{
355 void *pos = e->pos;
356
357 if (unpack_nameX(e, AA_ARRAY, name)) {
358 int size;
359 if (!inbounds(e, sizeof(u16)))
360 goto fail;
361 size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
362 e->pos += sizeof(u16);
363 return size;
364 }
365
366fail:
367 e->pos = pos;
368 return 0;
369}
370
371static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
372{
373 void *pos = e->pos;
374
375 if (unpack_nameX(e, AA_BLOB, name)) {
376 u32 size;
377 if (!inbounds(e, sizeof(u32)))
378 goto fail;
379 size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
380 e->pos += sizeof(u32);
381 if (inbounds(e, (size_t) size)) {
382 *blob = e->pos;
383 e->pos += size;
384 return size;
385 }
386 }
387
388fail:
389 e->pos = pos;
390 return 0;
391}
392
393static int unpack_str(struct aa_ext *e, const char **string, const char *name)
394{
395 char *src_str;
396 size_t size = 0;
397 void *pos = e->pos;
398 *string = NULL;
399 if (unpack_nameX(e, AA_STRING, name)) {
400 size = unpack_u16_chunk(e, &src_str);
401 if (size) {
402 /* strings are null terminated, length is size - 1 */
403 if (src_str[size - 1] != 0)
404 goto fail;
405 *string = src_str;
406
407 return size;
408 }
409 }
410
411fail:
412 e->pos = pos;
413 return 0;
414}
415
416static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
417{
418 const char *tmp;
419 void *pos = e->pos;
420 int res = unpack_str(e, &tmp, name);
421 *string = NULL;
422
423 if (!res)
424 return 0;
425
426 *string = kmemdup(tmp, res, GFP_KERNEL);
427 if (!*string) {
428 e->pos = pos;
429 return 0;
430 }
431
432 return res;
433}
434
435
436/**
437 * unpack_dfa - unpack a file rule dfa
438 * @e: serialized data extent information (NOT NULL)
439 *
440 * returns dfa or ERR_PTR or NULL if no dfa
441 */
442static struct aa_dfa *unpack_dfa(struct aa_ext *e)
443{
444 char *blob = NULL;
445 size_t size;
446 struct aa_dfa *dfa = NULL;
447
448 size = unpack_blob(e, &blob, "aadfa");
449 if (size) {
450 /*
451 * The dfa is aligned with in the blob to 8 bytes
452 * from the beginning of the stream.
453 * alignment adjust needed by dfa unpack
454 */
455 size_t sz = blob - (char *) e->start -
456 ((e->pos - e->start) & 7);
457 size_t pad = ALIGN(sz, 8) - sz;
458 int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
459 TO_ACCEPT2_FLAG(YYTD_DATA32) | DFA_FLAG_VERIFY_STATES;
460 dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
461
462 if (IS_ERR(dfa))
463 return dfa;
464
465 }
466
467 return dfa;
468}
469
470/**
471 * unpack_trans_table - unpack a profile transition table
472 * @e: serialized data extent information (NOT NULL)
473 * @profile: profile to add the accept table to (NOT NULL)
474 *
475 * Returns: true if table successfully unpacked
476 */
477static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
478{
479 void *saved_pos = e->pos;
480
481 /* exec table is optional */
482 if (unpack_nameX(e, AA_STRUCT, "xtable")) {
483 int i, size;
484
485 size = unpack_array(e, NULL);
486 /* currently 4 exec bits and entries 0-3 are reserved iupcx */
487 if (size > 16 - 4)
488 goto fail;
489 profile->file.trans.table = kcalloc(size, sizeof(char *),
490 GFP_KERNEL);
491 if (!profile->file.trans.table)
492 goto fail;
493
494 profile->file.trans.size = size;
495 for (i = 0; i < size; i++) {
496 char *str;
497 int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
498 /* unpack_strdup verifies that the last character is
499 * null termination byte.
500 */
501 if (!size2)
502 goto fail;
503 profile->file.trans.table[i] = str;
504 /* verify that name doesn't start with space */
505 if (isspace(*str))
506 goto fail;
507
508 /* count internal # of internal \0 */
509 for (c = j = 0; j < size2 - 1; j++) {
510 if (!str[j]) {
511 pos = j;
512 c++;
513 }
514 }
515 if (*str == ':') {
516 /* first character after : must be valid */
517 if (!str[1])
518 goto fail;
519 /* beginning with : requires an embedded \0,
520 * verify that exactly 1 internal \0 exists
521 * trailing \0 already verified by unpack_strdup
522 *
523 * convert \0 back to : for label_parse
524 */
525 if (c == 1)
526 str[pos] = ':';
527 else if (c > 1)
528 goto fail;
529 } else if (c)
530 /* fail - all other cases with embedded \0 */
531 goto fail;
532 }
533 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
534 goto fail;
535 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
536 goto fail;
537 }
538 return true;
539
540fail:
541 aa_free_domain_entries(&profile->file.trans);
542 e->pos = saved_pos;
543 return false;
544}
545
546static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
547{
548 void *pos = e->pos;
549
550 if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
551 int i, size;
552
553 size = unpack_array(e, NULL);
554 profile->xattr_count = size;
555 profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
556 if (!profile->xattrs)
557 goto fail;
558 for (i = 0; i < size; i++) {
559 if (!unpack_strdup(e, &profile->xattrs[i], NULL))
560 goto fail;
561 }
562 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
563 goto fail;
564 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
565 goto fail;
566 }
567
568 return true;
569
570fail:
571 e->pos = pos;
572 return false;
573}
574
575static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
576{
577 void *pos = e->pos;
578 int i, size;
579
580 if (unpack_nameX(e, AA_STRUCT, "secmark")) {
581 size = unpack_array(e, NULL);
582
583 profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
584 GFP_KERNEL);
585 if (!profile->secmark)
586 goto fail;
587
588 profile->secmark_count = size;
589
590 for (i = 0; i < size; i++) {
591 if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
592 goto fail;
593 if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
594 goto fail;
595 if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
596 goto fail;
597 }
598 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
599 goto fail;
600 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
601 goto fail;
602 }
603
604 return true;
605
606fail:
607 if (profile->secmark) {
608 for (i = 0; i < size; i++)
609 kfree(profile->secmark[i].label);
610 kfree(profile->secmark);
611 profile->secmark_count = 0;
612 profile->secmark = NULL;
613 }
614
615 e->pos = pos;
616 return false;
617}
618
619static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
620{
621 void *pos = e->pos;
622
623 /* rlimits are optional */
624 if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
625 int i, size;
626 u32 tmp = 0;
627 if (!unpack_u32(e, &tmp, NULL))
628 goto fail;
629 profile->rlimits.mask = tmp;
630
631 size = unpack_array(e, NULL);
632 if (size > RLIM_NLIMITS)
633 goto fail;
634 for (i = 0; i < size; i++) {
635 u64 tmp2 = 0;
636 int a = aa_map_resource(i);
637 if (!unpack_u64(e, &tmp2, NULL))
638 goto fail;
639 profile->rlimits.limits[a].rlim_max = tmp2;
640 }
641 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
642 goto fail;
643 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
644 goto fail;
645 }
646 return true;
647
648fail:
649 e->pos = pos;
650 return false;
651}
652
653static u32 strhash(const void *data, u32 len, u32 seed)
654{
655 const char * const *key = data;
656
657 return jhash(*key, strlen(*key), seed);
658}
659
660static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
661{
662 const struct aa_data *data = obj;
663 const char * const *key = arg->key;
664
665 return strcmp(data->key, *key);
666}
667
668/**
669 * unpack_profile - unpack a serialized profile
670 * @e: serialized data extent information (NOT NULL)
 * @ns_name: Returns - namespace name if the profile specified one, else NULL
671 *
672 * NOTE: unpack profile sets audit struct if there is a failure
673 */
674static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
675{
676 struct aa_profile *profile = NULL;
677 const char *tmpname, *tmpns = NULL, *name = NULL;
678 const char *info = "failed to unpack profile";
679 size_t ns_len;
680 struct rhashtable_params params = { 0 };
681 char *key = NULL;
682 struct aa_data *data;
683 int i, error = -EPROTO;
684 kernel_cap_t tmpcap;
685 u32 tmp;
686
687 *ns_name = NULL;
688
689 /* check that we have the right struct being passed */
690 if (!unpack_nameX(e, AA_STRUCT, "profile"))
691 goto fail;
692 if (!unpack_str(e, &name, NULL))
693 goto fail;
694 if (*name == '\0')
695 goto fail;
696
697 tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
698 if (tmpns) {
699 *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
700 if (!*ns_name) {
701 info = "out of memory";
702 goto fail;
703 }
704 name = tmpname;
705 }
706
707 profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
708 if (!profile)
709 return ERR_PTR(-ENOMEM);
710
711 /* profile renaming is optional */
712 (void) unpack_str(e, &profile->rename, "rename");
713
714 /* attachment string is optional */
715 (void) unpack_str(e, &profile->attach, "attach");
716
717 /* xmatch is optional and may be NULL */
718 profile->xmatch = unpack_dfa(e);
719 if (IS_ERR(profile->xmatch)) {
720 error = PTR_ERR(profile->xmatch);
721 profile->xmatch = NULL;
722 info = "bad xmatch";
723 goto fail;
724 }
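	/*
	 * Convention used throughout this function: unpack_dfa() returns
	 * an ERR_PTR() for a malformed dfa blob, NULL when no dfa is
	 * present at the current position, and a valid dfa otherwise;
	 * whether NULL is acceptable depends on the caller.
	 */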
725 /* xmatch_len is not optional if xmatch is set */
726 if (profile->xmatch) {
727 if (!unpack_u32(e, &tmp, NULL)) {
728 info = "missing xmatch len";
729 goto fail;
730 }
731 profile->xmatch_len = tmp;
732 }
733
734 /* disconnected attachment string is optional */
735 (void) unpack_str(e, &profile->disconnected, "disconnected");
736
737 /* per profile debug flags (complain, audit) */
738 if (!unpack_nameX(e, AA_STRUCT, "flags")) {
739 info = "profile missing flags";
740 goto fail;
741 }
742 info = "failed to unpack profile flags";
743 if (!unpack_u32(e, &tmp, NULL))
744 goto fail;
745 if (tmp & PACKED_FLAG_HAT)
746 profile->label.flags |= FLAG_HAT;
747 if (!unpack_u32(e, &tmp, NULL))
748 goto fail;
749 if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG))
750 profile->mode = APPARMOR_COMPLAIN;
751 else if (tmp == PACKED_MODE_ENFORCE)
752 profile->mode = APPARMOR_ENFORCE;
753 else if (tmp == PACKED_MODE_KILL)
754 profile->mode = APPARMOR_KILL;
755 else if (tmp == PACKED_MODE_UNCONFINED)
756 profile->mode = APPARMOR_UNCONFINED;
757 else
758 goto fail;
759 if (!unpack_u32(e, &tmp, NULL))
760 goto fail;
761 if (tmp)
762 profile->audit = AUDIT_ALL;
763
764 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
765 goto fail;
766
767 /* path_flags is optional */
768 if (unpack_u32(e, &profile->path_flags, "path_flags"))
769 profile->path_flags |= profile->label.flags &
770 PATH_MEDIATE_DELETED;
771 else
772 /* set a default value if path_flags field is not present */
773 profile->path_flags = PATH_MEDIATE_DELETED;
774
775 info = "failed to unpack profile capabilities";
776 if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
777 goto fail;
778 if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
779 goto fail;
780 if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
781 goto fail;
782 if (!unpack_u32(e, &tmpcap.cap[0], NULL))
783 goto fail;
784
785 info = "failed to unpack upper profile capabilities";
786 if (unpack_nameX(e, AA_STRUCT, "caps64")) {
787 /* optional upper half of 64 bit caps */
788 if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
789 goto fail;
790 if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
791 goto fail;
792 if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
793 goto fail;
794 if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
795 goto fail;
796 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
797 goto fail;
798 }
799
800 info = "failed to unpack extended profile capabilities";
801 if (unpack_nameX(e, AA_STRUCT, "capsx")) {
802 /* optional extended caps mediation mask */
803 if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
804 goto fail;
805 if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
806 goto fail;
807 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
808 goto fail;
809 }
810
811 if (!unpack_xattrs(e, profile)) {
812 info = "failed to unpack profile xattrs";
813 goto fail;
814 }
815
816 if (!unpack_rlimits(e, profile)) {
817 info = "failed to unpack profile rlimits";
818 goto fail;
819 }
820
821 if (!unpack_secmark(e, profile)) {
822 info = "failed to unpack profile secmark rules";
823 goto fail;
824 }
825
826 if (unpack_nameX(e, AA_STRUCT, "policydb")) {
827 /* generic policy dfa - optional and may be NULL */
828 info = "failed to unpack policydb";
829 profile->policy.dfa = unpack_dfa(e);
830 if (IS_ERR(profile->policy.dfa)) {
831 error = PTR_ERR(profile->policy.dfa);
832 profile->policy.dfa = NULL;
833 goto fail;
834 } else if (!profile->policy.dfa) {
835 error = -EPROTO;
836 goto fail;
837 }
838 if (!unpack_u32(e, &profile->policy.start[0], "start"))
839 /* default start state */
840 profile->policy.start[0] = DFA_START;
841 /* setup class index */
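		/*
		 * i.e. start[class] caches the state reached by stepping
		 * the policydb dfa from the base start state over the
		 * class number, so later lookups can begin mid-dfa.
		 */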
842 for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
843 profile->policy.start[i] =
844 aa_dfa_next(profile->policy.dfa,
845 profile->policy.start[0],
846 i);
847 }
848 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
849 goto fail;
850 } else
851 profile->policy.dfa = aa_get_dfa(nulldfa);
852
853 /* get file rules */
854 profile->file.dfa = unpack_dfa(e);
855 if (IS_ERR(profile->file.dfa)) {
856 error = PTR_ERR(profile->file.dfa);
857 profile->file.dfa = NULL;
858 info = "failed to unpack profile file rules";
859 goto fail;
860 } else if (profile->file.dfa) {
861 if (!unpack_u32(e, &profile->file.start, "dfa_start"))
862 /* default start state */
863 profile->file.start = DFA_START;
864 } else if (profile->policy.dfa &&
865 profile->policy.start[AA_CLASS_FILE]) {
866 profile->file.dfa = aa_get_dfa(profile->policy.dfa);
867 profile->file.start = profile->policy.start[AA_CLASS_FILE];
868 } else
869 profile->file.dfa = aa_get_dfa(nulldfa);
870
871 if (!unpack_trans_table(e, profile)) {
872 info = "failed to unpack profile transition table";
873 goto fail;
874 }
875
876 if (unpack_nameX(e, AA_STRUCT, "data")) {
877 info = "out of memory";
878 profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
879 if (!profile->data)
880 goto fail;
881
882 params.nelem_hint = 3;
883 params.key_len = sizeof(void *);
884 params.key_offset = offsetof(struct aa_data, key);
885 params.head_offset = offsetof(struct aa_data, head);
886 params.hashfn = strhash;
887 params.obj_cmpfn = datacmp;
888
889 if (rhashtable_init(profile->data, &params)) {
890 info = "failed to init key, value hash table";
891 goto fail;
892 }
893
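		/*
		 * Each data entry in the stream is a strdup'd key followed
		 * by a blob value; the loop below ends at the first element
		 * that is not a string (normally the struct end tag).
		 */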
894 while (unpack_strdup(e, &key, NULL)) {
895 data = kzalloc(sizeof(*data), GFP_KERNEL);
896 if (!data) {
897 kfree_sensitive(key);
898 goto fail;
899 }
900
901 data->key = key;
902 data->size = unpack_blob(e, &data->data, NULL);
903 data->data = kvmemdup(data->data, data->size);
904 if (data->size && !data->data) {
905 kfree_sensitive(data->key);
906 kfree_sensitive(data);
907 goto fail;
908 }
909
910 rhashtable_insert_fast(profile->data, &data->head,
911 profile->data->p);
912 }
913
914 if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
915 info = "failed to unpack end of key, value data table";
916 goto fail;
917 }
918 }
919
920 if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
921 info = "failed to unpack end of profile";
922 goto fail;
923 }
924
925 return profile;
926
927fail:
928 if (profile)
929 name = NULL;
930 else if (!name)
931 name = "unknown";
932 audit_iface(profile, NULL, name, info, e, error);
933 aa_free_profile(profile);
934
935 return ERR_PTR(error);
936}
937
938/**
939 * verify_header - unpack serialized stream header
940 * @e: serialized data read head (NOT NULL)
941 * @required: whether the header is required or optional
942 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
943 *
944 * Returns: error or 0 if header is good
945 */
946static int verify_header(struct aa_ext *e, int required, const char **ns)
947{
948 int error = -EPROTONOSUPPORT;
949 const char *name = NULL;
950 *ns = NULL;
951
952 /* get the interface version */
953 if (!unpack_u32(e, &e->version, "version")) {
954 if (required) {
955 audit_iface(NULL, NULL, NULL, "invalid profile format",
956 e, error);
957 return error;
958 }
959 }
960
961 /* Check that the interface version is currently supported.
962 * if not specified use previous version
963 * Mask off everything that is not kernel abi version
964 */
965 if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
966 audit_iface(NULL, NULL, NULL, "unsupported interface version",
967 e, error);
968 return error;
969 }
970
971 /* read the namespace if present */
972 if (unpack_str(e, &name, "namespace")) {
973 if (*name == '\0') {
974 audit_iface(NULL, NULL, NULL, "invalid namespace name",
975 e, error);
976 return error;
977 }
978 if (*ns && strcmp(*ns, name)) {
979 audit_iface(NULL, NULL, NULL, "invalid ns change", e,
980 error);
981 } else if (!*ns) {
982 *ns = kstrdup(name, GFP_KERNEL);
983 if (!*ns)
984 return -ENOMEM;
985 }
986 }
987
988 return 0;
989}
990
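/*
 * An xindex encodes a transition type in AA_X_TYPE_MASK and, for
 * AA_X_TABLE transitions, an index into the profile's transition table
 * in AA_X_INDEX_MASK; only the table-index case needs a range check.
 */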
991static bool verify_xindex(int xindex, int table_size)
992{
993 int index, xtype;
994 xtype = xindex & AA_X_TYPE_MASK;
995 index = xindex & AA_X_INDEX_MASK;
996 if (xtype == AA_X_TABLE && index >= table_size)
997 return false;
998 return true;
999}
1000
1001/* verify dfa xindexes are in range of transition tables */
1002static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
1003{
1004 int i;
1005 for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
1006 if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
1007 return false;
1008 if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
1009 return false;
1010 }
1011 return true;
1012}
1013
1014/**
1015 * verify_profile - Do post unpack analysis to verify profile consistency
1016 * @profile: profile to verify (NOT NULL)
1017 *
1018 * Returns: 0 if passes verification else error
1019 */
1020static int verify_profile(struct aa_profile *profile)
1021{
1022 if (profile->file.dfa &&
1023 !verify_dfa_xindex(profile->file.dfa,
1024 profile->file.trans.size)) {
1025 audit_iface(profile, NULL, NULL, "Invalid named transition",
1026 NULL, -EPROTO);
1027 return -EPROTO;
1028 }
1029
1030 return 0;
1031}
1032
1033void aa_load_ent_free(struct aa_load_ent *ent)
1034{
1035 if (ent) {
1036 aa_put_profile(ent->rename);
1037 aa_put_profile(ent->old);
1038 aa_put_profile(ent->new);
1039 kfree(ent->ns_name);
1040 kfree_sensitive(ent);
1041 }
1042}
1043
1044struct aa_load_ent *aa_load_ent_alloc(void)
1045{
1046 struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
1047 if (ent)
1048 INIT_LIST_HEAD(&ent->list);
1049 return ent;
1050}
1051
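/*
 * deflate_compress - zlib-compress the raw policy blob
 *
 * A note on the flow below: the data is deflated into a staging buffer
 * sized by deflateBound(), then shrunk with krealloc() (kmalloc case)
 * or copied into a right-sized buffer (vmalloc case) before being
 * returned in *dst/*dlen.  The caller retains ownership of the source
 * buffer and takes ownership of the result.
 */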
1052static int deflate_compress(const char *src, size_t slen, char **dst,
1053 size_t *dlen)
1054{
1055 int error;
1056 struct z_stream_s strm;
1057 void *stgbuf, *dstbuf;
1058 size_t stglen = deflateBound(slen);
1059
1060 memset(&strm, 0, sizeof(strm));
1061
1062 if (stglen < slen)
1063 return -EFBIG;
1064
1065 strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
1066 MAX_MEM_LEVEL),
1067 GFP_KERNEL);
1068 if (!strm.workspace)
1069 return -ENOMEM;
1070
1071 error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
1072 if (error != Z_OK) {
1073 error = -ENOMEM;
1074 goto fail_deflate_init;
1075 }
1076
1077 stgbuf = kvzalloc(stglen, GFP_KERNEL);
1078 if (!stgbuf) {
1079 error = -ENOMEM;
1080 goto fail_stg_alloc;
1081 }
1082
1083 strm.next_in = src;
1084 strm.avail_in = slen;
1085 strm.next_out = stgbuf;
1086 strm.avail_out = stglen;
1087
1088 error = zlib_deflate(&strm, Z_FINISH);
1089 if (error != Z_STREAM_END) {
1090 error = -EINVAL;
1091 goto fail_deflate;
1092 }
1093 error = 0;
1094
1095 if (is_vmalloc_addr(stgbuf)) {
1096 dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
1097 if (dstbuf) {
1098 memcpy(dstbuf, stgbuf, strm.total_out);
1099 kvfree(stgbuf);
1100 }
1101 } else
1102 /*
1103 * If the staging buffer was kmalloc'd, then using krealloc is
1104 * probably going to be faster. The destination buffer will
1105 * always be smaller, so it's just shrunk, avoiding a memcpy
1106 */
1107 dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);
1108
1109 if (!dstbuf) {
1110 error = -ENOMEM;
1111 goto fail_deflate;
1112 }
1113
1114 *dst = dstbuf;
1115 *dlen = strm.total_out;
1116
1117fail_stg_alloc:
1118 zlib_deflateEnd(&strm);
1119fail_deflate_init:
1120 kvfree(strm.workspace);
1121 return error;
1122
1123fail_deflate:
1124 kvfree(stgbuf);
1125 goto fail_stg_alloc;
1126}
1127
1128static int compress_loaddata(struct aa_loaddata *data)
1129{
1130
1131 AA_BUG(data->compressed_size > 0);
1132
1133 /*
1134 * Shortcut the no compression case, else we increase the amount of
1135 * storage required by a small amount
1136 */
1137 if (aa_g_rawdata_compression_level != 0) {
1138 void *udata = data->data;
1139 int error = deflate_compress(udata, data->size, &data->data,
1140 &data->compressed_size);
1141 if (error)
1142 return error;
1143
1144 kvfree(udata);
1145 } else
1146 data->compressed_size = data->size;
1147
1148 return 0;
1149}
1150
1151/**
1152 * aa_unpack - unpack packed binary profile(s) data loaded from user space
1153 * @udata: user data copied to kmem (NOT NULL)
1154 * @lh: list to place unpacked profiles in a aa_repl_ws
1155 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
1156 *
1157 * Unpack user data and return refcounted allocated profile(s) stored in
1158 * @lh in order of discovery, with the list chain stored in base.list
1159 * on success; an error code is returned on failure
1160 *
1161 * Returns: profile(s) on @lh else error pointer if fails to unpack
1162 */
1163int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
1164 const char **ns)
1165{
1166 struct aa_load_ent *tmp, *ent;
1167 struct aa_profile *profile = NULL;
1168 int error;
1169 struct aa_ext e = {
1170 .start = udata->data,
1171 .end = udata->data + udata->size,
1172 .pos = udata->data,
1173 };
1174
1175 *ns = NULL;
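	/*
	 * The loaded blob is a sequence of (header, profile) records; the
	 * header is only mandatory for the first record, which is why
	 * verify_header() is told whether we are still at e.start.
	 */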
1176 while (e.pos < e.end) {
1177 char *ns_name = NULL;
1178 void *start;
1179 error = verify_header(&e, e.pos == e.start, ns);
1180 if (error)
1181 goto fail;
1182
1183 start = e.pos;
1184 profile = unpack_profile(&e, &ns_name);
1185 if (IS_ERR(profile)) {
1186 error = PTR_ERR(profile);
1187 goto fail;
1188 }
1189
1190 error = verify_profile(profile);
1191 if (error)
1192 goto fail_profile;
1193
1194 if (aa_g_hash_policy)
1195 error = aa_calc_profile_hash(profile, e.version, start,
1196 e.pos - start);
1197 if (error)
1198 goto fail_profile;
1199
1200 ent = aa_load_ent_alloc();
1201 if (!ent) {
1202 error = -ENOMEM;
1203 goto fail_profile;
1204 }
1205
1206 ent->new = profile;
1207 ent->ns_name = ns_name;
1208 list_add_tail(&ent->list, lh);
1209 }
1210 udata->abi = e.version & K_ABI_MASK;
1211 if (aa_g_hash_policy) {
1212 udata->hash = aa_calc_hash(udata->data, udata->size);
1213 if (IS_ERR(udata->hash)) {
1214 error = PTR_ERR(udata->hash);
1215 udata->hash = NULL;
1216 goto fail;
1217 }
1218 }
1219 error = compress_loaddata(udata);
1220 if (error)
1221 goto fail;
1222 return 0;
1223
1224fail_profile:
1225 aa_put_profile(profile);
1226
1227fail:
1228 list_for_each_entry_safe(ent, tmp, lh, list) {
1229 list_del_init(&ent->list);
1230 aa_load_ent_free(ent);
1231 }
1232
1233 return error;
1234}
1235
1236#ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
1237#include "policy_unpack_test.c"
1238#endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */