// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"

static int cachefiles_daemon_open(struct inode *, struct file *);
static int cachefiles_daemon_release(struct inode *, struct file *);
static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
				      loff_t *);
static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
				       size_t, loff_t *);
static __poll_t cachefiles_daemon_poll(struct file *,
				       struct poll_table_struct *);
static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_brun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_cull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_debug(struct cachefiles_cache *, char *);
static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
static void cachefiles_daemon_unbind(struct cachefiles_cache *);

static unsigned long cachefiles_open;

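/*
 * File operations for the /dev/cachefiles control device that cachefilesd
 * holds open for the lifetime of the cache.
 */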
const struct file_operations cachefiles_daemon_fops = {
	.owner		= THIS_MODULE,
	.open		= cachefiles_daemon_open,
	.release	= cachefiles_daemon_release,
	.read		= cachefiles_daemon_read,
	.write		= cachefiles_daemon_write,
	.poll		= cachefiles_daemon_poll,
	.llseek		= noop_llseek,
};

struct cachefiles_daemon_cmd {
	char name[8];
	int (*handler)(struct cachefiles_cache *cache, char *args);
};

static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
	{ "bind",	cachefiles_daemon_bind		},
	{ "brun",	cachefiles_daemon_brun		},
	{ "bcull",	cachefiles_daemon_bcull		},
	{ "bstop",	cachefiles_daemon_bstop		},
	{ "cull",	cachefiles_daemon_cull		},
	{ "debug",	cachefiles_daemon_debug		},
	{ "dir",	cachefiles_daemon_dir		},
	{ "frun",	cachefiles_daemon_frun		},
	{ "fcull",	cachefiles_daemon_fcull		},
	{ "fstop",	cachefiles_daemon_fstop		},
	{ "inuse",	cachefiles_daemon_inuse		},
	{ "secctx",	cachefiles_daemon_secctx	},
	{ "tag",	cachefiles_daemon_tag		},
#ifdef CONFIG_CACHEFILES_ONDEMAND
	{ "copen",	cachefiles_ondemand_copen	},
	{ "restore",	cachefiles_ondemand_restore	},
#endif
	{ "",		NULL				}
};

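/*
 * Commands are issued by cachefilesd as single-line writes to the control
 * file, for example (the path and tag here are illustrative only):
 *
 *	dir /var/cache/fscache
 *	tag mycache
 *	brun 10%
 *	bind
 */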

/*
 * Prepare a cache for caching.
 */
static int cachefiles_daemon_open(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache;

	_enter("");

	/* only the superuser may do this */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* the cachefiles device may only be open once at a time */
	if (xchg(&cachefiles_open, 1) == 1)
		return -EBUSY;

	/* allocate a cache record */
	cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);
	if (!cache) {
		cachefiles_open = 0;
		return -ENOMEM;
	}

	mutex_init(&cache->daemon_mutex);
	init_waitqueue_head(&cache->daemon_pollwq);
	INIT_LIST_HEAD(&cache->volumes);
	INIT_LIST_HEAD(&cache->object_list);
	spin_lock_init(&cache->object_list_lock);
	refcount_set(&cache->unbind_pincount, 1);
	xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC);
	xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1);

	/* set default caching limits
	 * - limit at 1% free space and/or free files
	 * - cull below 5% free space and/or free files
	 * - cease culling above 7% free space and/or free files
	 */
	cache->frun_percent = 7;
	cache->fcull_percent = 5;
	cache->fstop_percent = 1;
	cache->brun_percent = 7;
	cache->bcull_percent = 5;
	cache->bstop_percent = 1;

	file->private_data = cache;
	cache->cachefilesd = file;
	return 0;
}

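/*
 * Complete any outstanding ondemand requests with an error when the daemon
 * goes away and then tear down the request and ondemand ID xarrays.
 */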
static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
	struct xarray *xa = &cache->reqs;
	struct cachefiles_req *req;
	unsigned long index;

	/*
	 * Make sure the following two operations won't be reordered.
	 * 1) set CACHEFILES_DEAD bit
	 * 2) flush requests in the xarray
	 * Otherwise the request may be enqueued after xarray has been
	 * flushed, leaving the orphan request never being completed.
	 *
	 * CPU 1			CPU 2
	 * =====			=====
	 * flush requests in the xarray
	 *				test CACHEFILES_DEAD bit
	 *				enqueue the request
	 * set CACHEFILES_DEAD bit
	 */
	smp_mb();

	xa_lock(xa);
	xa_for_each(xa, index, req) {
		req->error = -EIO;
		complete(&req->done);
	}
	xa_unlock(xa);

	xa_destroy(&cache->reqs);
	xa_destroy(&cache->ondemand_ids);
}

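/*
 * Drop a pin on the cache.  The final put unbinds the cache and frees the
 * cache record.
 */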
void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache)
{
	if (refcount_dec_and_test(&cache->unbind_pincount)) {
		cachefiles_daemon_unbind(cache);
		cachefiles_open = 0;
		kfree(cache);
	}
}

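/*
 * Take a pin on the cache to hold off unbinding and freeing of the cache
 * record.
 */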
void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache)
{
	refcount_inc(&cache->unbind_pincount);
}

/*
 * Release a cache.
 */
static int cachefiles_daemon_release(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache = file->private_data;

	_enter("");

	ASSERT(cache);

	set_bit(CACHEFILES_DEAD, &cache->flags);

	if (cachefiles_in_ondemand_mode(cache))
		cachefiles_flush_reqs(cache);

	/* clean up the control file interface */
	cache->cachefilesd = NULL;
	file->private_data = NULL;

	cachefiles_put_unbind_pincount(cache);

	_leave("");
	return 0;
}

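/*
 * Generate the one-line cache status summary read by cachefilesd, of the form
 * "cull=<0|1> frun=<hex> fcull=<hex> fstop=<hex> brun=<hex> bcull=<hex>
 * bstop=<hex> freleased=<hex> breleased=<hex>".
 */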
static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
					 char __user *_buffer, size_t buflen)
{
	unsigned long long b_released;
	unsigned f_released;
	char buffer[256];
	int n;

	/* check how much space the cache has */
	cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);

	/* summarise */
	f_released = atomic_xchg(&cache->f_released, 0);
	b_released = atomic_long_xchg(&cache->b_released, 0);
	clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);

	n = snprintf(buffer, sizeof(buffer),
		     "cull=%c"
		     " frun=%llx"
		     " fcull=%llx"
		     " fstop=%llx"
		     " brun=%llx"
		     " bcull=%llx"
		     " bstop=%llx"
		     " freleased=%x"
		     " breleased=%llx",
		     test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
		     (unsigned long long) cache->frun,
		     (unsigned long long) cache->fcull,
		     (unsigned long long) cache->fstop,
		     (unsigned long long) cache->brun,
		     (unsigned long long) cache->bcull,
		     (unsigned long long) cache->bstop,
		     f_released,
		     b_released);

	if (n > buflen)
		return -EMSGSIZE;

	if (copy_to_user(_buffer, buffer, n) != 0)
		return -EFAULT;

	return n;
}

/*
 * Read the cache state.
 */
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
				      size_t buflen, loff_t *pos)
{
	struct cachefiles_cache *cache = file->private_data;

	//_enter(",,%zu,", buflen);

	if (!test_bit(CACHEFILES_READY, &cache->flags))
		return 0;

	if (cachefiles_in_ondemand_mode(cache))
		return cachefiles_ondemand_daemon_read(cache, _buffer, buflen);
	else
		return cachefiles_do_daemon_read(cache, _buffer, buflen);
}

/*
 * Take a command from cachefilesd, parse it and act on it.
 */
static ssize_t cachefiles_daemon_write(struct file *file,
				       const char __user *_data,
				       size_t datalen,
				       loff_t *pos)
{
	const struct cachefiles_daemon_cmd *cmd;
	struct cachefiles_cache *cache = file->private_data;
	ssize_t ret;
	char *data, *args, *cp;

	//_enter(",,%zu,", datalen);

	ASSERT(cache);

	if (test_bit(CACHEFILES_DEAD, &cache->flags))
		return -EIO;

	if (datalen > PAGE_SIZE - 1)
		return -EOPNOTSUPP;

	/* drag the command string into the kernel so we can parse it */
	data = memdup_user_nul(_data, datalen);
	if (IS_ERR(data))
		return PTR_ERR(data);

	ret = -EINVAL;
	if (memchr(data, '\0', datalen))
		goto error;

	/* strip any newline */
	cp = memchr(data, '\n', datalen);
	if (cp) {
		if (cp == data)
			goto error;

		*cp = '\0';
	}

	/* parse the command */
	ret = -EOPNOTSUPP;

	for (args = data; *args; args++)
		if (isspace(*args))
			break;
	if (*args) {
		if (args == data)
			goto error;
		*args = '\0';
		args = skip_spaces(++args);
	}

	/* run the appropriate command handler */
	for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++)
		if (strcmp(cmd->name, data) == 0)
			goto found_command;

error:
	kfree(data);
	//_leave(" = %zd", ret);
	return ret;

found_command:
	mutex_lock(&cache->daemon_mutex);

	ret = -EIO;
	if (!test_bit(CACHEFILES_DEAD, &cache->flags))
		ret = cmd->handler(cache, args);

	mutex_unlock(&cache->daemon_mutex);

	if (ret == 0)
		ret = datalen;
	goto error;
}

/*
 * Poll for culling state
 * - use EPOLLOUT to indicate culling state
 */
static __poll_t cachefiles_daemon_poll(struct file *file,
				       struct poll_table_struct *poll)
{
	struct cachefiles_cache *cache = file->private_data;
	XA_STATE(xas, &cache->reqs, 0);
	struct cachefiles_req *req;
	__poll_t mask;

	poll_wait(file, &cache->daemon_pollwq, poll);
	mask = 0;

	if (cachefiles_in_ondemand_mode(cache)) {
		if (!xa_empty(&cache->reqs)) {
			rcu_read_lock();
			xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
				if (!cachefiles_ondemand_is_reopening_read(req)) {
					mask |= EPOLLIN;
					break;
				}
			}
			rcu_read_unlock();
		}
	} else {
		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
			mask |= EPOLLIN;
	}

	if (test_bit(CACHEFILES_CULLING, &cache->flags))
		mask |= EPOLLOUT;

	return mask;
}

/*
 * Give a range error for cache space constraints
 * - can be tail-called
 */
static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
					 char *args)
{
	pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");

	return -EINVAL;
}

/*
 * Set the percentage of files at which to stop culling
 * - command: "frun <N>%"
 */
static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
{
	unsigned long frun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	frun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (frun <= cache->fcull_percent || frun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->frun_percent = frun;
	return 0;
}

/*
 * Set the percentage of files at which to start culling
 * - command: "fcull <N>%"
 */
static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long fcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fcull_percent = fcull;
	return 0;
}

/*
 * Set the percentage of files at which to stop allocating
 * - command: "fstop <N>%"
 */
static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long fstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fstop >= cache->fcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fstop_percent = fstop;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop culling
 * - command: "brun <N>%"
 */
static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
{
	unsigned long brun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	brun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (brun <= cache->bcull_percent || brun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->brun_percent = brun;
	return 0;
}

/*
 * Set the percentage of blocks at which to start culling
 * - command: "bcull <N>%"
 */
static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long bcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bcull_percent = bcull;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop allocating
 * - command: "bstop <N>%"
 */
static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long bstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bstop >= cache->bcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bstop_percent = bstop;
	return 0;
}

/*
 * Set the cache directory
 * - command: "dir <name>"
 */
static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
{
	char *dir;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty directory specified\n");
		return -EINVAL;
	}

	if (cache->rootdirname) {
		pr_err("Second cache directory specified\n");
		return -EEXIST;
	}

	dir = kstrdup(args, GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	cache->rootdirname = dir;
	return 0;
}

/*
 * Set the cache security context
 * - command: "secctx <ctx>"
 */
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
	char *secctx;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty security context specified\n");
		return -EINVAL;
	}

	if (cache->secctx) {
		pr_err("Second security context specified\n");
		return -EINVAL;
	}

	secctx = kstrdup(args, GFP_KERNEL);
	if (!secctx)
		return -ENOMEM;

	cache->secctx = secctx;
	return 0;
}

/*
 * Set the cache tag
 * - command: "tag <name>"
 */
static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
{
	char *tag;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty tag specified\n");
		return -EINVAL;
	}

	if (cache->tag)
		return -EEXIST;

	tag = kstrdup(args, GFP_KERNEL);
	if (!tag)
		return -ENOMEM;

	cache->tag = tag;
	return 0;
}

/*
 * Request a node in the cache be culled from the current working directory
 * - command: "cull <name>"
 */
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("cull applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("cull applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_cull(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("cull command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("cull command requires dirfd and filename\n");
	return -EINVAL;
}

/*
 * Set debugging mode
 * - command: "debug <mask>"
 */
static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
{
	unsigned long mask;

	_enter(",%s", args);

	mask = simple_strtoul(args, &args, 0);
	if (args[0] != '\0')
		goto inval;

	cachefiles_debug = mask;
	_leave(" = 0");
	return 0;

inval:
	pr_err("debug command requires mask\n");
	return -EINVAL;
}

/*
 * Find out whether an object in the current working directory is in use or not
 * - command: "inuse <name>"
 */
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	//_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("inuse applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("inuse applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_check_in_use(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	//_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("inuse command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("inuse command requires dirfd and filename\n");
	return -EINVAL;
}

/*
 * Bind a directory as a cache
 */
static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
	_enter("{%u,%u,%u,%u,%u,%u},%s",
	       cache->frun_percent,
	       cache->fcull_percent,
	       cache->fstop_percent,
	       cache->brun_percent,
	       cache->bcull_percent,
	       cache->bstop_percent,
	       args);

	if (cache->fstop_percent >= cache->fcull_percent ||
	    cache->fcull_percent >= cache->frun_percent ||
	    cache->frun_percent >= 100)
		return -ERANGE;

	if (cache->bstop_percent >= cache->bcull_percent ||
	    cache->bcull_percent >= cache->brun_percent ||
	    cache->brun_percent >= 100)
		return -ERANGE;

	if (!cache->rootdirname) {
		pr_err("No cache directory specified\n");
		return -EINVAL;
	}

	/* Don't permit already bound caches to be re-bound */
	if (test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("Cache already bound\n");
		return -EBUSY;
	}

	if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) {
		if (!strcmp(args, "ondemand")) {
			set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
		} else if (*args) {
			pr_err("Invalid argument to the 'bind' command\n");
			return -EINVAL;
		}
	} else if (*args) {
		pr_err("'bind' command doesn't take an argument\n");
		return -EINVAL;
	}

	/* Make sure we have copies of the tag string */
	if (!cache->tag) {
		/*
		 * The tag string is released by the fops->release()
		 * function, so we don't release it on error here
		 */
		cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
		if (!cache->tag)
			return -ENOMEM;
	}

	return cachefiles_add_cache(cache);
}

/*
 * Unbind a cache.
 */
static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
{
	_enter("");

	if (test_bit(CACHEFILES_READY, &cache->flags))
		cachefiles_withdraw_cache(cache);

	cachefiles_put_directory(cache->graveyard);
	cachefiles_put_directory(cache->store);
	mntput(cache->mnt);
	put_cred(cache->cache_cred);

	kfree(cache->rootdirname);
	kfree(cache->secctx);
	kfree(cache->tag);

	_leave("");
}