// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
 *
 * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"

static int cachefiles_daemon_open(struct inode *, struct file *);
static int cachefiles_daemon_release(struct inode *, struct file *);
static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
				      loff_t *);
static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
				       size_t, loff_t *);
static __poll_t cachefiles_daemon_poll(struct file *,
				       struct poll_table_struct *);
static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_brun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_cull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_debug(struct cachefiles_cache *, char *);
static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
static void cachefiles_daemon_unbind(struct cachefiles_cache *);

static unsigned long cachefiles_open;

const struct file_operations cachefiles_daemon_fops = {
	.owner		= THIS_MODULE,
	.open		= cachefiles_daemon_open,
	.release	= cachefiles_daemon_release,
	.read		= cachefiles_daemon_read,
	.write		= cachefiles_daemon_write,
	.poll		= cachefiles_daemon_poll,
	.llseek		= noop_llseek,
};

struct cachefiles_daemon_cmd {
	char name[8];
	int (*handler)(struct cachefiles_cache *cache, char *args);
};

static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
	{ "bind",	cachefiles_daemon_bind		},
	{ "brun",	cachefiles_daemon_brun		},
	{ "bcull",	cachefiles_daemon_bcull		},
	{ "bstop",	cachefiles_daemon_bstop		},
	{ "cull",	cachefiles_daemon_cull		},
	{ "debug",	cachefiles_daemon_debug		},
	{ "dir",	cachefiles_daemon_dir		},
	{ "frun",	cachefiles_daemon_frun		},
	{ "fcull",	cachefiles_daemon_fcull		},
	{ "fstop",	cachefiles_daemon_fstop		},
	{ "inuse",	cachefiles_daemon_inuse		},
	{ "secctx",	cachefiles_daemon_secctx	},
	{ "tag",	cachefiles_daemon_tag		},
#ifdef CONFIG_CACHEFILES_ONDEMAND
	{ "copen",	cachefiles_ondemand_copen	},
	{ "restore",	cachefiles_ondemand_restore	},
#endif
	{ "",		NULL				}
};
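
/*
 * A typical user-space cache manager (e.g. cachefilesd) drives this table by
 * writing one command per write() to /dev/cachefiles, for example:
 *
 *	dir /var/cache/fscache
 *	tag mycache
 *	brun 10%
 *	bcull 7%
 *	bstop 3%
 *	bind
 *
 * The path, tag and percentages here are just an example configuration.
 */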


/*
 * Prepare a cache for caching.
 */
static int cachefiles_daemon_open(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache;

	_enter("");

	/* only the superuser may do this */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* the cachefiles device may only be open once at a time */
	if (xchg(&cachefiles_open, 1) == 1)
		return -EBUSY;

	/* allocate a cache record */
	cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);
	if (!cache) {
		cachefiles_open = 0;
		return -ENOMEM;
	}

	mutex_init(&cache->daemon_mutex);
	init_waitqueue_head(&cache->daemon_pollwq);
	INIT_LIST_HEAD(&cache->volumes);
	INIT_LIST_HEAD(&cache->object_list);
	spin_lock_init(&cache->object_list_lock);
	refcount_set(&cache->unbind_pincount, 1);
	xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC);
	xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1);

	/* set default caching limits
	 * - stop allocating at 1% free space and/or free files
	 * - cull below 5% free space and/or free files
	 * - cease culling above 7% free space and/or free files
	 */
	cache->frun_percent = 7;
	cache->fcull_percent = 5;
	cache->fstop_percent = 1;
	cache->brun_percent = 7;
	cache->bcull_percent = 5;
	cache->bstop_percent = 1;

	file->private_data = cache;
	cache->cachefilesd = file;
	return 0;
}

static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
	struct xarray *xa = &cache->reqs;
	struct cachefiles_req *req;
	unsigned long index;

	/*
	 * Make sure the following two operations won't be reordered.
	 *   1) set CACHEFILES_DEAD bit
	 *   2) flush requests in the xarray
	 * Otherwise a request could be enqueued after the xarray has been
	 * flushed, leaving an orphaned request that is never completed.
	 *
	 * CPU 1				CPU 2
	 * =====				=====
	 * flush requests in the xarray
	 *					test CACHEFILES_DEAD bit
	 *					enqueue the request
	 * set CACHEFILES_DEAD bit
	 */
	smp_mb();

	xa_lock(xa);
	xa_for_each(xa, index, req) {
		req->error = -EIO;
		complete(&req->done);
	}
	xa_unlock(xa);

	xa_destroy(&cache->reqs);
	xa_destroy(&cache->ondemand_ids);
}
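
/*
 * Note: the smp_mb() in cachefiles_flush_reqs() is assumed to pair with a
 * corresponding barrier on the request-enqueue side (the on-demand request
 * submission path), so that a request enqueued after the flush must observe
 * the CACHEFILES_DEAD bit and fail rather than linger uncompleted.
 */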

void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache)
{
	if (refcount_dec_and_test(&cache->unbind_pincount)) {
		cachefiles_daemon_unbind(cache);
		cachefiles_open = 0;
		kfree(cache);
	}
}

void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache)
{
	refcount_inc(&cache->unbind_pincount);
}

/*
 * Release a cache.
 */
static int cachefiles_daemon_release(struct inode *inode, struct file *file)
{
	struct cachefiles_cache *cache = file->private_data;

	_enter("");

	ASSERT(cache);

	set_bit(CACHEFILES_DEAD, &cache->flags);

	if (cachefiles_in_ondemand_mode(cache))
		cachefiles_flush_reqs(cache);

	/* clean up the control file interface */
	cache->cachefilesd = NULL;
	file->private_data = NULL;

	cachefiles_put_unbind_pincount(cache);

	_leave("");
	return 0;
}

static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
					 char __user *_buffer, size_t buflen)
{
	unsigned long long b_released;
	unsigned f_released;
	char buffer[256];
	int n;

	/* check how much space the cache has */
	cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);

	/* summarise */
	f_released = atomic_xchg(&cache->f_released, 0);
	b_released = atomic_long_xchg(&cache->b_released, 0);
	clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);

	n = snprintf(buffer, sizeof(buffer),
		     "cull=%c"
		     " frun=%llx"
		     " fcull=%llx"
		     " fstop=%llx"
		     " brun=%llx"
		     " bcull=%llx"
		     " bstop=%llx"
		     " freleased=%x"
		     " breleased=%llx",
		     test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
		     (unsigned long long) cache->frun,
		     (unsigned long long) cache->fcull,
		     (unsigned long long) cache->fstop,
		     (unsigned long long) cache->brun,
		     (unsigned long long) cache->bcull,
		     (unsigned long long) cache->bstop,
		     f_released,
		     b_released);

	if (n > buflen)
		return -EMSGSIZE;

	if (copy_to_user(_buffer, buffer, n) != 0)
		return -EFAULT;

	return n;
}
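
/*
 * For example, the state line copied back to the daemon looks like this
 * (the limit values are in hex; the numbers below are made up):
 *
 *	cull=0 frun=1b58 fcull=1388 fstop=3e8 brun=6d60 bcull=4e20 bstop=fa0 freleased=0 breleased=0
 */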

/*
 * Read the cache state.
 */
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
				      size_t buflen, loff_t *pos)
{
	struct cachefiles_cache *cache = file->private_data;

	//_enter(",,%zu,", buflen);

	if (!test_bit(CACHEFILES_READY, &cache->flags))
		return 0;

	if (cachefiles_in_ondemand_mode(cache))
		return cachefiles_ondemand_daemon_read(cache, _buffer, buflen);
	else
		return cachefiles_do_daemon_read(cache, _buffer, buflen);
}

/*
 * Take a command from cachefilesd, parse it and act on it.
 */
static ssize_t cachefiles_daemon_write(struct file *file,
				       const char __user *_data,
				       size_t datalen,
				       loff_t *pos)
{
	const struct cachefiles_daemon_cmd *cmd;
	struct cachefiles_cache *cache = file->private_data;
	ssize_t ret;
	char *data, *args, *cp;

	//_enter(",,%zu,", datalen);

	ASSERT(cache);

	if (test_bit(CACHEFILES_DEAD, &cache->flags))
		return -EIO;

	if (datalen > PAGE_SIZE - 1)
		return -EOPNOTSUPP;

	/* drag the command string into the kernel so we can parse it */
	data = memdup_user_nul(_data, datalen);
	if (IS_ERR(data))
		return PTR_ERR(data);

	ret = -EINVAL;
	if (memchr(data, '\0', datalen))
		goto error;

	/* strip any newline */
	cp = memchr(data, '\n', datalen);
	if (cp) {
		if (cp == data)
			goto error;

		*cp = '\0';
	}

	/* parse the command */
	ret = -EOPNOTSUPP;

	for (args = data; *args; args++)
		if (isspace(*args))
			break;
	if (*args) {
		if (args == data)
			goto error;
		*args = '\0';
		args = skip_spaces(++args);
	}

	/* run the appropriate command handler */
	for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++)
		if (strcmp(cmd->name, data) == 0)
			goto found_command;

error:
	kfree(data);
	//_leave(" = %zd", ret);
	return ret;

found_command:
	mutex_lock(&cache->daemon_mutex);

	ret = -EIO;
	if (!test_bit(CACHEFILES_DEAD, &cache->flags))
		ret = cmd->handler(cache, args);

	mutex_unlock(&cache->daemon_mutex);

	if (ret == 0)
		ret = datalen;
	goto error;
}
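
/*
 * For example, a write of "fcull 7%\n" reaches this function as
 * data = "fcull 7%": the newline is stripped, the word-splitting loop above
 * turns it into the command name "fcull" and args "7%", and the matching
 * handler (cachefiles_daemon_fcull()) is then called under daemon_mutex.
 */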

/*
 * Poll for culling state
 * - use EPOLLOUT to indicate culling state
 */
static __poll_t cachefiles_daemon_poll(struct file *file,
				       struct poll_table_struct *poll)
{
	struct cachefiles_cache *cache = file->private_data;
	XA_STATE(xas, &cache->reqs, 0);
	struct cachefiles_req *req;
	__poll_t mask;

	poll_wait(file, &cache->daemon_pollwq, poll);
	mask = 0;

	if (cachefiles_in_ondemand_mode(cache)) {
		if (!xa_empty(&cache->reqs)) {
			rcu_read_lock();
			xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
				if (!cachefiles_ondemand_is_reopening_read(req)) {
					mask |= EPOLLIN;
					break;
				}
			}
			rcu_read_unlock();
		}
	} else {
		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
			mask |= EPOLLIN;
	}

	if (test_bit(CACHEFILES_CULLING, &cache->flags))
		mask |= EPOLLOUT;

	return mask;
}
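
/*
 * In summary: EPOLLIN tells the daemon there is something to read (a new
 * on-demand request, or a state change when not in on-demand mode) and
 * EPOLLOUT tells it that culling is required; the daemon would typically
 * poll() the device and react to whichever bits are set.
 */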

/*
 * Give a range error for cache space constraints
 * - can be tail-called
 */
static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
					 char *args)
{
	pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");

	return -EINVAL;
}

/*
 * Set the percentage of files at which to stop culling
 * - command: "frun <N>%"
 */
static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
{
	unsigned long frun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	frun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (frun <= cache->fcull_percent || frun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->frun_percent = frun;
	return 0;
}

/*
 * Set the percentage of files at which to start culling
 * - command: "fcull <N>%"
 */
static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long fcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fcull_percent = fcull;
	return 0;
}

/*
 * Set the percentage of files at which to stop allocating
 * - command: "fstop <N>%"
 */
static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long fstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	fstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (fstop >= cache->fcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->fstop_percent = fstop;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop culling
 * - command: "brun <N>%"
 */
static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
{
	unsigned long brun;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	brun = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (brun <= cache->bcull_percent || brun >= 100)
		return cachefiles_daemon_range_error(cache, args);

	cache->brun_percent = brun;
	return 0;
}

/*
 * Set the percentage of blocks at which to start culling
 * - command: "bcull <N>%"
 */
static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
{
	unsigned long bcull;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bcull = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bcull_percent = bcull;
	return 0;
}

/*
 * Set the percentage of blocks at which to stop allocating
 * - command: "bstop <N>%"
 */
static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
{
	unsigned long bstop;

	_enter(",%s", args);

	if (!*args)
		return -EINVAL;

	bstop = simple_strtoul(args, &args, 10);
	if (args[0] != '%' || args[1] != '\0')
		return -EINVAL;

	if (bstop >= cache->bcull_percent)
		return cachefiles_daemon_range_error(cache, args);

	cache->bstop_percent = bstop;
	return 0;
}
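
/*
 * Taken together, the six limits above must satisfy
 * 0% <= stop < cull < run < 100% for both the file (f*) and block (b*)
 * families; cachefiles_daemon_bind() re-checks this before bringing the
 * cache online.  For example, "frun 10%", "fcull 7%" and "fstop 3%" form a
 * valid file-limit configuration.
 */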

/*
 * Set the cache directory
 * - command: "dir <name>"
 */
static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
{
	char *dir;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty directory specified\n");
		return -EINVAL;
	}

	if (cache->rootdirname) {
		pr_err("Second cache directory specified\n");
		return -EEXIST;
	}

	dir = kstrdup(args, GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	cache->rootdirname = dir;
	return 0;
}

/*
 * Set the cache security context
 * - command: "secctx <ctx>"
 */
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
	char *secctx;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty security context specified\n");
		return -EINVAL;
	}

	if (cache->secctx) {
		pr_err("Second security context specified\n");
		return -EINVAL;
	}

	secctx = kstrdup(args, GFP_KERNEL);
	if (!secctx)
		return -ENOMEM;

	cache->secctx = secctx;
	return 0;
}

/*
 * Set the cache tag
 * - command: "tag <name>"
 */
static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
{
	char *tag;

	_enter(",%s", args);

	if (!*args) {
		pr_err("Empty tag specified\n");
		return -EINVAL;
	}

	if (cache->tag)
		return -EEXIST;

	tag = kstrdup(args, GFP_KERNEL);
	if (!tag)
		return -ENOMEM;

	cache->tag = tag;
	return 0;
}

/*
 * Request a node in the cache be culled from the current working directory
 * - command: "cull <name>"
 */
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("cull applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("cull applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_cull(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("cull command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("cull command requires dirfd and filename\n");
	return -EINVAL;
}
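
/*
 * Usage note: the name given to "cull" is looked up relative to the writer's
 * current working directory (see get_fs_pwd() above), so the daemon is
 * expected to chdir() into the directory holding the candidate object before
 * writing "cull <filename>" to the device.
 */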

/*
 * Set debugging mode
 * - command: "debug <mask>"
 */
static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
{
	unsigned long mask;

	_enter(",%s", args);

	mask = simple_strtoul(args, &args, 0);
	if (args[0] != '\0')
		goto inval;

	cachefiles_debug = mask;
	_leave(" = 0");
	return 0;

inval:
	pr_err("debug command requires mask\n");
	return -EINVAL;
}

/*
 * Find out whether an object in the current working directory is in use or not
 * - command: "inuse <name>"
 */
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
{
	struct path path;
	const struct cred *saved_cred;
	int ret;

	//_enter(",%s", args);

	if (strchr(args, '/'))
		goto inval;

	if (!test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("inuse applied to unready cache\n");
		return -EIO;
	}

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		pr_err("inuse applied to dead cache\n");
		return -EIO;
	}

	get_fs_pwd(current->fs, &path);

	if (!d_can_lookup(path.dentry))
		goto notdir;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = cachefiles_check_in_use(cache, path.dentry, args);
	cachefiles_end_secure(cache, saved_cred);

	path_put(&path);
	//_leave(" = %d", ret);
	return ret;

notdir:
	path_put(&path);
	pr_err("inuse command requires dirfd to be a directory\n");
	return -ENOTDIR;

inval:
	pr_err("inuse command requires dirfd and filename\n");
	return -EINVAL;
}

/*
 * Bind a directory as a cache
 */
static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
	_enter("{%u,%u,%u,%u,%u,%u},%s",
	       cache->frun_percent,
	       cache->fcull_percent,
	       cache->fstop_percent,
	       cache->brun_percent,
	       cache->bcull_percent,
	       cache->bstop_percent,
	       args);

	if (cache->fstop_percent >= cache->fcull_percent ||
	    cache->fcull_percent >= cache->frun_percent ||
	    cache->frun_percent  >= 100)
		return -ERANGE;

	if (cache->bstop_percent >= cache->bcull_percent ||
	    cache->bcull_percent >= cache->brun_percent ||
	    cache->brun_percent  >= 100)
		return -ERANGE;

	if (!cache->rootdirname) {
		pr_err("No cache directory specified\n");
		return -EINVAL;
	}

	/* Don't permit already bound caches to be re-bound */
	if (test_bit(CACHEFILES_READY, &cache->flags)) {
		pr_err("Cache already bound\n");
		return -EBUSY;
	}

	if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) {
		if (!strcmp(args, "ondemand")) {
			set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
		} else if (*args) {
			pr_err("Invalid argument to the 'bind' command\n");
			return -EINVAL;
		}
	} else if (*args) {
		pr_err("'bind' command doesn't take an argument\n");
		return -EINVAL;
	}

	/* Make sure we have copies of the tag string */
	if (!cache->tag) {
		/*
		 * The tag string is released by the fops->release()
		 * function, so we don't release it on error here
		 */
		cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
		if (!cache->tag)
			return -ENOMEM;
	}

	return cachefiles_add_cache(cache);
}
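
/*
 * With CONFIG_CACHEFILES_ONDEMAND enabled, "bind ondemand" selects on-demand
 * mode, in which read requests are queued to user space via the reqs xarray;
 * a plain "bind" keeps the traditional cachefilesd behaviour.
 */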

/*
 * Unbind a cache.
 */
static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
{
	_enter("");

	if (test_bit(CACHEFILES_READY, &cache->flags))
		cachefiles_withdraw_cache(cache);

	cachefiles_put_directory(cache->graveyard);
	cachefiles_put_directory(cache->store);
	mntput(cache->mnt);
	put_cred(cache->cache_cred);

	kfree(cache->rootdirname);
	kfree(cache->secctx);
	kfree(cache->tag);

	_leave("");
}