/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode:	The inode for which we are doing the crypto
 * @gfp_flags:	The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
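
/*
 * Example (illustrative sketch only, not part of this file's API): a
 * caller that needs a context outside the encrypt/decrypt helpers pairs
 * fscrypt_get_ctx() with fscrypt_release_ctx(). The function name
 * example_with_ctx() is hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static int example_with_ctx(struct inode *inode)
{
	struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode, GFP_NOFS);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* e.g. -ENOKEY if no key is set up */

	/* ... use ctx, e.g. stash it in bio->bi_private for a read ... */

	fscrypt_release_ctx(ctx);
	return 0;
}
#endif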

/**
 * page_crypt_complete() - completion callback for page crypto
 * @req: The asynchronous cipher request context
 * @res: The result of the cipher operation
 */
static void page_crypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	FS_DECRYPT = 0,
	FS_ENCRYPT,
} fscrypt_direction_t;

static int do_page_crypto(const struct inode *inode,
			fscrypt_direction_t rw, u64 lblk_num,
			struct page *src_page, struct page *dest_page,
			unsigned int len, unsigned int offs,
			gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
	} xts_tweak;
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: skcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		page_crypt_complete, &ecr);

	BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
	xts_tweak.index = cpu_to_le64(lblk_num);
	memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_%scrypt() returned %d\n",
			__func__, (rw == FS_DECRYPT) ? "de" : "en", res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:	The inode for which the encryption should take place
 * @page:	The page to encrypt. Must be locked for bounce-page
 *		encryption.
 * @len:	Length of data to encrypt in @page and encrypted
 *		data in returned page.
 * @offs:	Offset of data within @page and returned
 *		page holding encrypted data.
 * @lblk_num:	Logical block number. This must be unique for multiple
 *		calls with same inode, except when overwriting
 *		previously written data.
 * @gfp_flags:	The gfp flag for memory allocation
 *
 * Encrypts @page using the inode's encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input page is returned with its content
 * encrypted.
 *
 * Return: A page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				struct page *page,
				unsigned int len,
				unsigned int offs,
				u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
					page, ciphertext_page,
					len, offs, gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				page, ciphertext_page,
				len, offs, gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
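
/*
 * Example (illustrative sketch only): a filesystem's writeback path
 * typically encrypts a whole page into a bounce page before submitting
 * it for I/O, then releases the bounce page from its write-completion
 * path. The function name example_encrypt_for_write() is hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static int example_encrypt_for_write(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct page *cpage;

	cpage = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
					page->index, GFP_NOFS);
	if (IS_ERR(cpage))
		return PTR_ERR(cpage);

	/* ... submit cpage for I/O; once the write has completed: */
	fscrypt_restore_control_page(cpage);
	return 0;
}
#endif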

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:	The corresponding inode for the page to decrypt.
 * @page:	The page to decrypt. Must be locked in case
 *		it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:	Number of bytes in @page to be decrypted.
 * @offs:	Start of data in @page.
 * @lblk_num:	Logical block number.
 *
 * Decrypts @page in-place using the inode's encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	return do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page, len,
			offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
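
/*
 * Example (illustrative sketch only): a filesystem that has just read an
 * encrypted block into a page decrypts it in place before handing it to
 * the reader. The function name example_decrypt_after_read() is
 * hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static int example_decrypt_after_read(struct inode *inode, struct page *page)
{
	return fscrypt_decrypt_page(inode, page, PAGE_SIZE, 0, page->index);
}
#endif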

int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
					ZERO_PAGE(0), ciphertext_page,
					PAGE_SIZE, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
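
/*
 * Example (illustrative sketch only): when a filesystem must zero blocks
 * in an encrypted file (e.g. when filling a hole), it cannot simply
 * write zeroes to disk; it writes the encryption of zeroes instead. The
 * function name example_zero_blocks() is hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static int example_zero_blocks(struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int nr_blocks)
{
	if (inode->i_sb->s_cop->is_encrypted(inode))
		return fscrypt_zeroout_range(inode, lblk, pblk, nr_blocks);

	/* ... otherwise zero the blocks directly ... */
	return 0;
}
#endif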

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. For locking
	 * reasons we can't check whether the key has since been made
	 * available, so we fail the validation and let the filesystem's
	 * ->lookup() (e.g. ext4_lookup()) do this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
			(!cached_with_key && dir_has_key) ||
			(cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);
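
/*
 * Example (illustrative sketch only): a filesystem installs these
 * dentry_operations on dentries it creates inside encrypted directories,
 * typically from its ->lookup() method. The function name
 * example_lookup_hook() is hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static void example_lookup_hook(struct inode *dir, struct dentry *dentry)
{
	if (dir->i_sb->s_cop->is_encrypted(dir))
		d_set_d_op(dentry, &fscrypt_d_ops);
}
#endif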

/*
 * Call fscrypt_decrypt_page() on every page in the bio, reusing the
 * encryption context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page->mapping->host, page,
				PAGE_SIZE, 0, page->index);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
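
/*
 * Example (illustrative sketch only): a read-completion handler defers
 * decryption to the fscrypt read workqueue rather than decrypting in
 * interrupt context. The ctx would have been allocated with
 * fscrypt_get_ctx() before the read was submitted and stashed in
 * bio->bi_private; the function name example_read_endio() is
 * hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static void example_read_endio(struct bio *bio)
{
	struct fscrypt_ctx *ctx = bio->bi_private;

	if (bio->bi_error) {
		fscrypt_release_ctx(ctx);
		/* ... mark the pages as errored and unlock them ... */
		return;
	}
	fscrypt_decrypt_bio_pages(ctx, bio);
}
#endif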

void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* Bounce pages are unmapped; a page with a mapping is not one. */
	if ((*page)->mapping)
		return;

	/* This is an unmapped bounce page; find its encryption context. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);
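
/*
 * Example (illustrative sketch only): a write-completion path that holds
 * a page which may be either a bounce page or the original page cache
 * page can use fscrypt_pullback_bio_page() to recover the page cache
 * page. The function name example_write_end_bio() is hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static void example_write_end_bio(struct page *page)
{
	/* Replaces a bounce page with its control page and frees the
	 * bounce buffer and context; a non-bounce page is left alone. */
	fscrypt_pullback_bio_page(&page, true);
	end_page_writeback(page);
}
#endif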

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags:	fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/*
	 * No need to allocate a bounce page pool if there already is one or
	 * this FS won't use it.
	 */
	if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");