/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees it.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() on failure (never NULL).
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);

int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_IV_SIZE - sizeof(__le64)];
	} iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
	BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
	iv.index = cpu_to_le64(lblk_num);
	memset(iv.padding, 0, sizeof(iv.padding));

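	/*
	 * ESSIV, used with AES-128-CBC contents encryption: the block-number
	 * IV is itself encrypted with an AES-256 key derived by hashing the
	 * contents encryption key, so that on-disk IVs aren't predictable.
	 */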
	if (ci->ci_essiv_tfm != NULL) {
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
					  (u8 *)&iv);
	}

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: skcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_%scrypt() returned %d\n",
			__func__, (rw == FS_DECRYPT ? "de" : "en"), res);
		return res;
	}
	return 0;
}

struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
				       gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @page: The page to encrypt. Must be locked for bounce-page
 *	  encryption.
 * @len: Length of data to encrypt in @page and encrypted
 *	  data in returned page.
 * @offs: Offset of data within @page and returned
 *	  page holding encrypted data.
 * @lblk_num: Logical block number. This must be unique for multiple
 *	  calls with same inode, except when overwriting
 *	  previously written data.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Encrypts @page using the ctx encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input page is returned with its content
 * encrypted.
 *
 * Return: A page containing the encrypted content on success; an ERR_PTR()
 * on failure (never NULL).
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				  struct page *page,
				  unsigned int len,
				  unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
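	/*
	 * Stash the encryption context in the bounce page's page_private so
	 * that fscrypt_restore_control_page() can find and release it, and
	 * lock the bounce page to match the locked state of the original
	 * page; fscrypt_restore_control_page() unlocks it again.
	 */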
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
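
/*
 * A minimal sketch of the intended write-path usage, assuming
 * blocksize == PAGE_SIZE (the I/O helper below is hypothetical):
 *
 *	ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
 *					       page->index, GFP_NOFS);
 *	if (IS_ERR(ciphertext_page))
 *		return PTR_ERR(ciphertext_page);
 *	submit_page_write(ciphertext_page);
 *	...
 *	fscrypt_restore_control_page(ciphertext_page);	(after the write I/O)
 */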

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode: The corresponding inode for the page to decrypt.
 * @page: The page to decrypt. Must be locked in case
 *	  it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len: Number of bytes in @page to be decrypted.
 * @offs: Start of data in @page.
 * @lblk_num: Logical block number.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			 unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!IS_ENCRYPTED(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * whether the key has since been made available, due to locking
	 * reasons, so we fail the validation and let ext4_lookup() do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
			(!cached_with_key && dir_has_key) ||
			(cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);
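
/*
 * A sketch of typical use (the exact hook is filesystem-specific): a
 * filesystem's ->lookup() in an encrypted directory installs these ops on the
 * new dentry, e.g.
 *
 *	d_set_d_op(dentry, &fscrypt_d_ops);
 *
 * so that dentries cached before the key was available get dropped and looked
 * up again once it is.
 */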

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags: fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
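	/*
	 * Reads complete in interrupt context, where decryption cannot be
	 * done directly, so it is deferred to this workqueue; WQ_HIGHPRI
	 * keeps pending decryption work from being starved by regular work.
	 */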
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

struct kmem_cache *fscrypt_info_cachep;

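/*
 * Bio completion handlers typically run in atomic context, where the
 * skcipher API can't be used, so filesystems hand post-read decryption off to
 * this workqueue and finish the read from process context.
 */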
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);

struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}

/**
 * fscrypt_free_bounce_page() - free a ciphertext bounce page
 * @bounce_page: the bounce page to free, or NULL
 *
 * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
 * or by fscrypt_alloc_bounce_page() directly.
 */
void fscrypt_free_bounce_page(struct page *bounce_page)
{
	if (!bounce_page)
		return;
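	/*
	 * fscrypt_encrypt_pagecache_blocks() stored the original pagecache
	 * page in page_private and set PagePrivate; undo both before handing
	 * the bounce page back to the mempool.
	 */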
	set_page_private(bounce_page, (unsigned long)NULL);
	ClearPagePrivate(bounce_page);
	mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);

/*
 * Generate the IV for the given logical block number within the given file.
 * For filenames encryption, lblk_num == 0.
 *
 * Keep this in sync with fscrypt_limit_io_blocks(). fscrypt_limit_io_blocks()
 * needs to know about any IV generation methods where the low bits of IV don't
 * simply contain the lblk_num (e.g., IV_INO_LBLK_32).
 */
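/*
 * A worked example (informational only): with IV_INO_LBLK_64, block 5 of
 * inode 10 yields lblk_num = (10 << 32) | 5 = 0x0000000A00000005, stored
 * little-endian in iv->lblk_num. With the default policy the IV is just the
 * 64-bit block number, and with DIRECT_KEY the per-file nonce is copied in
 * as well.
 */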
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
			 const struct fscrypt_info *ci)
{
	u8 flags = fscrypt_policy_flags(&ci->ci_policy);

	memset(iv, 0, ci->ci_mode->ivsize);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
		WARN_ON_ONCE(lblk_num > U32_MAX);
		WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
		lblk_num |= (u64)ci->ci_inode->i_ino << 32;
	} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
		WARN_ON_ONCE(lblk_num > U32_MAX);
		lblk_num = (u32)(ci->ci_hashed_ino + lblk_num);
	} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
		memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
	}
	iv->lblk_num = cpu_to_le64(lblk_num);
}

/* Encrypt or decrypt a single filesystem block of file contents */
int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
			u64 lblk_num, struct page *src_page,
			struct page *dest_page, unsigned int len,
			unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
	int res = 0;

	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
		return -EINVAL;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode, "%scryption failed for block %llu: %d",
			    (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
		return res;
	}
	return 0;
}

/**
 * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a
 *					pagecache page
 * @page:      The locked pagecache page containing the block(s) to encrypt
 * @len:       Total size of the block(s) to encrypt.  Must be a nonzero
 *		multiple of the filesystem's block size.
 * @offs:      Byte offset within @page of the first block to encrypt.  Must be
 *		a multiple of the filesystem's block size.
 * @gfp_flags: Memory allocation flags.  See details below.
 *
 * A new bounce page is allocated, and the specified block(s) are encrypted
 * into it.  In the bounce page, the ciphertext block(s) will be located at the
 * same offsets at which the plaintext block(s) were located in the source
 * page; any other parts of the bounce page will be left uninitialized.
 * However, normally blocksize == PAGE_SIZE and the whole page is encrypted at
 * once.
 *
 * This is for use by the filesystem's ->writepages() method.
 *
 * The bounce page allocation is mempool-backed, so it will always succeed when
 * @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS.  However,
 * only the first page of each bio can be allocated this way.  To prevent
 * deadlocks, for any additional pages a mask like GFP_NOWAIT must be used.
 *
 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
 */
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
					      unsigned int len,
					      unsigned int offs,
					      gfp_t gfp_flags)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	struct page *ciphertext_page;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return ERR_PTR(-EINVAL);

	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
					  page, ciphertext_page,
					  blocksize, i, gfp_flags);
		if (err) {
			fscrypt_free_bounce_page(ciphertext_page);
			return ERR_PTR(err);
		}
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)page);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
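
/*
 * A minimal sketch of a ->writepages() caller, assuming blocksize ==
 * PAGE_SIZE (bio construction and submission are filesystem-specific):
 *
 *	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
 *						       GFP_NOFS);
 *	if (IS_ERR(bounce_page))
 *		return PTR_ERR(bounce_page);
 *	bio_add_page(bio, bounce_page, PAGE_SIZE, 0);
 *	...
 *	fscrypt_free_bounce_page(bounce_page);	(after the write completes)
 */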

/**
 * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to encrypt
 * @len:       Size of block to encrypt.  This must be a multiple of
 *		FSCRYPT_CONTENTS_ALIGNMENT.
 * @offs:      Byte offset within @page at which the block to encrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *		number of the block within the file
 * @gfp_flags: Memory allocation flags
 *
 * Encrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
				   len, offs, gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);

/**
 * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a
 *					pagecache page
 * @page:      The locked pagecache page containing the block(s) to decrypt
 * @len:       Total size of the block(s) to decrypt.  Must be a nonzero
 *		multiple of the filesystem's block size.
 * @offs:      Byte offset within @page of the first block to decrypt.  Must be
 *		a multiple of the filesystem's block size.
 *
 * The specified block(s) are decrypted in-place within the pagecache page,
 * which must still be locked and not uptodate.  Normally, blocksize ==
 * PAGE_SIZE and the whole page is decrypted at once.
 *
 * This is for use by the filesystem's ->readahead() method.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
				     unsigned int offs)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return -EINVAL;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return -EINVAL;

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
					  page, blocksize, i, GFP_NOFS);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);

/**
 * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
 * @inode:    The inode to which this block belongs
 * @page:     The page containing the block to decrypt
 * @len:      Size of block to decrypt.  This must be a multiple of
 *		FSCRYPT_CONTENTS_ALIGNMENT.
 * @offs:     Byte offset within @page at which the block to decrypt begins
 * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
 *		number of the block within the file
 *
 * Decrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num)
{
	return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
				   len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags: fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int err = 0;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto out_unlock;

	err = -ENOMEM;
	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto out_unlock;

	err = 0;
out_unlock:
	mutex_unlock(&fscrypt_init_mutex);
	return err;
}

void fscrypt_msg(const struct inode *inode, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (inode && inode->i_ino)
		printk("%sfscrypt (%s, inode %lu): %pV\n",
		       level, inode->i_sb->s_id, inode->i_ino, &vaf);
	else if (inode)
		printk("%sfscrypt (%s): %pV\n", level, inode->i_sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}

/**
 * fscrypt_init() - Set up for fs encryption.
 *
 * Return: 0 on success; -errno on failure
 */
static int __init fscrypt_init(void)
{
	int err = -ENOMEM;

	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_queue;

	err = fscrypt_init_keyring();
	if (err)
		goto fail_free_info;

	return 0;

fail_free_info:
	kmem_cache_destroy(fscrypt_info_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return err;
}
late_initcall(fscrypt_init)