v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * This contains encryption functions for per-file encryption.
  4 *
  5 * Copyright (C) 2015, Google, Inc.
  6 * Copyright (C) 2015, Motorola Mobility
  7 *
  8 * Written by Michael Halcrow, 2014.
  9 *
 10 * Filename encryption additions
 11 *	Uday Savagaonkar, 2014
 12 * Encryption policy handling additions
 13 *	Ildar Muslukhov, 2014
 14 * Add fscrypt_pullback_bio_page()
 15 *	Jaegeuk Kim, 2015.
 16 *
 17 * This has not yet undergone a rigorous security audit.
 18 *
 19 * The usage of AES-XTS should conform to recommendations in NIST
 20 * Special Publication 800-38E and IEEE P1619/D16.
 21 */
 22
 23#include <linux/pagemap.h>
 24#include <linux/mempool.h>
 25#include <linux/module.h>
 26#include <linux/scatterlist.h>
 27#include <linux/ratelimit.h>
 28#include <linux/dcache.h>
 29#include <linux/namei.h>
 30#include <crypto/aes.h>
 31#include <crypto/skcipher.h>
 32#include "fscrypt_private.h"
 33
 34static unsigned int num_prealloc_crypto_pages = 32;
 35static unsigned int num_prealloc_crypto_ctxs = 128;
 36
 37module_param(num_prealloc_crypto_pages, uint, 0444);
 38MODULE_PARM_DESC(num_prealloc_crypto_pages,
 39		"Number of crypto pages to preallocate");
 40module_param(num_prealloc_crypto_ctxs, uint, 0444);
 41MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
 42		"Number of crypto contexts to preallocate");
 43
 44static mempool_t *fscrypt_bounce_page_pool = NULL;
 45
 46static LIST_HEAD(fscrypt_free_ctxs);
 47static DEFINE_SPINLOCK(fscrypt_ctx_lock);
 48
 49static struct workqueue_struct *fscrypt_read_workqueue;
 50static DEFINE_MUTEX(fscrypt_init_mutex);
 51
 52static struct kmem_cache *fscrypt_ctx_cachep;
 53struct kmem_cache *fscrypt_info_cachep;
 54
 55void fscrypt_enqueue_decrypt_work(struct work_struct *work)
 56{
 57	queue_work(fscrypt_read_workqueue, work);
 58}
 59EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
 60
 61/**
 62 * fscrypt_release_ctx() - Release a decryption context
 63 * @ctx: The decryption context to release.
 64 *
 65 * If the decryption context was allocated from the pre-allocated pool, return
 66 * it to that pool.  Else, free it.
 67 */
 68void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
 69{
 70	unsigned long flags;
 71
 72	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
 73		kmem_cache_free(fscrypt_ctx_cachep, ctx);
 74	} else {
 75		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
 76		list_add(&ctx->free_list, &fscrypt_free_ctxs);
 77		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
 78	}
 79}
 80EXPORT_SYMBOL(fscrypt_release_ctx);
 81
 82/**
 83 * fscrypt_get_ctx() - Get a decryption context
 84 * @gfp_flags:   The gfp flag for memory allocation
 85 *
 86 * Allocate and initialize a decryption context.
 87 *
 88 * Return: A new decryption context on success; an ERR_PTR() otherwise.
 89 */
 90struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
 91{
 92	struct fscrypt_ctx *ctx;
 93	unsigned long flags;
 94
 95	/*
 96	 * First try getting a ctx from the free list so that we don't have to
 97	 * call into the slab allocator.
 98	 */
 99	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
100	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
101					struct fscrypt_ctx, free_list);
102	if (ctx)
103		list_del(&ctx->free_list);
104	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
105	if (!ctx) {
106		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
107		if (!ctx)
108			return ERR_PTR(-ENOMEM);
109		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
110	} else {
111		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
112	}
113	return ctx;
114}
115EXPORT_SYMBOL(fscrypt_get_ctx);
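
The get/release pair above is used on the read path, where a context is taken per decryption job and handed back when the work completes. The fragment below is an illustrative sketch only (a hypothetical caller, not part of the listing above):

/* Illustrative sketch -- hypothetical caller, not kernel source. */
static int example_use_decrypt_ctx(void)
{
	struct fscrypt_ctx *ctx;

	ctx = fscrypt_get_ctx(GFP_NOFS);	/* free list first, slab as fallback */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* ... associate ctx with the read being processed ... */

	fscrypt_release_ctx(ctx);		/* back to the pool, or freed */
	return 0;
}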
116
117struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
118{
119	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
120}
121
122/**
123 * fscrypt_free_bounce_page() - free a ciphertext bounce page
124 *
125 * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
126 * or by fscrypt_alloc_bounce_page() directly.
127 */
128void fscrypt_free_bounce_page(struct page *bounce_page)
129{
130	if (!bounce_page)
131		return;
132	set_page_private(bounce_page, (unsigned long)NULL);
133	ClearPagePrivate(bounce_page);
134	mempool_free(bounce_page, fscrypt_bounce_page_pool);
135}
136EXPORT_SYMBOL(fscrypt_free_bounce_page);
137
138void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
139			 const struct fscrypt_info *ci)
140{
141	memset(iv, 0, ci->ci_mode->ivsize);
142	iv->lblk_num = cpu_to_le64(lblk_num);
143
144	if (fscrypt_is_direct_key_policy(&ci->ci_policy))
145		memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);
146
147	if (ci->ci_essiv_tfm != NULL)
148		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
149}
150
151/* Encrypt or decrypt a single filesystem block of file contents */
152int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
153			u64 lblk_num, struct page *src_page,
154			struct page *dest_page, unsigned int len,
155			unsigned int offs, gfp_t gfp_flags)
156{
157	union fscrypt_iv iv;
158	struct skcipher_request *req = NULL;
159	DECLARE_CRYPTO_WAIT(wait);
160	struct scatterlist dst, src;
161	struct fscrypt_info *ci = inode->i_crypt_info;
162	struct crypto_skcipher *tfm = ci->ci_ctfm;
163	int res = 0;
164
165	if (WARN_ON_ONCE(len <= 0))
166		return -EINVAL;
167	if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
168		return -EINVAL;
169
170	fscrypt_generate_iv(&iv, lblk_num, ci);
171
172	req = skcipher_request_alloc(tfm, gfp_flags);
173	if (!req)
174		return -ENOMEM;
175
176	skcipher_request_set_callback(
177		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
178		crypto_req_done, &wait);
179
180	sg_init_table(&dst, 1);
181	sg_set_page(&dst, dest_page, len, offs);
182	sg_init_table(&src, 1);
183	sg_set_page(&src, src_page, len, offs);
184	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
185	if (rw == FS_DECRYPT)
186		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
187	else
188		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
189	skcipher_request_free(req);
190	if (res) {
191		fscrypt_err(inode, "%scryption failed for block %llu: %d",
192			    (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
193		return res;
194	}
195	return 0;
196}
197
198/**
199 * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page
200 * @page:      The locked pagecache page containing the block(s) to encrypt
201 * @len:       Total size of the block(s) to encrypt.  Must be a nonzero
202 *		multiple of the filesystem's block size.
203 * @offs:      Byte offset within @page of the first block to encrypt.  Must be
204 *		a multiple of the filesystem's block size.
205 * @gfp_flags: Memory allocation flags
206 *
207 * A new bounce page is allocated, and the specified block(s) are encrypted into
208 * it.  In the bounce page, the ciphertext block(s) will be located at the same
209 * offsets at which the plaintext block(s) were located in the source page; any
210 * other parts of the bounce page will be left uninitialized.  However, normally
211 * blocksize == PAGE_SIZE and the whole page is encrypted at once.
212 *
213 * This is for use by the filesystem's ->writepages() method.
214 *
215 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
216 */
217struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
218					      unsigned int len,
219					      unsigned int offs,
220					      gfp_t gfp_flags)
221
222{
223	const struct inode *inode = page->mapping->host;
224	const unsigned int blockbits = inode->i_blkbits;
225	const unsigned int blocksize = 1 << blockbits;
226	struct page *ciphertext_page;
227	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
228		       (offs >> blockbits);
229	unsigned int i;
230	int err;
231
232	if (WARN_ON_ONCE(!PageLocked(page)))
233		return ERR_PTR(-EINVAL);
234
235	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
236		return ERR_PTR(-EINVAL);
237
238	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
239	if (!ciphertext_page)
240		return ERR_PTR(-ENOMEM);
241
242	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
243		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
244					  page, ciphertext_page,
245					  blocksize, i, gfp_flags);
246		if (err) {
247			fscrypt_free_bounce_page(ciphertext_page);
248			return ERR_PTR(err);
249		}
250	}
251	SetPagePrivate(ciphertext_page);
252	set_page_private(ciphertext_page, (unsigned long)page);
253	return ciphertext_page;
254}
255EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
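
As the comment above says, this helper serves the filesystem's ->writepages() path. The sketch below is illustrative only: a hypothetical writeback fragment, assuming blocksize == PAGE_SIZE; example_submit_write() is a made-up stand-in for the filesystem's own I/O submission:

/* Illustrative sketch -- hypothetical caller; example_submit_write() is invented. */
static int example_write_encrypted_page(struct page *page)
{
	struct page *bounce_page;
	int err;

	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
						       GFP_NOFS);
	if (IS_ERR(bounce_page))
		return PTR_ERR(bounce_page);

	err = example_submit_write(bounce_page);	/* write the ciphertext */
	fscrypt_free_bounce_page(bounce_page);		/* done with the bounce page */
	return err;
}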
256
257/**
258 * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
259 * @inode:     The inode to which this block belongs
260 * @page:      The page containing the block to encrypt
261 * @len:       Size of block to encrypt.  Doesn't need to be a multiple of the
262 *		fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
263 * @offs:      Byte offset within @page at which the block to encrypt begins
264 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
265 *		number of the block within the file
266 * @gfp_flags: Memory allocation flags
267 *
268 * Encrypt a possibly-compressed filesystem block that is located in an
269 * arbitrary page, not necessarily in the original pagecache page.  The @inode
270 * and @lblk_num must be specified, as they can't be determined from @page.
271 *
272 * Return: 0 on success; -errno on failure
273 */
274int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
275				  unsigned int len, unsigned int offs,
276				  u64 lblk_num, gfp_t gfp_flags)
277{
278	return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
279				   len, offs, gfp_flags);
280}
281EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
282
283/**
284 * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page
285 * @page:      The locked pagecache page containing the block(s) to decrypt
286 * @len:       Total size of the block(s) to decrypt.  Must be a nonzero
287 *		multiple of the filesystem's block size.
288 * @offs:      Byte offset within @page of the first block to decrypt.  Must be
289 *		a multiple of the filesystem's block size.
290 *
291 * The specified block(s) are decrypted in-place within the pagecache page,
292 * which must still be locked and not uptodate.  Normally, blocksize ==
293 * PAGE_SIZE and the whole page is decrypted at once.
294 *
295 * This is for use by the filesystem's ->readpages() method.
296 *
297 * Return: 0 on success; -errno on failure
298 */
299int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
300				     unsigned int offs)
301{
302	const struct inode *inode = page->mapping->host;
303	const unsigned int blockbits = inode->i_blkbits;
304	const unsigned int blocksize = 1 << blockbits;
305	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
306		       (offs >> blockbits);
307	unsigned int i;
308	int err;
309
310	if (WARN_ON_ONCE(!PageLocked(page)))
311		return -EINVAL;
312
313	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
314		return -EINVAL;
315
316	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
317		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
318					  page, blocksize, i, GFP_NOFS);
319		if (err)
320			return err;
321	}
322	return 0;
323}
324EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
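
This is the ->readpages() counterpart: once the ciphertext has been read into the still-locked, not-yet-uptodate pagecache page, it is decrypted in place. A minimal sketch of a hypothetical post-read step, again assuming blocksize == PAGE_SIZE:

/* Illustrative sketch -- hypothetical read-completion step, not kernel source. */
static void example_finish_encrypted_read(struct page *page)
{
	if (fscrypt_decrypt_pagecache_blocks(page, PAGE_SIZE, 0) == 0)
		SetPageUptodate(page);
	else
		SetPageError(page);
	unlock_page(page);
}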
325
326/**
327 * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
328 * @inode:     The inode to which this block belongs
329 * @page:      The page containing the block to decrypt
330 * @len:       Size of block to decrypt.  Doesn't need to be a multiple of the
331 *		fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
332 * @offs:      Byte offset within @page at which the block to decrypt begins
333 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
334 *		number of the block within the file
335 *
336 * Decrypt a possibly-compressed filesystem block that is located in an
337 * arbitrary page, not necessarily in the original pagecache page.  The @inode
338 * and @lblk_num must be specified, as they can't be determined from @page.
339 *
340 * Return: 0 on success; -errno on failure
341 */
342int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
343				  unsigned int len, unsigned int offs,
344				  u64 lblk_num)
345{
346	return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
347				   len, offs, GFP_NOFS);
348}
349EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
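
The in-place variants take @inode and @lblk_num explicitly because the page holding the data need not be the original pagecache page. An illustrative sketch (hypothetical helper and names) decrypting one block that was read into a scratch page:

/* Illustrative sketch -- hypothetical helper, not kernel source. */
static int example_decrypt_scratch_block(const struct inode *inode,
					 struct page *scratch_page,
					 u64 lblk_num)
{
	const unsigned int blocksize = 1 << inode->i_blkbits;

	return fscrypt_decrypt_block_inplace(inode, scratch_page,
					     blocksize, 0, lblk_num);
}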
350
351/*
352 * Validate dentries in encrypted directories to make sure we aren't potentially
353 * caching stale dentries after a key has been added.
354 */
355static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
356{
357	struct dentry *dir;
358	int err;
359	int valid;
360
361	/*
362	 * Plaintext names are always valid, since fscrypt doesn't support
363	 * reverting to ciphertext names without evicting the directory's inode
364	 * -- which implies eviction of the dentries in the directory.
365	 */
366	if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
367		return 1;
368
369	/*
370	 * Ciphertext name; valid if the directory's key is still unavailable.
371	 *
372	 * Although fscrypt forbids rename() on ciphertext names, we still must
373	 * use dget_parent() here rather than use ->d_parent directly.  That's
374	 * because a corrupted fs image may contain directory hard links, which
375	 * the VFS handles by moving the directory's dentry tree in the dcache
376	 * each time ->lookup() finds the directory and it already has a dentry
377	 * elsewhere.  Thus ->d_parent can be changing, and we must safely grab
378	 * a reference to some ->d_parent to prevent it from being freed.
379	 */
380
381	if (flags & LOOKUP_RCU)
382		return -ECHILD;
383
384	dir = dget_parent(dentry);
385	err = fscrypt_get_encryption_info(d_inode(dir));
386	valid = !fscrypt_has_encryption_key(d_inode(dir));
387	dput(dir);
388
389	if (err < 0)
390		return err;
391
392	return valid;
393}
394
395const struct dentry_operations fscrypt_d_ops = {
396	.d_revalidate = fscrypt_d_revalidate,
397};
398
399static void fscrypt_destroy(void)
400{
401	struct fscrypt_ctx *pos, *n;
402
403	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
404		kmem_cache_free(fscrypt_ctx_cachep, pos);
405	INIT_LIST_HEAD(&fscrypt_free_ctxs);
406	mempool_destroy(fscrypt_bounce_page_pool);
407	fscrypt_bounce_page_pool = NULL;
408}
409
410/**
411 * fscrypt_initialize() - allocate major buffers for fs encryption.
412 * @cop_flags:  fscrypt operations flags
413 *
414 * We only call this when we start accessing encrypted files, since it
415 * results in memory getting allocated that wouldn't otherwise be used.
416 *
417 * Return: Zero on success, non-zero otherwise.
418 */
419int fscrypt_initialize(unsigned int cop_flags)
420{
421	int i, res = -ENOMEM;
422
423	/* No need to allocate a bounce page pool if this FS won't use it. */
424	if (cop_flags & FS_CFLG_OWN_PAGES)
425		return 0;
426
427	mutex_lock(&fscrypt_init_mutex);
428	if (fscrypt_bounce_page_pool)
429		goto already_initialized;
430
431	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
432		struct fscrypt_ctx *ctx;
433
434		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
435		if (!ctx)
436			goto fail;
437		list_add(&ctx->free_list, &fscrypt_free_ctxs);
438	}
439
440	fscrypt_bounce_page_pool =
441		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
442	if (!fscrypt_bounce_page_pool)
443		goto fail;
444
445already_initialized:
446	mutex_unlock(&fscrypt_init_mutex);
447	return 0;
448fail:
449	fscrypt_destroy();
450	mutex_unlock(&fscrypt_init_mutex);
451	return res;
452}
453
454void fscrypt_msg(const struct inode *inode, const char *level,
455		 const char *fmt, ...)
456{
457	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
458				      DEFAULT_RATELIMIT_BURST);
459	struct va_format vaf;
460	va_list args;
461
462	if (!__ratelimit(&rs))
463		return;
464
465	va_start(args, fmt);
466	vaf.fmt = fmt;
467	vaf.va = &args;
468	if (inode)
469		printk("%sfscrypt (%s, inode %lu): %pV\n",
470		       level, inode->i_sb->s_id, inode->i_ino, &vaf);
471	else
472		printk("%sfscrypt: %pV\n", level, &vaf);
473	va_end(args);
474}
475
476/**
477 * fscrypt_init() - Set up for fs encryption.
478 */
479static int __init fscrypt_init(void)
480{
481	int err = -ENOMEM;
482
483	/*
484	 * Use an unbound workqueue to allow bios to be decrypted in parallel
485	 * even when they happen to complete on the same CPU.  This sacrifices
486	 * locality, but it's worthwhile since decryption is CPU-intensive.
487	 *
488	 * Also use a high-priority workqueue to prioritize decryption work,
489	 * which blocks reads from completing, over regular application tasks.
490	 */
491	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
492						 WQ_UNBOUND | WQ_HIGHPRI,
493						 num_online_cpus());
494	if (!fscrypt_read_workqueue)
495		goto fail;
496
497	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
498	if (!fscrypt_ctx_cachep)
499		goto fail_free_queue;
500
501	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
502	if (!fscrypt_info_cachep)
503		goto fail_free_ctx;
504
505	err = fscrypt_init_keyring();
506	if (err)
507		goto fail_free_info;
508
509	return 0;
510
511fail_free_info:
512	kmem_cache_destroy(fscrypt_info_cachep);
513fail_free_ctx:
514	kmem_cache_destroy(fscrypt_ctx_cachep);
515fail_free_queue:
516	destroy_workqueue(fscrypt_read_workqueue);
517fail:
518	return err;
519}
520late_initcall(fscrypt_init)
 
v4.10.11
 
  1/*
  2 * This contains encryption functions for per-file encryption.
  3 *
  4 * Copyright (C) 2015, Google, Inc.
  5 * Copyright (C) 2015, Motorola Mobility
  6 *
  7 * Written by Michael Halcrow, 2014.
  8 *
  9 * Filename encryption additions
 10 *	Uday Savagaonkar, 2014
 11 * Encryption policy handling additions
 12 *	Ildar Muslukhov, 2014
 13 * Add fscrypt_pullback_bio_page()
 14 *	Jaegeuk Kim, 2015.
 15 *
 16 * This has not yet undergone a rigorous security audit.
 17 *
 18 * The usage of AES-XTS should conform to recommendations in NIST
 19 * Special Publication 800-38E and IEEE P1619/D16.
 20 */
 21
 22#include <linux/pagemap.h>
 23#include <linux/mempool.h>
 24#include <linux/module.h>
 25#include <linux/scatterlist.h>
 26#include <linux/ratelimit.h>
 27#include <linux/bio.h>
 28#include <linux/dcache.h>
 29#include <linux/namei.h>
 30#include "fscrypt_private.h"
 31
 32static unsigned int num_prealloc_crypto_pages = 32;
 33static unsigned int num_prealloc_crypto_ctxs = 128;
 34
 35module_param(num_prealloc_crypto_pages, uint, 0444);
 36MODULE_PARM_DESC(num_prealloc_crypto_pages,
 37		"Number of crypto pages to preallocate");
 38module_param(num_prealloc_crypto_ctxs, uint, 0444);
 39MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
 40		"Number of crypto contexts to preallocate");
 41
 42static mempool_t *fscrypt_bounce_page_pool = NULL;
 43
 44static LIST_HEAD(fscrypt_free_ctxs);
 45static DEFINE_SPINLOCK(fscrypt_ctx_lock);
 46
 47static struct workqueue_struct *fscrypt_read_workqueue;
 48static DEFINE_MUTEX(fscrypt_init_mutex);
 49
 50static struct kmem_cache *fscrypt_ctx_cachep;
 51struct kmem_cache *fscrypt_info_cachep;
 52
 53/**
 54 * fscrypt_release_ctx() - Releases an encryption context
 55 * @ctx: The encryption context to release.
 56 *
 57 * If the encryption context was allocated from the pre-allocated pool, returns
 58 * it to that pool. Else, frees it.
 59 *
 60 * If there's a bounce page in the context, this frees that.
 61 */
 62void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
 63{
 64	unsigned long flags;
 65
 66	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
 67		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
 68		ctx->w.bounce_page = NULL;
 69	}
 70	ctx->w.control_page = NULL;
 71	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
 72		kmem_cache_free(fscrypt_ctx_cachep, ctx);
 73	} else {
 74		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
 75		list_add(&ctx->free_list, &fscrypt_free_ctxs);
 76		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
 77	}
 78}
 79EXPORT_SYMBOL(fscrypt_release_ctx);
 80
 81/**
 82 * fscrypt_get_ctx() - Gets an encryption context
 83 * @inode:       The inode for which we are doing the crypto
 84 * @gfp_flags:   The gfp flag for memory allocation
 85 *
 86 * Allocates and initializes an encryption context.
 87 *
 88 * Return: An allocated and initialized encryption context on success; error
 89 * value or NULL otherwise.
 90 */
 91struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
 92{
 93	struct fscrypt_ctx *ctx = NULL;
 94	struct fscrypt_info *ci = inode->i_crypt_info;
 95	unsigned long flags;
 96
 97	if (ci == NULL)
 98		return ERR_PTR(-ENOKEY);
 99
100	/*
101	 * We first try getting the ctx from a free list because in
102	 * the common case the ctx will have an allocated and
103	 * initialized crypto tfm, so it's probably a worthwhile
104	 * optimization. For the bounce page, we first try getting it
105	 * from the kernel allocator because that's just about as fast
106	 * as getting it from a list and because a cache of free pages
107	 * should generally be a "last resort" option for a filesystem
108	 * to be able to do its job.
109	 */
110	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
111	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
112					struct fscrypt_ctx, free_list);
113	if (ctx)
114		list_del(&ctx->free_list);
115	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
116	if (!ctx) {
117		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
118		if (!ctx)
119			return ERR_PTR(-ENOMEM);
120		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
121	} else {
122		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
123	}
124	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
125	return ctx;
126}
127EXPORT_SYMBOL(fscrypt_get_ctx);
128
129/**
130 * page_crypt_complete() - completion callback for page crypto
131 * @req: The asynchronous cipher request context
132 * @res: The result of the cipher operation
133 */
134static void page_crypt_complete(struct crypto_async_request *req, int res)
135{
136	struct fscrypt_completion_result *ecr = req->data;
137
138	if (res == -EINPROGRESS)
139		return;
140	ecr->res = res;
141	complete(&ecr->completion);
142}
143
144typedef enum {
145	FS_DECRYPT = 0,
146	FS_ENCRYPT,
147} fscrypt_direction_t;
148
149static int do_page_crypto(const struct inode *inode,
150			fscrypt_direction_t rw, u64 lblk_num,
151			struct page *src_page, struct page *dest_page,
152			unsigned int len, unsigned int offs,
153			gfp_t gfp_flags)
154{
155	struct {
156		__le64 index;
157		u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
158	} xts_tweak;
159	struct skcipher_request *req = NULL;
160	DECLARE_FS_COMPLETION_RESULT(ecr);
161	struct scatterlist dst, src;
162	struct fscrypt_info *ci = inode->i_crypt_info;
163	struct crypto_skcipher *tfm = ci->ci_ctfm;
164	int res = 0;
165
166	BUG_ON(len == 0);
167
168	req = skcipher_request_alloc(tfm, gfp_flags);
169	if (!req) {
170		printk_ratelimited(KERN_ERR
171				"%s: crypto_request_alloc() failed\n",
172				__func__);
173		return -ENOMEM;
174	}
175
176	skcipher_request_set_callback(
177		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
178		page_crypt_complete, &ecr);
179
180	BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
181	xts_tweak.index = cpu_to_le64(lblk_num);
182	memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
183
184	sg_init_table(&dst, 1);
185	sg_set_page(&dst, dest_page, len, offs);
186	sg_init_table(&src, 1);
187	sg_set_page(&src, src_page, len, offs);
188	skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
189	if (rw == FS_DECRYPT)
190		res = crypto_skcipher_decrypt(req);
191	else
192		res = crypto_skcipher_encrypt(req);
193	if (res == -EINPROGRESS || res == -EBUSY) {
194		BUG_ON(req->base.data != &ecr);
195		wait_for_completion(&ecr.completion);
196		res = ecr.res;
197	}
198	skcipher_request_free(req);
199	if (res) {
200		printk_ratelimited(KERN_ERR
201			"%s: crypto_skcipher_encrypt() returned %d\n",
202			__func__, res);
203		return res;
204	}
205	return 0;
206}
207
208static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
209{
210	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
211	if (ctx->w.bounce_page == NULL)
212		return ERR_PTR(-ENOMEM);
213	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
214	return ctx->w.bounce_page;
215}
216
217/**
218 * fscrypt_encrypt_page() - Encrypts a page
219 * @inode:     The inode for which the encryption should take place
220 * @page:      The page to encrypt. Must be locked for bounce-page
221 *             encryption.
222 * @len:       Length of data to encrypt in @page and encrypted
223 *             data in returned page.
224 * @offs:      Offset of data within @page and returned
225 *             page holding encrypted data.
226 * @lblk_num:  Logical block number. This must be unique for multiple
227 *             calls with same inode, except when overwriting
228 *             previously written data.
229 * @gfp_flags: The gfp flag for memory allocation
230 *
231 * Encrypts @page using the ctx encryption context. Performs encryption
232 * either in-place or into a newly allocated bounce page.
233 * Called on the page write path.
234 *
235 * Bounce page allocation is the default.
236 * In this case, the contents of @page are encrypted and stored in an
237 * allocated bounce page. @page has to be locked and the caller must call
238 * fscrypt_restore_control_page() on the returned ciphertext page to
239 * release the bounce buffer and the encryption context.
240 *
241 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
242 * fscrypt_operations. Here, the input-page is returned with its content
243 * encrypted.
244 *
245 * Return: A page with the encrypted content on success. Else, an
246 * error value or NULL.
247 */
248struct page *fscrypt_encrypt_page(const struct inode *inode,
249				struct page *page,
250				unsigned int len,
251				unsigned int offs,
252				u64 lblk_num, gfp_t gfp_flags)
253
254{
255	struct fscrypt_ctx *ctx;
256	struct page *ciphertext_page = page;
257	int err;
258
259	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
260
261	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
262		/* with inplace-encryption we just encrypt the page */
263		err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
264					page, ciphertext_page,
265					len, offs, gfp_flags);
266		if (err)
267			return ERR_PTR(err);
268
269		return ciphertext_page;
270	}
271
272	BUG_ON(!PageLocked(page));
273
274	ctx = fscrypt_get_ctx(inode, gfp_flags);
275	if (IS_ERR(ctx))
276		return (struct page *)ctx;
277
278	/* The encryption operation will require a bounce page. */
279	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
280	if (IS_ERR(ciphertext_page))
281		goto errout;
282
283	ctx->w.control_page = page;
284	err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
285					page, ciphertext_page,
286					len, offs, gfp_flags);
287	if (err) {
288		ciphertext_page = ERR_PTR(err);
289		goto errout;
290	}
291	SetPagePrivate(ciphertext_page);
292	set_page_private(ciphertext_page, (unsigned long)ctx);
293	lock_page(ciphertext_page);
294	return ciphertext_page;
295
296errout:
297	fscrypt_release_ctx(ctx);
298	return ciphertext_page;
299}
300EXPORT_SYMBOL(fscrypt_encrypt_page);
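
In this older API the returned bounce page carries its fscrypt_ctx in page_private() and comes back locked, so the caller must hand it back with fscrypt_restore_control_page() once the write is done. A hypothetical writeback fragment (illustrative only; example_submit_write() is invented), assuming the default bounce-page mode and blocksize == PAGE_SIZE:

/* Illustrative sketch -- hypothetical caller, not kernel source. */
static int example_writepage_encrypted(const struct inode *inode,
				       struct page *page)
{
	struct page *ciphertext_page;
	int err;

	ciphertext_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
					       page->index, GFP_NOFS);
	if (IS_ERR(ciphertext_page))
		return PTR_ERR(ciphertext_page);

	err = example_submit_write(ciphertext_page);	/* write the ciphertext */
	fscrypt_restore_control_page(ciphertext_page);	/* unlock + release ctx */
	return err;
}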
301
302/**
303 * fscrypt_decrypt_page() - Decrypts a page in-place
304 * @inode:     The corresponding inode for the page to decrypt.
305 * @page:      The page to decrypt. Must be locked in case
306 *             it is a writeback page (FS_CFLG_OWN_PAGES unset).
307 * @len:       Number of bytes in @page to be decrypted.
308 * @offs:      Start of data in @page.
309 * @lblk_num:  Logical block number.
310 *
311 * Decrypts page in-place using the ctx encryption context.
312 *
313 * Called from the read completion callback.
314 *
315 * Return: Zero on success, non-zero otherwise.
316 */
317int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
318			unsigned int len, unsigned int offs, u64 lblk_num)
319{
320	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
321		BUG_ON(!PageLocked(page));
322
323	return do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page, len,
324			offs, GFP_NOFS);
325}
326EXPORT_SYMBOL(fscrypt_decrypt_page);
327
328int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
329				sector_t pblk, unsigned int len)
330{
331	struct fscrypt_ctx *ctx;
332	struct page *ciphertext_page = NULL;
333	struct bio *bio;
334	int ret, err = 0;
335
336	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
337
338	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
339	if (IS_ERR(ctx))
340		return PTR_ERR(ctx);
341
342	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
343	if (IS_ERR(ciphertext_page)) {
344		err = PTR_ERR(ciphertext_page);
345		goto errout;
346	}
347
348	while (len--) {
349		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
350					ZERO_PAGE(0), ciphertext_page,
351					PAGE_SIZE, 0, GFP_NOFS);
352		if (err)
353			goto errout;
354
355		bio = bio_alloc(GFP_NOWAIT, 1);
356		if (!bio) {
357			err = -ENOMEM;
358			goto errout;
359		}
360		bio->bi_bdev = inode->i_sb->s_bdev;
361		bio->bi_iter.bi_sector =
362			pblk << (inode->i_sb->s_blocksize_bits - 9);
363		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
364		ret = bio_add_page(bio, ciphertext_page,
365					inode->i_sb->s_blocksize, 0);
366		if (ret != inode->i_sb->s_blocksize) {
367			/* should never happen! */
368			WARN_ON(1);
369			bio_put(bio);
370			err = -EIO;
371			goto errout;
372		}
373		err = submit_bio_wait(bio);
374		if ((err == 0) && bio->bi_error)
375			err = -EIO;
376		bio_put(bio);
377		if (err)
378			goto errout;
379		lblk++;
380		pblk++;
381	}
382	err = 0;
383errout:
384	fscrypt_release_ctx(ctx);
385	return err;
386}
387EXPORT_SYMBOL(fscrypt_zeroout_range);
388
389/*
390 * Validate dentries for encrypted directories to make sure we aren't
391 * potentially caching stale data after a key has been added or
392 * removed.
393 */
394static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
395{
396	struct dentry *dir;
397	int dir_has_key, cached_with_key;
398
399	if (flags & LOOKUP_RCU)
400		return -ECHILD;
401
402	dir = dget_parent(dentry);
403	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
404		dput(dir);
405		return 0;
406	}
407
408	/* this should eventually be a flag in d_flags */
409	spin_lock(&dentry->d_lock);
410	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
411	spin_unlock(&dentry->d_lock);
412	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
413	dput(dir);
414
415	/*
416	 * If the dentry was cached without the key, and it is a
417	 * negative dentry, it might be a valid name.  We can't check
418	 * if the key has since been made available due to locking
419	 * reasons, so we fail the validation so ext4_lookup() can do
420	 * this check.
421	 *
422	 * We also fail the validation if the dentry was created with
423	 * the key present, but we no longer have the key, or vice versa.
424	 */
425	if ((!cached_with_key && d_is_negative(dentry)) ||
426			(!cached_with_key && dir_has_key) ||
427			(cached_with_key && !dir_has_key))
428		return 0;
429	return 1;
430}
431
432const struct dentry_operations fscrypt_d_ops = {
433	.d_revalidate = fscrypt_d_revalidate,
434};
435EXPORT_SYMBOL(fscrypt_d_ops);
436
437/*
438 * Call fscrypt_decrypt_page on every single page, reusing the encryption
439 * context.
440 */
441static void completion_pages(struct work_struct *work)
442{
443	struct fscrypt_ctx *ctx =
444		container_of(work, struct fscrypt_ctx, r.work);
445	struct bio *bio = ctx->r.bio;
446	struct bio_vec *bv;
447	int i;
448
449	bio_for_each_segment_all(bv, bio, i) {
450		struct page *page = bv->bv_page;
451		int ret = fscrypt_decrypt_page(page->mapping->host, page,
452				PAGE_SIZE, 0, page->index);
453
454		if (ret) {
455			WARN_ON_ONCE(1);
456			SetPageError(page);
457		} else {
458			SetPageUptodate(page);
459		}
460		unlock_page(page);
461	}
462	fscrypt_release_ctx(ctx);
463	bio_put(bio);
464}
465
466void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
467{
468	INIT_WORK(&ctx->r.work, completion_pages);
469	ctx->r.bio = bio;
470	queue_work(fscrypt_read_workqueue, &ctx->r.work);
471}
472EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
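
Reads are decrypted off the I/O completion path: the completion handler only queues work onto fscrypt_read_workqueue. A sketch of a hypothetical bi_end_io callback, assuming the submission path stashed the fscrypt_ctx in bio->bi_private (the convention ext4's read path used with this API):

/* Illustrative sketch -- hypothetical completion handler, not kernel source. */
static void example_read_end_io(struct bio *bio)
{
	struct fscrypt_ctx *ctx = bio->bi_private;	/* assumed convention */

	if (bio->bi_error) {
		/* per-page error handling elided for brevity */
		fscrypt_release_ctx(ctx);
		bio_put(bio);
		return;
	}
	fscrypt_decrypt_bio_pages(ctx, bio);	/* pages decrypted on the workqueue */
}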
473
474void fscrypt_pullback_bio_page(struct page **page, bool restore)
475{
476	struct fscrypt_ctx *ctx;
477	struct page *bounce_page;
478
479	/* The bounce data pages are unmapped. */
480	if ((*page)->mapping)
481		return;
482
483	/* The bounce data page is unmapped. */
484	bounce_page = *page;
485	ctx = (struct fscrypt_ctx *)page_private(bounce_page);
486
487	/* restore control page */
488	*page = ctx->w.control_page;
489
490	if (restore)
491		fscrypt_restore_control_page(bounce_page);
492}
493EXPORT_SYMBOL(fscrypt_pullback_bio_page);
494
495void fscrypt_restore_control_page(struct page *page)
496{
497	struct fscrypt_ctx *ctx;
498
499	ctx = (struct fscrypt_ctx *)page_private(page);
500	set_page_private(page, (unsigned long)NULL);
501	ClearPagePrivate(page);
502	unlock_page(page);
503	fscrypt_release_ctx(ctx);
504}
505EXPORT_SYMBOL(fscrypt_restore_control_page);
506
507static void fscrypt_destroy(void)
508{
509	struct fscrypt_ctx *pos, *n;
510
511	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
512		kmem_cache_free(fscrypt_ctx_cachep, pos);
513	INIT_LIST_HEAD(&fscrypt_free_ctxs);
514	mempool_destroy(fscrypt_bounce_page_pool);
515	fscrypt_bounce_page_pool = NULL;
516}
517
518/**
519 * fscrypt_initialize() - allocate major buffers for fs encryption.
520 * @cop_flags:  fscrypt operations flags
521 *
522 * We only call this when we start accessing encrypted files, since it
523 * results in memory getting allocated that wouldn't otherwise be used.
524 *
525 * Return: Zero on success, non-zero otherwise.
526 */
527int fscrypt_initialize(unsigned int cop_flags)
528{
529	int i, res = -ENOMEM;
530
531	/*
532	 * No need to allocate a bounce page pool if there already is one or
533	 * this FS won't use it.
534	 */
535	if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool)
536		return 0;
537
538	mutex_lock(&fscrypt_init_mutex);
539	if (fscrypt_bounce_page_pool)
540		goto already_initialized;
541
542	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
543		struct fscrypt_ctx *ctx;
544
545		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
546		if (!ctx)
547			goto fail;
548		list_add(&ctx->free_list, &fscrypt_free_ctxs);
549	}
550
551	fscrypt_bounce_page_pool =
552		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
553	if (!fscrypt_bounce_page_pool)
554		goto fail;
555
556already_initialized:
557	mutex_unlock(&fscrypt_init_mutex);
558	return 0;
559fail:
560	fscrypt_destroy();
561	mutex_unlock(&fscrypt_init_mutex);
562	return res;
563}
564
565/**
566 * fscrypt_init() - Set up for fs encryption.
567 */
568static int __init fscrypt_init(void)
569{
570	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
571							WQ_HIGHPRI, 0);
572	if (!fscrypt_read_workqueue)
573		goto fail;
574
575	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
576	if (!fscrypt_ctx_cachep)
577		goto fail_free_queue;
578
579	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
580	if (!fscrypt_info_cachep)
581		goto fail_free_ctx;
582
583	return 0;
584
585fail_free_ctx:
586	kmem_cache_destroy(fscrypt_ctx_cachep);
587fail_free_queue:
588	destroy_workqueue(fscrypt_read_workqueue);
589fail:
590	return -ENOMEM;
591}
592module_init(fscrypt_init)
593
594/**
595 * fscrypt_exit() - Shutdown the fs encryption system
596 */
597static void __exit fscrypt_exit(void)
598{
599	fscrypt_destroy();
600
601	if (fscrypt_read_workqueue)
602		destroy_workqueue(fscrypt_read_workqueue);
603	kmem_cache_destroy(fscrypt_ctx_cachep);
604	kmem_cache_destroy(fscrypt_info_cachep);
605}
606module_exit(fscrypt_exit);
607
608MODULE_LICENSE("GPL");