fs/ext4/crypto.c (v6.2)
// SPDX-License-Identifier: GPL-2.0

#include <linux/quotaops.h>
#include <linux/uuid.h>

#include "ext4.h"
#include "xattr.h"
#include "ext4_jbd2.h"

static void ext4_fname_from_fscrypt_name(struct ext4_filename *dst,
					 const struct fscrypt_name *src)
{
	memset(dst, 0, sizeof(*dst));

	dst->usr_fname = src->usr_fname;
	dst->disk_name = src->disk_name;
	dst->hinfo.hash = src->hash;
	dst->hinfo.minor_hash = src->minor_hash;
	dst->crypto_buf = src->crypto_buf;
}

int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
			      int lookup, struct ext4_filename *fname)
{
	struct fscrypt_name name;
	int err;

	err = fscrypt_setup_filename(dir, iname, lookup, &name);
	if (err)
		return err;

	ext4_fname_from_fscrypt_name(fname, &name);

#if IS_ENABLED(CONFIG_UNICODE)
	err = ext4_fname_setup_ci_filename(dir, iname, fname);
#endif
	return err;
}

int ext4_fname_prepare_lookup(struct inode *dir, struct dentry *dentry,
			      struct ext4_filename *fname)
{
	struct fscrypt_name name;
	int err;

	err = fscrypt_prepare_lookup(dir, dentry, &name);
	if (err)
		return err;

	ext4_fname_from_fscrypt_name(fname, &name);

#if IS_ENABLED(CONFIG_UNICODE)
	err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
#endif
	return err;
}

void ext4_fname_free_filename(struct ext4_filename *fname)
{
	struct fscrypt_name name;

	name.crypto_buf = fname->crypto_buf;
	fscrypt_free_filename(&name);

	fname->crypto_buf.name = NULL;
	fname->usr_fname = NULL;
	fname->disk_name.name = NULL;

#if IS_ENABLED(CONFIG_UNICODE)
	kfree(fname->cf_name.name);
	fname->cf_name.name = NULL;
#endif
}
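
/*
 * Illustrative sketch, not part of the original file: the intended
 * calling pattern for the filename helpers above. A directory operation
 * sets up an ext4_filename from a user-supplied name, searches with it,
 * and must free it afterwards so the fscrypt crypto buffer is released.
 * ext4_example_find_entry() and its body are hypothetical.
 */
static int ext4_example_find_entry(struct inode *dir, const struct qstr *child)
{
	struct ext4_filename fname;
	int err;

	err = ext4_fname_setup_filename(dir, child, 1 /* lookup */, &fname);
	if (err)
		return err;

	/* ... search the directory using fname.disk_name / fname.hinfo ... */

	ext4_fname_free_filename(&fname);
	return 0;
}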

static bool uuid_is_zero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return false;
	return true;
}

int ext4_ioctl_get_encryption_pwsalt(struct file *filp, void __user *arg)
{
	struct super_block *sb = file_inode(filp)->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err, err2;
	handle_t *handle;

	if (!ext4_has_feature_encrypt(sb))
		return -EOPNOTSUPP;

	if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
		err = mnt_want_write_file(filp);
		if (err)
			return err;
		handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto pwsalt_err_exit;
		}
		err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh,
						    EXT4_JTR_NONE);
		if (err)
			goto pwsalt_err_journal;
		lock_buffer(sbi->s_sbh);
		generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
		ext4_superblock_csum_set(sb);
		unlock_buffer(sbi->s_sbh);
		err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
pwsalt_err_journal:
		err2 = ext4_journal_stop(handle);
		if (err2 && !err)
			err = err2;
pwsalt_err_exit:
		mnt_drop_write_file(filp);
		if (err)
			return err;
	}

	if (copy_to_user(arg, sbi->s_es->s_encrypt_pw_salt, 16))
		return -EFAULT;
	return 0;
}
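
/*
 * Illustrative sketch, not part of the original file: how user space
 * might read back the 16-byte salt that the ioctl handler above
 * initializes. This assumes the FS_IOC_GET_ENCRYPTION_PWSALT request
 * number from <linux/fs.h>, which ext4 routes to
 * ext4_ioctl_get_encryption_pwsalt().
 */
#if 0	/* user-space example, not kernel code */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int print_pwsalt(const char *path)
{
	unsigned char salt[16];
	int i, fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, FS_IOC_GET_ENCRYPTION_PWSALT, salt) != 0) {
		close(fd);
		return -1;
	}
	close(fd);
	for (i = 0; i < 16; i++)
		printf("%02x", salt[i]);
	putchar('\n');
	return 0;
}
#endif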

static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}

static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	handle_t *handle = fs_data;
	int res, res2, credits, retries = 0;

	/*
	 * Encrypting the root directory is not allowed because e2fsck expects
	 * lost+found to exist and be unencrypted, and encrypting the root
	 * directory would imply encrypting the lost+found directory as well as
	 * the filename "lost+found" itself.
	 */
	if (inode->i_ino == EXT4_ROOT_INO)
		return -EPERM;

	if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
		return -EINVAL;

	if (ext4_test_inode_flag(inode, EXT4_INODE_DAX))
		return -EOPNOTSUPP;

	res = ext4_convert_inline_data(inode);
	if (res)
		return res;

	/*
	 * If a journal handle was specified, then the encryption context is
	 * being set on a new inode via inheritance and is part of a larger
	 * transaction to create the inode.  Otherwise the encryption context is
	 * being set on an existing inode in its own transaction.  Only in the
	 * latter case should the "retry on ENOSPC" logic be used.
	 */

	if (handle) {
		res = ext4_xattr_set_handle(handle, inode,
					    EXT4_XATTR_INDEX_ENCRYPTION,
					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
					    ctx, len, 0);
		if (!res) {
			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
			ext4_clear_inode_state(inode,
					EXT4_STATE_MAY_INLINE_DATA);
			/*
			 * Update inode->i_flags - S_ENCRYPTED will be enabled,
			 * S_DAX may be disabled
			 */
			ext4_set_inode_flags(inode, false);
		}
		return res;
183	}
184
185	res = dquot_initialize(inode);
186	if (res)
187		return res;
188retry:
189	res = ext4_xattr_set_credits(inode, len, false /* is_create */,
190				     &credits);
191	if (res)
192		return res;
193
194	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
195	if (IS_ERR(handle))
196		return PTR_ERR(handle);
197
198	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
199				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
200				    ctx, len, 0);
201	if (!res) {
202		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
203		/*
204		 * Update inode->i_flags - S_ENCRYPTED will be enabled,
205		 * S_DAX may be disabled
206		 */
207		ext4_set_inode_flags(inode, false);
208		res = ext4_mark_inode_dirty(handle, inode);
209		if (res)
210			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
211	}
212	res2 = ext4_journal_stop(handle);
213
214	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
215		goto retry;
216	if (!res)
217		res = res2;
218	return res;
219}
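
/*
 * Illustrative note, not part of the original file: ext4 does not call
 * ext4_set_context() directly; the fscrypt core invokes it through the
 * fscrypt_operations table below. Roughly, for the two paths the comment
 * above distinguishes (sketch, simplified from fs/crypto/):
 *
 *	sb->s_cop->set_context(inode, ctx, len, NULL);	  policy ioctl on an
 *							  existing empty inode
 *	fscrypt_set_context(inode, handle);		  new inode, joins the
 *							  creation transaction
 */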

static const union fscrypt_policy *ext4_get_dummy_policy(struct super_block *sb)
{
	return EXT4_SB(sb)->s_dummy_enc_policy.policy;
}

static bool ext4_has_stable_inodes(struct super_block *sb)
{
	return ext4_has_feature_stable_inodes(sb);
}

static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
				       int *ino_bits_ret, int *lblk_bits_ret)
{
	*ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
	*lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
}

const struct fscrypt_operations ext4_cryptops = {
	.key_prefix		= "ext4:",
	.get_context		= ext4_get_context,
	.set_context		= ext4_set_context,
	.get_dummy_policy	= ext4_get_dummy_policy,
	.empty_dir		= ext4_empty_dir,
	.has_stable_inodes	= ext4_has_stable_inodes,
	.get_ino_and_lblk_bits	= ext4_get_ino_and_lblk_bits,
};
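
/*
 * Illustrative note, not part of the original file: ext4 registers these
 * operations with the fscrypt core at mount time, e.g. in
 * ext4_fill_super() (sketch):
 *
 *	fscrypt_set_ops(sb, &ext4_cryptops);
 */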
fs/ext4/crypto.c (v4.6)
/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/skcipher.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
#include <linux/namei.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		 "Number of crypto contexts to preallocate");

static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
	ctx->w.bounce_page = NULL;
	ctx->w.control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode:       The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
					    gfp_t gfp_flags)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}
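
/*
 * Illustrative sketch, not part of the original file: the expected
 * pairing of ext4_get_crypto_ctx() with ext4_release_crypto_ctx().
 * ext4_example_with_ctx() is hypothetical.
 */
static int ext4_example_with_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx;

	ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* e.g. -ENOKEY if no key is loaded */

	/* ... use ctx for an encryption or decryption operation ... */

	ext4_release_crypto_ctx(ctx);
	return 0;
}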

struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
	if (ext4_crypto_ctx_cachep)
		kmem_cache_destroy(ext4_crypto_ctx_cachep);
	ext4_crypto_ctx_cachep = NULL;
	if (ext4_crypt_info_cachep)
		kmem_cache_destroy(ext4_crypt_info_cachep);
	ext4_crypt_info_cachep = NULL;
}

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}
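
/*
 * Illustrative note, not part of the original file: per the comment
 * above, callers invoke ext4_init_crypto() lazily the first time an
 * encrypted file is accessed, and it is safe to call more than once
 * (sketch):
 *
 *	res = ext4_init_crypto();
 *	if (res)
 *		return res;
 */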

void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}

/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	EXT4_DECRYPT = 0,
	EXT4_ENCRYPT,
} ext4_direction_t;

static int ext4_page_crypto(struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page,
			    gfp_t gfp_flags)
{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: skcipher_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
				   xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}
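
/*
 * Illustrative note, not part of the original file: the XTS tweak built
 * above is just the page index copied into the low bytes of a 16-byte
 * buffer, with the remainder zeroed. On a little-endian machine with an
 * 8-byte pgoff_t, index 5 produces the tweak
 *
 *	05 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * so each page of a file is encrypted with a distinct tweak.
 */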

static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
				      gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= EXT4_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * ext4_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path.  The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page,
			  gfp_t gfp_flags)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *) ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page, gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}
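
/*
 * Illustrative sketch, not part of the original file: the write-path
 * contract described in the comment above. The caller writes out the
 * returned bounce page and, once the I/O has completed, releases it
 * with ext4_restore_control_page(). ext4_example_write_page() and the
 * elided I/O submission are hypothetical.
 */
static int ext4_example_write_page(struct inode *inode, struct page *page)
{
	struct page *ciphertext_page;

	ciphertext_page = ext4_encrypt(inode, page, GFP_NOFS);
	if (IS_ERR(ciphertext_page))
		return PTR_ERR(ciphertext_page);

	/* ... submit ciphertext_page for writeback and wait for it ... */

	ext4_restore_control_page(ciphertext_page);
	return 0;
}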

/**
 * ext4_decrypt() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the encryption context of the page's inode.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
				page->index, page, page, GFP_NOFS);
}

int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
			   ext4_fsblk_t pblk, ext4_lblk_t len)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	int			ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page,
				       GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++; pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}

bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct ext4_crypt_info *ci;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!ext4_encrypted_inode(d_inode(dir))) {
		dput(dir);
		return 0;
	}
	ci = EXT4_I(d_inode(dir))->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	cached_with_key = dentry->d_fsdata != NULL;
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name.  We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key)) {
#if 0				/* Revalidation debug */
		char buf[80];
		char *cp = simple_dname(dentry, buf, sizeof(buf));

		if (IS_ERR(cp))
			cp = (char *) "???";
		pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
		       cached_with_key, d_is_negative(dentry),
		       dir_has_key);
#endif
		return 0;
	}
	return 1;
}

const struct dentry_operations ext4_encrypted_d_ops = {
	.d_revalidate = ext4_d_revalidate,
};
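
/*
 * Illustrative note, not part of the original file: these dentry
 * operations take effect only once attached to dentries of names in
 * encrypted directories, which ext4's lookup path does along the lines
 * of (sketch):
 *
 *	d_set_d_op(dentry, &ext4_encrypted_d_ops);
 */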