v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page->mapping->host, page,
				PAGE_SIZE, 0, page->index);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);

void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
					     ZERO_PAGE(0), ciphertext_page,
					     PAGE_SIZE, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if (err == 0 && bio->bi_status)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
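
A filesystem using the v4.17 API is expected to hand a completed read bio for an encrypted file to fscrypt_decrypt_bio_pages(), which defers the per-page decryption to fscrypt_read_workqueue instead of running it in bio completion context. Below is a minimal sketch of such a read-completion handler, loosely modeled on how ext4 wires this up; the name example_read_endio and the use of bio->bi_private to carry the fscrypt_ctx are illustrative assumptions, not part of the API shown above.

/*
 * Sketch of a read-completion handler (hypothetical).  Assumes the
 * submitter stored the fscrypt_ctx in bio->bi_private when it built
 * the bio for an encrypted regular file.
 */
static void example_read_endio(struct bio *bio)
{
	struct fscrypt_ctx *ctx = bio->bi_private;
	struct bio_vec *bv;
	int i;

	if (!bio->bi_status) {
		/*
		 * I/O succeeded: queue the decryption.  completion_pages()
		 * will mark the pages uptodate (or error), unlock them and
		 * put the bio.
		 */
		fscrypt_decrypt_bio_pages(ctx, bio);
		return;
	}

	/* I/O failed: skip decryption and fail the pages here. */
	fscrypt_release_ctx(ctx);
	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		ClearPageUptodate(page);
		SetPageError(page);
		unlock_page(page);
	}
	bio_put(bio);
}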
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
							   bv->bv_offset);
		if (ret)
			SetPageError(page);
		else if (done)
			SetPageUptodate(page);
		if (done)
			unlock_page(page);
	}
}

void fscrypt_decrypt_bio(struct bio *bio)
{
	__fscrypt_decrypt_bio(bio, false);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);

static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
	struct bio *bio = ctx->bio;

	__fscrypt_decrypt_bio(bio, true);
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->work, completion_pages);
	ctx->bio = bio;
	fscrypt_enqueue_decrypt_work(&ctx->work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);

int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	struct page *ciphertext_page;
	struct bio *bio;
	int ret, err = 0;

	ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
	if (!ciphertext_page)
		return -ENOMEM;

	while (len--) {
		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
					  ZERO_PAGE(0), ciphertext_page,
					  blocksize, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
		if (WARN_ON(ret != blocksize)) {
			/* should never happen! */
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if (err == 0 && bio->bi_status)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_free_bounce_page(ciphertext_page);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
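
fscrypt_zeroout_range() exists because an encrypted file cannot be zeroed by writing literal zeroes to the device: each on-disk block must instead contain the encryption of a zero block for that (inode, lblk) pair. A caller therefore branches on whether the inode is encrypted, roughly as in the sketch below; the wrapper name example_issue_zeroout is an illustrative assumption (ext4 has a similar helper in its extent code).

/*
 * Sketch of a caller (hypothetical): zero 'len' filesystem blocks of
 * 'inode', starting at logical block 'lblk' / physical block 'pblk'.
 */
static int example_issue_zeroout(struct inode *inode, pgoff_t lblk,
				 sector_t pblk, unsigned int len)
{
	int ret;

	if (IS_ENCRYPTED(inode)) {
		/* Write the encryption of a zero block, block by block. */
		return fscrypt_zeroout_range(inode, lblk, pblk, len);
	}

	/* Plaintext file: let the block layer zero the range. */
	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	return ret > 0 ? 0 : ret;
}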