block/blk-crypto.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright 2019 Google LLC
  4 */
  5
  6/*
  7 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
  8 */
  9
 10#define pr_fmt(fmt) "blk-crypto: " fmt
 11
 12#include <linux/bio.h>
 13#include <linux/blkdev.h>
 14#include <linux/blk-crypto-profile.h>
 15#include <linux/module.h>
 16#include <linux/slab.h>
 17
 18#include "blk-crypto-internal.h"
 19
 20const struct blk_crypto_mode blk_crypto_modes[] = {
 21	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
 22		.name = "AES-256-XTS",
 23		.cipher_str = "xts(aes)",
 24		.keysize = 64,
 25		.ivsize = 16,
 26	},
 27	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
 28		.name = "AES-128-CBC-ESSIV",
 29		.cipher_str = "essiv(cbc(aes),sha256)",
 30		.keysize = 16,
 31		.ivsize = 16,
 32	},
 33	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
 34		.name = "Adiantum",
 35		.cipher_str = "adiantum(xchacha12,aes)",
 36		.keysize = 32,
 37		.ivsize = 32,
 38	},
 39	[BLK_ENCRYPTION_MODE_SM4_XTS] = {
 40		.name = "SM4-XTS",
 41		.cipher_str = "xts(sm4)",
 42		.keysize = 32,
 43		.ivsize = 16,
 44	},
 45};
 46
 47/*
 48 * This number needs to be at least (the number of threads doing IO
 49 * concurrently) * (maximum recursive depth of a bio), so that we don't
 50 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 51 * as the default number of post read contexts in both EXT4 and F2FS.
 52 */
 53static int num_prealloc_crypt_ctxs = 128;
 54
 55module_param(num_prealloc_crypt_ctxs, int, 0444);
 56MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
 57		"Number of bio crypto contexts to preallocate");
 58
 59static struct kmem_cache *bio_crypt_ctx_cache;
 60static mempool_t *bio_crypt_ctx_pool;
 61
 62static int __init bio_crypt_ctx_init(void)
 63{
 64	size_t i;
 65
 66	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
 67	if (!bio_crypt_ctx_cache)
 68		goto out_no_mem;
 69
 70	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
 71						      bio_crypt_ctx_cache);
 72	if (!bio_crypt_ctx_pool)
 73		goto out_no_mem;
 74
 75	/* This is assumed in various places. */
 76	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);
 77
 78	/* Sanity check that no algorithm exceeds the defined limits. */
 79	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
 80		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
 81		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
 82	}
 83
 84	return 0;
 85out_no_mem:
 86	panic("Failed to allocate mem for bio crypt ctxs\n");
 87}
 88subsys_initcall(bio_crypt_ctx_init);
 89
 90void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
 91		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
 92{
 93	struct bio_crypt_ctx *bc;
 94
 95	/*
 96	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
 97	 * that the mempool_alloc() can't fail.
 98	 */
 99	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));
100
101	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
102
103	bc->bc_key = key;
104	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));
105
106	bio->bi_crypt_context = bc;
107}
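Below is a minimal, hedged sketch of how a submitter might attach an encryption context to a bio before submission, assuming <linux/bio.h> and <linux/blk-crypto.h> provide the declarations; the helper name and starting DUN are hypothetical. GFP_NOIO is used because it contains __GFP_DIRECT_RECLAIM, satisfying the requirement checked by the WARN_ON_ONCE() above.

	#include <linux/bio.h>
	#include <linux/blk-crypto.h>

	/* Hypothetical helper: attach @key to @bio, starting at @first_dun. */
	static void example_attach_crypt_ctx(struct bio *bio,
					     const struct blk_crypto_key *key,
					     u64 first_dun)
	{
		u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_dun };

		/* GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the
		 * mempool_alloc() inside bio_crypt_set_ctx() cannot fail. */
		bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
	}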
108
109void __bio_crypt_free_ctx(struct bio *bio)
110{
111	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
112	bio->bi_crypt_context = NULL;
113}
114
115int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
116{
117	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
118	if (!dst->bi_crypt_context)
119		return -ENOMEM;
120	*dst->bi_crypt_context = *src->bi_crypt_context;
121	return 0;
122}
123
124/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
125void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
126			     unsigned int inc)
127{
128	int i;
129
130	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
131		dun[i] += inc;
132		/*
133		 * If the addition in this limb overflowed, then we need to
134		 * carry 1 into the next limb. Else the carry is 0.
135		 */
136		if (dun[i] < inc)
137			inc = 1;
138		else
139			inc = 0;
140	}
141}
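As a worked example of the carry handling: if dun[0] == U64_MAX and inc == 1, the addition wraps dun[0] to 0, which is less than inc, so a carry of 1 propagates into dun[1]; once an addition does not overflow, inc becomes 0 and the loop exits without touching the remaining limbs.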
142
143void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
144{
145	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
146
147	bio_crypt_dun_increment(bc->bc_dun,
148				bytes >> bc->bc_key->data_unit_size_bits);
149}
150
151/*
152 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
153 * @next_dun, treating the DUNs as multi-limb integers.
154 */
155bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
156				 unsigned int bytes,
157				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
158{
159	int i;
160	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;
161
162	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
163		if (bc->bc_dun[i] + carry != next_dun[i])
164			return false;
165		/*
166		 * If the addition in this limb overflowed, then we need to
167		 * carry 1 into the next limb. Else the carry is 0.
168		 */
169		if ((bc->bc_dun[i] + carry) < carry)
170			carry = 1;
171		else
172			carry = 0;
173	}
174
175	/* If the DUN wrapped through 0, don't treat it as contiguous. */
176	return carry == 0;
177}
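For instance, with 4096-byte data units (data_unit_size_bits == 12), a bio currently covering 8192 bytes at DUN {10, 0, ...} is contiguous with a following bio whose DUN is {12, 0, ...}: 8192 >> 12 == 2, 10 + 2 == 12 with no overflow in the first limb, and no carry remains after the last limb.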
178
179/*
180 * Checks that two bio crypt contexts are compatible - i.e. that
181 * they are mergeable except for data_unit_num continuity.
182 */
183static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
184				     struct bio_crypt_ctx *bc2)
185{
186	if (!bc1)
187		return !bc2;
188
189	return bc2 && bc1->bc_key == bc2->bc_key;
190}
191
192bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
193{
194	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
195}
196
197/*
198 * Checks that two bio crypt contexts are compatible, and also
199 * that their data_unit_nums are continuous (and can hence be merged)
200 * in the order @bc1 followed by @bc2.
201 */
202bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
203			     struct bio_crypt_ctx *bc2)
204{
205	if (!bio_crypt_ctx_compatible(bc1, bc2))
206		return false;
207
208	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
209}
210
211/* Check that all I/O segments are data unit aligned. */
212static bool bio_crypt_check_alignment(struct bio *bio)
213{
214	const unsigned int data_unit_size =
215		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
216	struct bvec_iter iter;
217	struct bio_vec bv;
218
219	bio_for_each_segment(bv, bio, iter) {
220		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
221			return false;
222	}
223
224	return true;
225}
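The single IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size) test works because data_unit_size is a power of two (enforced in blk_crypto_init_key() below): any low-order bit is set in the OR exactly when it is set in at least one operand, so one check covers both the segment length and its offset. For example, with 4096-byte data units, bv_len == 8192 and bv_offset == 512 give 8192 | 512 == 8704, which is not a multiple of 4096, so the bio is rejected.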
226
227blk_status_t __blk_crypto_init_request(struct request *rq)
228{
229	return blk_crypto_get_keyslot(rq->q->crypto_profile,
230				      rq->crypt_ctx->bc_key,
231				      &rq->crypt_keyslot);
232}
233
234/**
235 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
236 *
237 * @rq: The request whose crypto fields to uninitialize.
238 *
239 * Completely uninitializes the crypto fields of a request. If a keyslot has
240 * been programmed into some inline encryption hardware, that keyslot is
241 * released. The rq->crypt_ctx is also freed.
242 */
243void __blk_crypto_free_request(struct request *rq)
244{
245	blk_crypto_put_keyslot(rq->crypt_keyslot);
246	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
247	blk_crypto_rq_set_defaults(rq);
248}
249
250/**
251 * __blk_crypto_bio_prep - Prepare bio for inline encryption
252 *
253 * @bio_ptr: pointer to original bio pointer
254 *
255 * If the bio crypt context provided for the bio is supported by the underlying
256 * device's inline encryption hardware, do nothing.
257 *
258 * Otherwise, try to perform en/decryption for this bio by falling back to the
259 * kernel crypto API. When the crypto API fallback is used for encryption,
260 * blk-crypto may choose to split the bio into 2 - the first one that will
261 * continue to be processed and the second one that will be resubmitted via
262 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
263 * of the aforementioned "first one", and *bio_ptr will be updated to this
264 * bounce bio.
265 *
266 * Caller must ensure bio has bio_crypt_ctx.
267 *
268 * Return: true on success; false on error (and bio->bi_status will be set
269 *	   appropriately, and bio_endio() will have been called so bio
270 *	   submission should abort).
271 */
272bool __blk_crypto_bio_prep(struct bio **bio_ptr)
273{
274	struct bio *bio = *bio_ptr;
275	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
276
277	/* Error if bio has no data. */
278	if (WARN_ON_ONCE(!bio_has_data(bio))) {
279		bio->bi_status = BLK_STS_IOERR;
280		goto fail;
281	}
282
283	if (!bio_crypt_check_alignment(bio)) {
284		bio->bi_status = BLK_STS_IOERR;
285		goto fail;
286	}
287
288	/*
289	 * Success if device supports the encryption context, or if we succeeded
290	 * in falling back to the crypto API.
291	 */
292	if (blk_crypto_config_supported_natively(bio->bi_bdev,
293						 &bc_key->crypto_cfg))
294		return true;
295	if (blk_crypto_fallback_bio_prep(bio_ptr))
296		return true;
297fail:
298	bio_endio(*bio_ptr);
299	return false;
300}
301
302int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
303			     gfp_t gfp_mask)
304{
305	if (!rq->crypt_ctx) {
306		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
307		if (!rq->crypt_ctx)
308			return -ENOMEM;
309	}
310	*rq->crypt_ctx = *bio->bi_crypt_context;
311	return 0;
312}
313
314/**
315 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
316 * @blk_key: Pointer to the blk_crypto_key to initialize.
317 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
318 *	     @crypto_mode; see blk_crypto_modes[].
319 * @crypto_mode: identifier for the encryption algorithm to use
320 * @dun_bytes: number of bytes that will be used to specify the DUN when this
321 *	       key is used
322 * @data_unit_size: the data unit size to use for en/decryption
323 *
324 * Return: 0 on success, -errno on failure.  The caller is responsible for
325 *	   zeroizing both blk_key and raw_key when done with them.
326 */
327int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
328			enum blk_crypto_mode_num crypto_mode,
329			unsigned int dun_bytes,
330			unsigned int data_unit_size)
331{
332	const struct blk_crypto_mode *mode;
333
334	memset(blk_key, 0, sizeof(*blk_key));
335
336	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
337		return -EINVAL;
338
339	mode = &blk_crypto_modes[crypto_mode];
340	if (mode->keysize == 0)
341		return -EINVAL;
342
343	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
344		return -EINVAL;
345
346	if (!is_power_of_2(data_unit_size))
347		return -EINVAL;
348
349	blk_key->crypto_cfg.crypto_mode = crypto_mode;
350	blk_key->crypto_cfg.dun_bytes = dun_bytes;
351	blk_key->crypto_cfg.data_unit_size = data_unit_size;
352	blk_key->data_unit_size_bits = ilog2(data_unit_size);
353	blk_key->size = mode->keysize;
354	memcpy(blk_key->raw, raw_key, mode->keysize);
355
356	return 0;
357}
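A minimal sketch of initializing a key for AES-256-XTS with 4 KiB data units, assuming <linux/blk-crypto.h> for the declarations; the 64-byte raw key length comes from blk_crypto_modes[] above, while the 8-byte DUN width and the helper name are illustrative choices, not from the source:

	#include <linux/blk-crypto.h>

	static int example_init_xts_key(struct blk_crypto_key *blk_key,
					const u8 raw_key[64])
	{
		/* 64-byte raw key for AES-256-XTS, DUN specified in 8 bytes,
		 * 4096-byte data units. */
		return blk_crypto_init_key(blk_key, raw_key,
					   BLK_ENCRYPTION_MODE_AES_256_XTS,
					   8, 4096);
	}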
358
359bool blk_crypto_config_supported_natively(struct block_device *bdev,
360					  const struct blk_crypto_config *cfg)
361{
362	return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,
363					  cfg);
364}
365
366/*
367 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
368 * block_device it's submitted to supports inline crypto, or the
369 * blk-crypto-fallback is enabled and supports the cfg).
370 */
371bool blk_crypto_config_supported(struct block_device *bdev,
372				 const struct blk_crypto_config *cfg)
373{
374	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
375	       blk_crypto_config_supported_natively(bdev, cfg);
376}
377
378/**
379 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
380 * @bdev: block device to operate on
381 * @key: A key to use on the device
382 *
383 * Upper layers must call this function to ensure that either the hardware
384 * supports the key's crypto settings, or the crypto API fallback has transforms
385 * for the needed mode allocated and ready to go. This function may allocate
386 * an skcipher, and *should not* be called from the data path, since that might
387 * cause a deadlock.
388 *
389 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
390 *	   blk-crypto-fallback is either disabled or the needed algorithm
391 *	   is disabled in the crypto API; or another -errno code.
392 */
393int blk_crypto_start_using_key(struct block_device *bdev,
394			       const struct blk_crypto_key *key)
395{
396	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
397		return 0;
398	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
399}
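A hedged sketch of the setup sequence an upper layer might follow with this v6.2 API before issuing encrypted I/O; the helper name is hypothetical and error handling is abbreviated:

	static int example_setup_key(struct block_device *bdev,
				     struct blk_crypto_key *key,
				     const u8 *raw_key)
	{
		int err;

		err = blk_crypto_init_key(key, raw_key,
					  BLK_ENCRYPTION_MODE_AES_256_XTS,
					  8, 4096);
		if (err)
			return err;

		/* Not on the data path: may allocate an skcipher for the
		 * blk-crypto-fallback. */
		return blk_crypto_start_using_key(bdev, key);
	}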
400
401/**
402 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
403 *			    it may have been programmed into
404 * @bdev: The block_device whose associated inline encryption hardware this key
405 *     might have been programmed into
406 * @key: The key to evict
407 *
408 * Upper layers (filesystems) must call this function to ensure that a key is
409 * evicted from any hardware that it might have been programmed into.  The key
410 * must not be in use by any in-flight IO when this function is called.
411 *
412 * Return: 0 on success or if the key wasn't in any keyslot; -errno on error.
413 */
414int blk_crypto_evict_key(struct block_device *bdev,
415			 const struct blk_crypto_key *key)
416{
417	struct request_queue *q = bdev_get_queue(bdev);
418
419	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
420		return __blk_crypto_evict_key(q->crypto_profile, key);
421
422	/*
423	 * If the block_device didn't support the key, then blk-crypto-fallback
424	 * may have been used, so try to evict the key from blk-crypto-fallback.
425	 */
426	return blk_crypto_fallback_evict_key(key);
427}
428EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
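When the key is retired (for example at unmount), the upper layer evicts it from any keyslots and then zeroizes its own copy; a minimal sketch, assuming <linux/string.h> for memzero_explicit() and a hypothetical helper name:

	static void example_retire_key(struct block_device *bdev,
				       struct blk_crypto_key *key)
	{
		/* The key must not be in use by any in-flight I/O here. */
		if (blk_crypto_evict_key(bdev, key))
			pr_warn("example: failed to evict blk-crypto key\n");

		memzero_explicit(key, sizeof(*key));
	}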
block/blk-crypto.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright 2019 Google LLC
  4 */
  5
  6/*
  7 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
  8 */
  9
 10#define pr_fmt(fmt) "blk-crypto: " fmt
 11
 12#include <linux/bio.h>
 13#include <linux/blkdev.h>
 14#include <linux/keyslot-manager.h>
 15#include <linux/module.h>
 16#include <linux/slab.h>
 17
 18#include "blk-crypto-internal.h"
 19
 20const struct blk_crypto_mode blk_crypto_modes[] = {
 21	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
 22		.cipher_str = "xts(aes)",
 23		.keysize = 64,
 24		.ivsize = 16,
 25	},
 26	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
 27		.cipher_str = "essiv(cbc(aes),sha256)",
 28		.keysize = 16,
 29		.ivsize = 16,
 30	},
 31	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
 32		.cipher_str = "adiantum(xchacha12,aes)",
 33		.keysize = 32,
 34		.ivsize = 32,
 35	},
 36};
 37
 38/*
 39 * This number needs to be at least (the number of threads doing IO
 40 * concurrently) * (maximum recursive depth of a bio), so that we don't
 41 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 42 * as the default number of post read contexts in both EXT4 and F2FS.
 43 */
 44static int num_prealloc_crypt_ctxs = 128;
 45
 46module_param(num_prealloc_crypt_ctxs, int, 0444);
 47MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
 48		"Number of bio crypto contexts to preallocate");
 49
 50static struct kmem_cache *bio_crypt_ctx_cache;
 51static mempool_t *bio_crypt_ctx_pool;
 52
 53static int __init bio_crypt_ctx_init(void)
 54{
 55	size_t i;
 56
 57	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
 58	if (!bio_crypt_ctx_cache)
 59		goto out_no_mem;
 60
 61	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
 62						      bio_crypt_ctx_cache);
 63	if (!bio_crypt_ctx_pool)
 64		goto out_no_mem;
 65
 66	/* This is assumed in various places. */
 67	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);
 68
 69	/* Sanity check that no algorithm exceeds the defined limits. */
 70	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
 71		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
 72		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
 73	}
 74
 75	return 0;
 76out_no_mem:
 77	panic("Failed to allocate mem for bio crypt ctxs\n");
 78}
 79subsys_initcall(bio_crypt_ctx_init);
 80
 81void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
 82		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
 83{
 84	struct bio_crypt_ctx *bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
 85
 86	bc->bc_key = key;
 87	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));
 88
 89	bio->bi_crypt_context = bc;
 90}
 91
 92void __bio_crypt_free_ctx(struct bio *bio)
 93{
 94	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
 95	bio->bi_crypt_context = NULL;
 96}
 97
 98void __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
 99{
100	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
101	*dst->bi_crypt_context = *src->bi_crypt_context;
102}
103EXPORT_SYMBOL_GPL(__bio_crypt_clone);
104
105/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
106void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
107			     unsigned int inc)
108{
109	int i;
110
111	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
112		dun[i] += inc;
113		/*
114		 * If the addition in this limb overflowed, then we need to
115		 * carry 1 into the next limb. Else the carry is 0.
116		 */
117		if (dun[i] < inc)
118			inc = 1;
119		else
120			inc = 0;
121	}
122}
123
124void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
125{
126	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
127
128	bio_crypt_dun_increment(bc->bc_dun,
129				bytes >> bc->bc_key->data_unit_size_bits);
130}
131
132/*
133 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
134 * @next_dun, treating the DUNs as multi-limb integers.
135 */
136bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
137				 unsigned int bytes,
138				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
139{
140	int i;
141	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;
142
143	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
144		if (bc->bc_dun[i] + carry != next_dun[i])
145			return false;
146		/*
147		 * If the addition in this limb overflowed, then we need to
148		 * carry 1 into the next limb. Else the carry is 0.
149		 */
150		if ((bc->bc_dun[i] + carry) < carry)
151			carry = 1;
152		else
153			carry = 0;
154	}
155
156	/* If the DUN wrapped through 0, don't treat it as contiguous. */
157	return carry == 0;
158}
159
160/*
161 * Checks that two bio crypt contexts are compatible - i.e. that
162 * they are mergeable except for data_unit_num continuity.
163 */
164static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
165				     struct bio_crypt_ctx *bc2)
166{
167	if (!bc1)
168		return !bc2;
169
170	return bc2 && bc1->bc_key == bc2->bc_key;
171}
172
173bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
174{
175	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
176}
177
178/*
179 * Checks that two bio crypt contexts are compatible, and also
180 * that their data_unit_nums are continuous (and can hence be merged)
181 * in the order @bc1 followed by @bc2.
182 */
183bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
184			     struct bio_crypt_ctx *bc2)
185{
186	if (!bio_crypt_ctx_compatible(bc1, bc2))
187		return false;
188
189	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
190}
191
192/* Check that all I/O segments are data unit aligned. */
193static bool bio_crypt_check_alignment(struct bio *bio)
194{
195	const unsigned int data_unit_size =
196		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
197	struct bvec_iter iter;
198	struct bio_vec bv;
199
200	bio_for_each_segment(bv, bio, iter) {
201		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
202			return false;
203	}
204
205	return true;
206}
207
208blk_status_t __blk_crypto_init_request(struct request *rq)
209{
210	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
211					&rq->crypt_keyslot);
212}
213
214/**
215 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
216 *
217 * @rq: The request whose crypto fields to uninitialize.
218 *
219 * Completely uninitializes the crypto fields of a request. If a keyslot has
220 * been programmed into some inline encryption hardware, that keyslot is
221 * released. The rq->crypt_ctx is also freed.
222 */
223void __blk_crypto_free_request(struct request *rq)
224{
225	blk_ksm_put_slot(rq->crypt_keyslot);
226	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
227	blk_crypto_rq_set_defaults(rq);
228}
229
230/**
231 * __blk_crypto_bio_prep - Prepare bio for inline encryption
232 *
233 * @bio_ptr: pointer to original bio pointer
234 *
235 * If the bio crypt context provided for the bio is supported by the underlying
236 * device's inline encryption hardware, do nothing.
237 *
238 * Otherwise, try to perform en/decryption for this bio by falling back to the
239 * kernel crypto API. When the crypto API fallback is used for encryption,
240 * blk-crypto may choose to split the bio into 2 - the first one that will
241 * continue to be processed and the second one that will be resubmitted via
242 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
243 * of the aforementioned "first one", and *bio_ptr will be updated to this
244 * bounce bio.
245 *
246 * Caller must ensure bio has bio_crypt_ctx.
247 *
248 * Return: true on success; false on error (and bio->bi_status will be set
249 *	   appropriately, and bio_endio() will have been called so bio
250 *	   submission should abort).
251 */
252bool __blk_crypto_bio_prep(struct bio **bio_ptr)
253{
254	struct bio *bio = *bio_ptr;
255	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
256
257	/* Error if bio has no data. */
258	if (WARN_ON_ONCE(!bio_has_data(bio))) {
259		bio->bi_status = BLK_STS_IOERR;
260		goto fail;
261	}
262
263	if (!bio_crypt_check_alignment(bio)) {
264		bio->bi_status = BLK_STS_IOERR;
265		goto fail;
266	}
267
268	/*
269	 * Success if device supports the encryption context, or if we succeeded
270	 * in falling back to the crypto API.
271	 */
272	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
273					 &bc_key->crypto_cfg))
274		return true;
275
276	if (blk_crypto_fallback_bio_prep(bio_ptr))
277		return true;
278fail:
279	bio_endio(*bio_ptr);
280	return false;
281}
282
283/**
284 * __blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
285 *			      is inserted
286 *
287 * @rq: The request to prepare
288 * @bio: The first bio being inserted into the request
289 * @gfp_mask: gfp mask
290 */
291void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
292			      gfp_t gfp_mask)
293{
294	if (!rq->crypt_ctx)
295		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
296	*rq->crypt_ctx = *bio->bi_crypt_context;
297}
298
299/**
300 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
301 * @blk_key: Pointer to the blk_crypto_key to initialize.
302 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
303 *	     @crypto_mode; see blk_crypto_modes[].
304 * @crypto_mode: identifier for the encryption algorithm to use
305 * @dun_bytes: number of bytes that will be used to specify the DUN when this
306 *	       key is used
307 * @data_unit_size: the data unit size to use for en/decryption
308 *
309 * Return: 0 on success, -errno on failure.  The caller is responsible for
310 *	   zeroizing both blk_key and raw_key when done with them.
311 */
312int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
313			enum blk_crypto_mode_num crypto_mode,
314			unsigned int dun_bytes,
315			unsigned int data_unit_size)
316{
317	const struct blk_crypto_mode *mode;
318
319	memset(blk_key, 0, sizeof(*blk_key));
320
321	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
322		return -EINVAL;
323
324	mode = &blk_crypto_modes[crypto_mode];
325	if (mode->keysize == 0)
326		return -EINVAL;
327
328	if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
329		return -EINVAL;
330
331	if (!is_power_of_2(data_unit_size))
332		return -EINVAL;
333
334	blk_key->crypto_cfg.crypto_mode = crypto_mode;
335	blk_key->crypto_cfg.dun_bytes = dun_bytes;
336	blk_key->crypto_cfg.data_unit_size = data_unit_size;
337	blk_key->data_unit_size_bits = ilog2(data_unit_size);
338	blk_key->size = mode->keysize;
339	memcpy(blk_key->raw, raw_key, mode->keysize);
340
341	return 0;
342}
343
344/*
345 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
346 * request queue it's submitted to supports inline crypto, or the
347 * blk-crypto-fallback is enabled and supports the cfg).
348 */
349bool blk_crypto_config_supported(struct request_queue *q,
350				 const struct blk_crypto_config *cfg)
351{
352	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
353	       blk_ksm_crypto_cfg_supported(q->ksm, cfg);
354}
355
356/**
357 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
358 * @key: A key to use on the device
359 * @q: the request queue for the device
360 *
361 * Upper layers must call this function to ensure that either the hardware
362 * supports the key's crypto settings, or the crypto API fallback has transforms
363 * for the needed mode allocated and ready to go. This function may allocate
364 * an skcipher, and *should not* be called from the data path, since that might
365 * cause a deadlock.
366 *
367 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
368 *	   blk-crypto-fallback is either disabled or the needed algorithm
369 *	   is disabled in the crypto API; or another -errno code.
370 */
371int blk_crypto_start_using_key(const struct blk_crypto_key *key,
372			       struct request_queue *q)
373{
374	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
375		return 0;
376	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
377}
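Note that in v5.9 the argument order is reversed relative to the v6.2 API shown earlier, and the device is identified by its request_queue rather than a block_device. A minimal sketch, with a hypothetical helper that obtains the queue via bdev_get_queue():

	static int example_start_using_v59(struct block_device *bdev,
					   const struct blk_crypto_key *key)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		/* v5.9 signature: key first, then the request queue. */
		return blk_crypto_start_using_key(key, q);
	}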
378
379/**
380 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
381 *			    it may have been programmed into
382 * @q: The request queue whose associated inline encryption hardware this key
383 *     might have been programmed into
384 * @key: The key to evict
385 *
386 * Upper layers (filesystems) must call this function to ensure that a key is
387 * evicted from any hardware that it might have been programmed into.  The key
388 * must not be in use by any in-flight IO when this function is called.
389 *
390 * Return: 0 on success or if the key is not present in the q's ksm; -errno on error.
391 */
392int blk_crypto_evict_key(struct request_queue *q,
393			 const struct blk_crypto_key *key)
394{
395	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
396		return blk_ksm_evict_key(q->ksm, key);
397
398	/*
399	 * If the request queue's associated inline encryption hardware didn't
400	 * have support for the key, then the key might have been programmed
401	 * into the fallback keyslot manager, so try to evict from there.
402	 */
403	return blk_crypto_fallback_evict_key(key);
404}