// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};
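
/*
 * The .keysize values are raw key lengths in bytes: AES-256-XTS takes 64
 * bytes because XTS uses two 256-bit AES keys, while Adiantum and
 * AES-128-CBC-ESSIV take 32- and 16-byte keys respectively.  .ivsize is the
 * algorithm's IV size in bytes, which bounds how many bytes of the DUN can
 * be used (see blk_crypto_init_key() below).
 */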

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		"Number of bio crypto contexts to preallocate");
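
/*
 * Worked example of the sizing rule above (the numbers are illustrative,
 * not measured): with 64 threads submitting I/O concurrently and a maximum
 * bio recursion depth of 2, at least 64 * 2 = 128 preallocated contexts are
 * needed to guarantee forward progress, which matches the default.
 */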

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
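
/*
 * Example use of bio_crypt_set_ctx() (an illustrative sketch, not taken from
 * any particular caller; "key" is assumed to have been set up with
 * blk_crypto_init_key() and "lblk_num" is a hypothetical logical block
 * number used as the DUN):
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk_num };
 *
 *	bio_crypt_set_ctx(bio, &key, dun, GFP_NOFS);
 *
 * GFP_NOFS and GFP_NOIO both contain __GFP_DIRECT_RECLAIM, so either
 * satisfies the allocation requirement above.
 */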

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
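
/*
 * Worked example for bio_crypt_dun_increment() (illustrative values, with
 * BLK_CRYPTO_DUN_ARRAY_SIZE == 4): given dun == { U64_MAX, 5, 0, 0 } and
 * inc == 2, limb 0 wraps around to 1, so a carry of 1 propagates into
 * limb 1, giving { 1, 6, 0, 0 } -- the DUN, viewed as a single 256-bit
 * integer with the least significant limb first, increased by 2.
 */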

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
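
/*
 * Worked example for bio_crypt_dun_is_contiguous() (illustrative values):
 * with 4096-byte data units (data_unit_size_bits == 12), bc_dun ==
 * { U64_MAX, 0, 0, 0 } and bytes == 8192, the initial carry is
 * 8192 >> 12 == 2, so the only contiguous next_dun is { 1, 1, 0, 0 }:
 * limb 0 wraps from U64_MAX to 1 and carries 1 into limb 1.  Had all four
 * limbs of bc_dun been U64_MAX, a carry would survive the loop and the
 * function would return false, since the DUN wrapped through 0.
 */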

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}
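
/*
 * For example (illustrative): two bios encrypted with the same key, using
 * 4096-byte data units, where the first covers DUNs 0-3 (bc1_bytes ==
 * 16384) and the second starts at DUN 4, are mergeable.  If the second bio
 * instead started at DUN 8, the contexts would be compatible but not
 * mergeable.  Bios with different keys are never compatible.
 */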

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}
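
/*
 * For example (illustrative): with a 4096-byte data unit size, a segment of
 * length 512, or one starting at offset 2048 within its page, fails this
 * check, since en/decryption can only be performed on whole data units.
 */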

blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
					&rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_ksm_put_slot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may split the bio in two: the first part continues to be
 * processed, while the second is resubmitted via submit_bio_noacct(). A
 * bounce bio is allocated to hold the encrypted contents of the first part,
 * and *bio_ptr is updated to point to this bounce bio.
 *
 * The caller must ensure the bio has a bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if the device supports the encryption context, or if we
	 * succeeded in falling back to the crypto API.
	 */
	if (blk_ksm_crypto_cfg_supported(bio->bi_bdev->bd_disk->queue->ksm,
					 &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
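
/*
 * Example use of blk_crypto_init_key() (a minimal sketch; the random key,
 * 8-byte DUN, and 4096-byte data unit size are illustrative choices, not
 * requirements):
 *
 *	u8 raw_key[64];	// 64 bytes for AES-256-XTS; see blk_crypto_modes[]
 *	struct blk_crypto_key key;
 *	int err;
 *
 *	get_random_bytes(raw_key, sizeof(raw_key));
 *	err = blk_crypto_init_key(&key, raw_key,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 *	memzero_explicit(raw_key, sizeof(raw_key));
 */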

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}
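
/*
 * Example (illustrative): before committing to inline encryption, a caller
 * might build the config it intends to use and check it against the queue:
 *
 *	struct blk_crypto_config cfg = {
 *		.crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS,
 *		.data_unit_size = 4096,
 *		.dun_bytes = 8,
 *	};
 *
 *	if (!blk_crypto_config_supported(q, &cfg))
 *		return -EOPNOTSUPP;	// or fall back to fs-layer encryption
 */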

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
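
/*
 * Typical key lifecycle (an illustrative sketch; assumes "key" was prepared
 * with blk_crypto_init_key() and "q" is the queue the I/O will be sent to):
 *
 *	err = blk_crypto_start_using_key(&key, q);
 *	if (err)
 *		return err;
 *	...
 *	bio_crypt_set_ctx(bio, &key, dun, GFP_NOFS);
 *	submit_bio(bio);
 *	...
 *	err = blk_crypto_evict_key(q, &key);	// after all I/O has completed
 */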

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into.  The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if the key is not present in the q's ksm; -errno on
 *	   error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return blk_ksm_evict_key(q->ksm, key);

	/*
	 * If the request queue's associated inline encryption hardware didn't
	 * have support for the key, then the key might have been programmed
	 * into the fallback keyslot manager, so try to evict from there.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);