From: Christophe Saout <christophe@saout.de>

Since some people keep complaining that the IV generation mechanisms
supplied in cryptoloop (and now dm-crypt) are insecure — which they
somewhat are — I hacked up a small digest-based IV generation mechanism.

It simply hashes the sector number together with the key and uses the
resulting digest as the IV.

You can specify the encryption mode as "cipher-digest", e.g. aes-md5,
serpent-sha1, or some other combination.

Consider this a proposal — I'm not a crypto expert.  Tell me if it
contains other flaws that should be fixed.

At least the "cryptoloop-exploit" Jari Ruusu posted doesn't work anymore.



---

 25-akpm/drivers/md/dm-crypt.c |   90 ++++++++++++++++++++++++++++++++++--------
 1 files changed, 74 insertions(+), 16 deletions(-)

diff -puN drivers/md/dm-crypt.c~dm-crypt-cipher-digest drivers/md/dm-crypt.c
--- 25/drivers/md/dm-crypt.c~dm-crypt-cipher-digest	Thu Feb 19 13:58:22 2004
+++ 25-akpm/drivers/md/dm-crypt.c	Thu Feb 19 13:58:22 2004
@@ -61,11 +61,13 @@ struct crypt_config {
 	/*
 	 * crypto related data
 	 */
-	struct crypto_tfm *tfm;
+	struct crypto_tfm *cipher;
+	struct crypto_tfm *digest;
 	sector_t iv_offset;
 	int (*iv_generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
 	int iv_size;
 	int key_size;
+	int digest_size;
 	u8 key[0];
 };
 
@@ -102,6 +104,35 @@ static int crypt_iv_plain(struct crypt_c
 	return 0;
 }
 
+static int crypt_iv_digest(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+	static DECLARE_MUTEX(tfm_mutex);
+	struct scatterlist sg[2] = {
+		{
+			.page = virt_to_page(iv),
+			.offset = offset_in_page(iv),
+			.length = sizeof(u64) / sizeof(u8)
+		}, {
+			.page = virt_to_page(cc->key),
+			.offset = offset_in_page(cc->key),
+			.length = cc->key_size
+		}
+	};
+	int i;
+
+	*(u64 *)iv = cpu_to_le64((u64)sector);
+
+	/* digests use the context in the tfm, sigh */
+	down(&tfm_mutex);
+	crypto_digest_digest(cc->digest, sg, 2, iv);
+	up(&tfm_mutex);
+
+	for(i = cc->digest_size; i < cc->iv_size; i += cc->digest_size)
+		memcpy(iv + i, iv, min(cc->digest_size, cc->iv_size - i));
+
+	return 0;
+}
+
 static inline int
 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                           struct scatterlist *in, unsigned int length,
@@ -116,14 +147,14 @@ crypt_convert_scatterlist(struct crypt_c
 			return r;
 
 		if (write)
-			r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv);
+			r = crypto_cipher_encrypt_iv(cc->cipher, out, in, length, iv);
 		else
-			r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv);
+			r = crypto_cipher_decrypt_iv(cc->cipher, out, in, length, iv);
 	} else {
 		if (write)
-			r = crypto_cipher_encrypt(cc->tfm, out, in, length);
+			r = crypto_cipher_encrypt(cc->cipher, out, in, length);
 		else
-			r = crypto_cipher_decrypt(cc->tfm, out, in, length);
+			r = crypto_cipher_decrypt(cc->cipher, out, in, length);
 	}
 
 	return r;
@@ -436,13 +467,26 @@ static int crypt_ctr(struct dm_target *t
 		return -ENOMEM;
 	}
 
+	cc->digest_size = 0;
+	cc->digest = NULL;
 	if (!mode || strcmp(mode, "plain") == 0)
 		cc->iv_generator = crypt_iv_plain;
 	else if (strcmp(mode, "ecb") == 0)
 		cc->iv_generator = NULL;
 	else {
-		ti->error = "dm-crypt: Invalid chaining mode";
-		goto bad1;
+		tfm = crypto_alloc_tfm(mode, 0);
+		if (!tfm) {
+			ti->error = "dm-crypt: Error allocating digest tfm";
+			goto bad1;
+		}
+		if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST) {
+			ti->error = "dm-crypt: Expected digest algorithm";
+			goto bad1;
+		}
+
+		cc->digest = tfm;
+		cc->digest_size = crypto_tfm_alg_digestsize(tfm);
+		cc->iv_generator = crypt_iv_digest;
 	}
 
 	if (cc->iv_generator)
@@ -455,12 +499,18 @@ static int crypt_ctr(struct dm_target *t
 		ti->error = "dm-crypt: Error allocating crypto tfm";
 		goto bad1;
 	}
+	if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
+		ti->error = "dm-crypt: Expected cipher algorithm";
+		goto bad1;
+	}
 
-	if (tfm->crt_u.cipher.cit_decrypt_iv && tfm->crt_u.cipher.cit_encrypt_iv)
-		/* at least a 32 bit sector number should fit in our buffer */
+	if (tfm->crt_u.cipher.cit_decrypt_iv &&
+	    tfm->crt_u.cipher.cit_encrypt_iv) {
+		/* at least a sector number should fit in our buffer */
 		cc->iv_size = max(crypto_tfm_alg_ivsize(tfm),
-		                  (unsigned int)(sizeof(u32) / sizeof(u8)));
-	else {
+		                  (unsigned int)(sizeof(u64) / sizeof(u8)));
+		cc->iv_size = max(cc->iv_size, cc->digest_size);
+	} else {
 		cc->iv_size = 0;
 		if (cc->iv_generator) {
 			DMWARN("dm-crypt: Selected cipher does not support IVs");
@@ -482,7 +532,7 @@ static int crypt_ctr(struct dm_target *t
 		goto bad3;
 	}
 
-	cc->tfm = tfm;
+	cc->cipher = tfm;
 	cc->key_size = key_size;
 	if ((key_size == 0 && strcmp(argv[1], "-") != 0)
 	    || crypt_decode_key(cc->key, argv[1], key_size) < 0) {
@@ -521,6 +571,8 @@ bad3:
 bad2:
 	crypto_free_tfm(tfm);
 bad1:
+	if (cc->digest)
+		crypto_free_tfm(cc->digest);
 	kfree(cc);
 	return -EINVAL;
 }
@@ -532,7 +584,10 @@ static void crypt_dtr(struct dm_target *
 	mempool_destroy(cc->page_pool);
 	mempool_destroy(cc->io_pool);
 
-	crypto_free_tfm(cc->tfm);
+	crypto_free_tfm(cc->cipher);
+	if (cc->digest)
+		crypto_free_tfm(cc->digest);
+
 	dm_put_device(ti, cc->dev);
 	kfree(cc);
 }
@@ -680,11 +735,14 @@ static int crypt_status(struct dm_target
 		break;
 
 	case STATUSTYPE_TABLE:
-		cipher = crypto_tfm_alg_name(cc->tfm);
+		cipher = crypto_tfm_alg_name(cc->cipher);
 
-		switch(cc->tfm->crt_u.cipher.cit_mode) {
+		switch(cc->cipher->crt_u.cipher.cit_mode) {
 		case CRYPTO_TFM_MODE_CBC:
-			mode = "plain";
+			if (cc->digest)
+				mode = crypto_tfm_alg_name(cc->digest);
+			else
+				mode = "plain";
 			break;
 		case CRYPTO_TFM_MODE_ECB:
 			mode = "ecb";

_