/*
* Quick & dirty crypto testing module.
*
* This will only exist until we have a better testing mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
* 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
* 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
*
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include "tcrypt.h"
/*
* Need to kmalloc() memory for testing kmap().
*/
#define TVMEMSIZE 16384
#define XBUFSIZE 32768
/*
* Indexes into the xbuf to simulate cross-page access.
*/
#define IDX1 37
#define IDX2 32400
#define IDX3 1
#define IDX4 8193
#define IDX5 22222
#define IDX6 17101
#define IDX7 27333
#define IDX8 3000
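/*
 * Assuming 4 KiB pages (an assumption, not dictated by the code), these
 * offsets land in different pages of the 32 KiB xbuf, so a scatterlist
 * built from them forces page-boundary crossings between entries.
 */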
/*
* Used by test_cipher()
*/
#define ENCRYPT 1
#define DECRYPT 0
struct tcrypt_result {
struct completion completion;
int err;
};
static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
/*
* Used by test_cipher_speed()
*/
static unsigned int sec;
static int mode;
static char *xbuf;
static char *tvmem;
static char *check[] = {
"des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish",
"twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6",
"arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
"camellia", "seed", NULL
};
static void hexdump(unsigned char *buf, unsigned int len)
{
while (len--)
printk("%02x", *buf++);
printk("\n");
}
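/*
 * Completion callback for asynchronous cipher requests.  An err of
 * -EINPROGRESS only signals that a backlogged request has started
 * processing, so keep waiting; any other value is the final status and
 * wakes the waiter in test_cipher() via the completion.
 */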
static void tcrypt_complete(struct crypto_async_request *req, int err)
{
struct tcrypt_result *res = req->data;
if (err == -EINPROGRESS)
return;
res->err = err;
complete(&res->completion);
}
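/*
 * Run every test vector for a hash/HMAC algorithm through the synchronous
 * hash interface: set the key when the vector provides one, digest the
 * plaintext and compare against the expected digest, first from a single
 * scatterlist entry and then chunked across pages of xbuf.
 */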
static void test_hash(char *algo, struct hash_testvec *template,
unsigned int tcount)
{
unsigned int i, j, k, temp;
struct scatterlist sg[8];
char result[64];
struct crypto_hash *tfm;
struct hash_desc desc;
struct hash_testvec *hash_tv;
unsigned int tsize;
int ret;
printk("\ntesting %s\n", algo);
tsize = sizeof(struct hash_testvec);
tsize *= tcount;
if (tsize > TVMEMSIZE) {
printk("template (%u) too big for tvmem (%u)\n", tsize, TVMEMSIZE);
return;
}
memcpy(tvmem, template, tsize);
hash_tv = (void *)tvmem;
tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
printk("failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
}
desc.tfm = tfm;
desc.flags = 0;
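/* First pass: each vector is digested from one contiguous sg entry. */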
for (i = 0; i < tcount; i++) {
printk("test %u:\n", i + 1);
memset(result, 0, 64);
sg_init_one(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize);
if (hash_tv[i].ksize) {
ret = crypto_hash_setkey(tfm, hash_tv[i].key,
hash_tv[i].ksize);
if (ret) {
printk("setkey() failed ret=%d\n", ret);
goto out;
}
}
ret = crypto_hash_digest(&desc, sg, hash_tv[i].psize, result);
if (ret) {
printk("digest () failed ret=%d\n", ret);
goto out;
}
hexdump(result, crypto_hash_digestsize(tfm));
printk("%s\n",
memcmp(result, hash_tv[i].digest,
crypto_hash_digestsize(tfm)) ?
"fail" : "pass");
}
printk("testing %s across pages\n", algo);
/* setup the dummy buffer first */
memset(xbuf, 0, XBUFSIZE);
j = 0;
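/*
 * Second pass: only vectors with a nonzero chunk count (np) are used.
 * The plaintext is copied piecewise into xbuf at the IDX[] offsets,
 * tap[k] bytes per scatterlist entry, so the digest walks buffers
 * spread across several pages.
 */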
for (i = 0; i < tcount; i++) {
if (hash_tv[i].np) {
j++;
printk("test %u:\n", j);
memset(result, 0, 64);
temp = 0;
sg_init_table(sg, hash_tv[i].np);
for (k = 0; k < hash_tv[i].np; k++) {
memcpy(&xbuf[IDX[k]],
hash_tv[i].plaintext + temp,
hash_tv[i].tap[k]);
temp += hash_tv[i].tap[k];
sg_set_buf(&sg[k], &xbuf[IDX[k]],
hash_tv[i].tap[k]);
}
if (hash_tv[i].ksize) {
ret = crypto_hash_setkey(tfm, hash_tv[i].key,
hash_tv[i].ksize);
if (ret) {
printk("setkey() failed ret=%d\n", ret);
goto out;
}
}
ret = crypto_hash_digest(&desc, sg, hash_tv[i].psize,
result);
if (ret) {
printk("digest () failed ret=%d\n", ret);
goto out;
}
hexdump(result, crypto_hash_digestsize(tfm));
printk("%s\n",
memcmp(result, hash_tv[i].digest,
crypto_hash_digestsize(tfm)) ?
"fail" : "pass");
}
}
out:
crypto_free_hash(tfm);
}
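/*
 * Encrypt or decrypt every test vector for a block cipher through the
 * asynchronous ablkcipher interface.  The operation is done in place
 * (the same scatterlist is passed as source and destination) and the
 * output is compared against the expected result, first from one
 * contiguous buffer and then chunked across pages of xbuf.
 */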
static void test_cipher(char *algo, int enc,
struct cipher_testvec *template, unsigned int tcount)
{
unsigned int ret, i, j, k, temp;
unsigned int tsize;
char *q;
struct crypto_ablkcipher *tfm;
char *key;
struct cipher_testvec *cipher_tv;
struct ablkcipher_request *req;
struct scatterlist sg[8];
const char *e;
struct tcrypt_result result;
if (enc == ENCRYPT)
e = "encryption";
else
e = "decryption";
printk("\ntesting %s %s\n", algo, e);
tsize = sizeof(struct cipher_testvec);
tsize *= tcount;
if (tsize > TVMEMSIZE) {
printk("template (%u) too big for tvmem (%u)\n", tsize,
TVMEMSIZE);
return;
}
memcpy(tvmem, template, tsize);
cipher_tv = (void *)tvmem;
init_completion(&result.completion);
tfm = crypto_alloc_ablkcipher(algo, 0, 0);
if (IS_ERR(tfm)) {
printk("failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
}
req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
printk("failed to allocate request for %s\n", algo);
goto out;
}
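/*
 * CRYPTO_TFM_REQ_MAY_BACKLOG asks the core to backlog the request rather
 * than reject it when the driver's queue is full; the final status is
 * then delivered through tcrypt_complete() and the tcrypt_result above.
 */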
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &result);
j = 0;
for (i = 0; i < tcount; i++) {
if (!(cipher_tv[i].np)) {
j++;
printk("test %u (%d bit key):\n",
j, cipher_tv[i].klen * 8);
crypto_ablkcipher_clear_flags(tfm, ~0);
if (cipher_tv[i].wk)
crypto_ablkcipher_set_flags(
tfm, CRYPTO_TFM_REQ_WEAK_KEY);
key = cipher_tv[i].key;
ret = crypto_ablkcipher_setkey(tfm, key,
cipher_tv[i].klen);
if (ret) {
printk("setkey() failed flags=%x\n",
crypto_ablkcipher_get_flags(tfm));
if (!cipher_tv[i].fail)
goto out;
}
sg_init_one(&sg[0], cipher_tv[i].input,
cipher_tv[i].ilen);
ablkcipher_request_set_crypt(req, sg, sg,
cipher_tv[i].ilen,
cipher_tv[i].iv);
ret = enc ?
crypto_ablkcipher_encrypt(req) :
crypto_ablkcipher_decrypt(req);
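/*
 * A return of 0 means the request completed synchronously.
 * -EINPROGRESS means it was queued and -EBUSY means it went onto the
 * backlog; in both cases wait for tcrypt_complete() to post the final
 * status in result.err before checking it.
 */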
switch (ret) {
case 0:
break;
case -EINPROGRESS:
case -EBUSY:
ret = wait_for_completion_interruptible(
&result.completion);
if (!ret && !((ret = result.err))) {
INIT_COMPLETION(result.completion);
break;
}
/* fall through */
default:
printk("%s () failed err=%d\n", e, -ret);
goto out;
}
q = kmap(sg_page(&sg[0])) + sg[0].offset;
hexdump(q, cipher_tv[i].rlen);
printk("%s\n",
memcmp(q, cipher_tv[i].result,
cipher_tv[i].rlen) ? "fail" : "pass");
}
}
printk("\ntesting %s %s across pages (chunking)\n", algo, e);
memset(xbuf, 0, XBUFSIZE);
j = 0;
for (i = 0; i < tcount; i++) {
if (cipher_tv[i].np) {
j++;
printk("test %u (%d bit key):\n",
j, cipher_tv[i].klen * 8);
crypto_ablkcipher_clear_flags(tfm, ~0);
if (cipher_tv[i].wk)
crypto_ablkcipher_set_flags(
tfm, CRYPTO_TFM_REQ_WEAK_KEY);
key = cipher_tv[i].key;
ret = crypto_ablkcipher_setkey(tfm, key,
cipher_tv[i].klen);
if (ret) {
printk("setkey() failed flags=%x\n",
crypto_ablkcipher_get_flags(tfm));
if (!cipher_tv[i].fail)
goto out;
}
temp = 0;
sg_init_table(sg, cipher_tv[i].np);
for (k = 0; k < cipher_tv[i].np; k++) {
memcpy(&xbuf[IDX[k]],
cipher_tv[i].input + temp,
cipher_tv[i].tap[k]);
temp += cipher_tv[i].tap[k];
sg_set_buf(&sg[k], &xbuf[IDX[k]],
cipher_tv[i].tap[k]);
}
ablkcipher_request_set_crypt(req, sg, sg,
cipher_tv[i].ilen,
cipher_tv[i].iv);
ret = enc ?
crypto_ablkcipher_encrypt(req) :
crypto_ablkcipher_decrypt(req);
switch (ret) {
case 0:
break;
case -EINPROGRESS:
case -EBUSY:
ret = wait_for_completion_interruptible(
&result.completion);
if (!ret && !((ret = result.err))) {
INIT_COMPLETION(result.completion);
break;