Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "API:
   - Add support for AEAD in simd
   - Add fuzz testing to testmgr
   - Add panic_on_fail module parameter to testmgr
   - Use per-CPU struct instead of multiple variables in scompress
   - Change verify API for akcipher

  Algorithms:
   - Convert x86 AEAD algorithms over to simd
   - Forbid 2-key 3DES in FIPS mode
   - Add EC-RDSA (GOST 34.10) algorithm

  Drivers:
   - Set output IV with ctr-aes in crypto4xx
   - Set output IV in rockchip
   - Fix potential length overflow with hashing in sun4i-ss
   - Fix computation error with ctr in vmx
   - Add SM4 protected keys support in ccree
   - Remove long-broken mxc-scc driver
   - Add rfc4106(gcm(aes)) cipher support in cavium/nitrox"
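
Most of the arch/arm and arch/arm64 hunks below are mechanical conversions from
may_use_simd() to the new crypto_simd_usable() helper declared in
<crypto/internal/simd.h>, which among other things lets the extended self-tests simulate
the !SIMD case. A minimal sketch of the call-site pattern these conversions follow; the
example_* names are hypothetical stand-ins for the per-driver scalar and NEON helpers:

    #include <crypto/internal/simd.h>

    static int example_hash_update(struct shash_desc *desc, const u8 *data,
                                   unsigned int len)
    {
            /* Take the scalar fallback when the SIMD unit cannot be used in
             * this context (or when the self-tests ask for that path). */
            if (!crypto_simd_usable())
                    return example_hash_update_scalar(desc, data, len);

            kernel_neon_begin();
            example_hash_update_neon(shash_desc_ctx(desc), data, len);
            kernel_neon_end();

            return 0;
    }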

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (179 commits)
  crypto: ccree - use a proper le32 type for le32 val
  crypto: ccree - remove set but not used variable 'du_size'
  crypto: ccree - Make cc_sec_disable static
  crypto: ccree - fix spelling mistake "protedcted" -> "protected"
  crypto: caam/qi2 - generate hash keys in-place
  crypto: caam/qi2 - fix DMA mapping of stack memory
  crypto: caam/qi2 - fix zero-length buffer DMA mapping
  crypto: stm32/cryp - update to return iv_out
  crypto: stm32/cryp - remove request mutex protection
  crypto: stm32/cryp - add weak key check for DES
  crypto: atmel - remove set but not used variable 'alg_name'
  crypto: picoxcell - Use dev_get_drvdata()
  crypto: crypto4xx - get rid of redundant using_sd variable
  crypto: crypto4xx - use sync skcipher for fallback
  crypto: crypto4xx - fix cfb and ofb "overran dst buffer" issues
  crypto: crypto4xx - fix ctr-aes missing output IV
  crypto: ecrdsa - select ASN1 and OID_REGISTRY for EC-RDSA
  crypto: ux500 - use ccflags-y instead of CFLAGS_<basename>.o
  crypto: ccree - handle tee fips error during power management resume
  crypto: ccree - add function to handle cryptocell tee fips error
  ...
torvalds committed May 7, 2019
2 parents 7aefd94 + e59f755 commit 81ff5d2
Showing 322 changed files with 5,973 additions and 4,248 deletions.
1 change: 0 additions & 1 deletion Documentation/crypto/api-samples.rst
@@ -133,7 +133,6 @@ Code Example For Use of Operational State Memory With SHASH
if (!sdesc)
return ERR_PTR(-ENOMEM);
sdesc->shash.tfm = alg;
sdesc->shash.flags = 0x0;
return sdesc;
}
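
The deleted assignment reflects the removal of the flags field from struct shash_desc
elsewhere in this series, so callers now only set the transform pointer. Reconstructed
from the surrounding documentation sample, the helper now reads roughly as follows (a
sketch, not a verbatim quote of the file):

    struct sdesc {
            struct shash_desc shash;
            char ctx[];
    };

    static struct sdesc *init_sdesc(struct crypto_shash *alg)
    {
            struct sdesc *sdesc;
            int size;

            size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
            sdesc = kmalloc(size, GFP_KERNEL);
            if (!sdesc)
                    return ERR_PTR(-ENOMEM);
            /* no shash_desc::flags left to initialise */
            sdesc->shash.tfm = alg;
            return sdesc;
    }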

2 changes: 2 additions & 0 deletions arch/arm/crypto/aes-neonbs-glue.c
@@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req,
int err;

err = skcipher_walk_virt(&walk, req, true);
if (err)
return err;

crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
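
The added early return matters because walk.iv can already have been freed when
skcipher_walk_virt() fails, so encrypting the XTS tweak through it would touch freed
memory. The guarded prologue, annotated (same shape as the hunk above, reusing its
variables):

    err = skcipher_walk_virt(&walk, req, true);
    if (err)
            return err;     /* walk.iv must not be used after a failure */

    /* the walk succeeded, so walk.iv points at valid IV/tweak storage */
    crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);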

5 changes: 3 additions & 2 deletions arch/arm/crypto/chacha-neon-glue.c
@@ -21,6 +21,7 @@

#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -93,7 +94,7 @@ static int chacha_neon(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);

if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_chacha_crypt(req);

return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -107,7 +108,7 @@ static int xchacha_neon(struct skcipher_request *req)
u32 state[16];
u8 real_iv[16];

if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_xchacha_crypt(req);

crypto_chacha_init(state, ctx, req->iv);
5 changes: 3 additions & 2 deletions arch/arm/crypto/crc32-ce-glue.c
@@ -16,6 +16,7 @@
#include <linux/string.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>

#include <asm/hwcap.h>
#include <asm/neon.h>
@@ -113,7 +114,7 @@ static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
u32 *crc = shash_desc_ctx(desc);
unsigned int l;

if (may_use_simd()) {
if (crypto_simd_usable()) {
if ((u32)data % SCALE_F) {
l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));

@@ -147,7 +148,7 @@ static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
u32 *crc = shash_desc_ctx(desc);
unsigned int l;

if (may_use_simd()) {
if (crypto_simd_usable()) {
if ((u32)data % SCALE_F) {
l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));

3 changes: 2 additions & 1 deletion arch/arm/crypto/crct10dif-ce-glue.c
@@ -15,6 +15,7 @@
#include <linux/string.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>

#include <asm/neon.h>
#include <asm/simd.h>
@@ -36,7 +37,7 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
{
u16 *crc = shash_desc_ctx(desc);

if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
kernel_neon_begin();
*crc = crc_t10dif_pmull(*crc, data, length);
kernel_neon_end();
10 changes: 4 additions & 6 deletions arch/arm/crypto/ghash-ce-glue.c
@@ -14,6 +14,7 @@
#include <asm/unaligned.h>
#include <crypto/cryptd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/gf128mul.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
@@ -185,7 +186,6 @@ static int ghash_async_init(struct ahash_request *req)
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

desc->tfm = child;
desc->flags = req->base.flags;
return crypto_shash_init(desc);
}

@@ -196,7 +196,7 @@ static int ghash_async_update(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

if (!may_use_simd() ||
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -214,7 +214,7 @@ static int ghash_async_final(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

if (!may_use_simd() ||
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -232,7 +232,7 @@ static int ghash_async_digest(struct ahash_request *req)
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

if (!may_use_simd() ||
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -242,7 +242,6 @@ static int ghash_async_digest(struct ahash_request *req)
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

desc->tfm = child;
desc->flags = req->base.flags;
return shash_ahash_digest(req, desc);
}
}
@@ -255,7 +254,6 @@ static int ghash_async_import(struct ahash_request *req, const void *in)
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);

desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
desc->flags = req->base.flags;

return crypto_shash_import(desc, in);
}
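
The ghash_async_* hooks above share one dispatch pattern: defer to the cryptd queue when
NEON cannot be used in the current context, or when running atomically while cryptd
already has queued requests (so ordering is preserved), and otherwise run the underlying
shash directly. A sketch assembled from the hunks above, with the removed desc->flags
assignment gone; the function name is hypothetical:

    static int example_ghash_async_digest(struct ahash_request *req)
    {
            struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
            struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
            struct ahash_request *cryptd_req = ahash_request_ctx(req);
            struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

            if (!crypto_simd_usable() ||
                (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
                    /* hand the request to the cryptd worker */
                    memcpy(cryptd_req, req, sizeof(*req));
                    ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
                    return crypto_ahash_digest(cryptd_req);
            } else {
                    struct shash_desc *desc = cryptd_shash_desc(cryptd_req);

                    /* run the child shash synchronously; only the tfm is set now */
                    desc->tfm = cryptd_ahash_child(cryptd_tfm);
                    return shash_ahash_digest(req, desc);
            }
    }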
3 changes: 2 additions & 1 deletion arch/arm/crypto/nhpoly1305-neon-glue.c
@@ -9,6 +9,7 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>

@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
static int nhpoly1305_neon_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
if (srclen < 64 || !may_use_simd())
if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);

do {
5 changes: 3 additions & 2 deletions arch/arm/crypto/sha1-ce-glue.c
@@ -9,6 +9,7 @@
*/

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
@@ -33,7 +34,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_state *sctx = shash_desc_ctx(desc);

if (!may_use_simd() ||
if (!crypto_simd_usable() ||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return sha1_update_arm(desc, data, len);

@@ -47,7 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (!may_use_simd())
if (!crypto_simd_usable())
return sha1_finup_arm(desc, data, len, out);

kernel_neon_begin();
5 changes: 3 additions & 2 deletions arch/arm/crypto/sha1_neon_glue.c
@@ -19,6 +19,7 @@
*/

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -39,7 +40,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_state *sctx = shash_desc_ctx(desc);

if (!may_use_simd() ||
if (!crypto_simd_usable() ||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return sha1_update_arm(desc, data, len);

@@ -54,7 +55,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (!may_use_simd())
if (!crypto_simd_usable())
return sha1_finup_arm(desc, data, len, out);

kernel_neon_begin();
5 changes: 3 additions & 2 deletions arch/arm/crypto/sha2-ce-glue.c
@@ -9,6 +9,7 @@
*/

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
@@ -34,7 +35,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha256_state *sctx = shash_desc_ctx(desc);

if (!may_use_simd() ||
if (!crypto_simd_usable() ||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_arm_update(desc, data, len);

@@ -49,7 +50,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (!may_use_simd())
if (!crypto_simd_usable())
return crypto_sha256_arm_finup(desc, data, len, out);

kernel_neon_begin();
5 changes: 3 additions & 2 deletions arch/arm/crypto/sha256_neon_glue.c
@@ -15,6 +15,7 @@
*/

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -34,7 +35,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
{
struct sha256_state *sctx = shash_desc_ctx(desc);

if (!may_use_simd() ||
if (!crypto_simd_usable() ||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_arm_update(desc, data, len);

@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
static int sha256_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (!may_use_simd())
if (!crypto_simd_usable())
return crypto_sha256_arm_finup(desc, data, len, out);

kernel_neon_begin();
5 changes: 3 additions & 2 deletions arch/arm/crypto/sha512-neon-glue.c
@@ -9,6 +9,7 @@
*/

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha512_base.h>
#include <linux/crypto.h>
@@ -30,7 +31,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
{
struct sha512_state *sctx = shash_desc_ctx(desc);

if (!may_use_simd() ||
if (!crypto_simd_usable() ||
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
return sha512_arm_update(desc, data, len);

@@ -45,7 +46,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
if (!may_use_simd())
if (!crypto_simd_usable())
return sha512_arm_finup(desc, data, len, out);

kernel_neon_begin();
7 changes: 4 additions & 3 deletions arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -14,6 +14,7 @@
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

@@ -109,7 +110,7 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
u32 abytes, u32 *macp)
{
if (may_use_simd()) {
if (crypto_simd_usable()) {
kernel_neon_begin();
ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
num_rounds(key));
@@ -255,7 +256,7 @@ static int ccm_encrypt(struct aead_request *req)

err = skcipher_walk_aead_encrypt(&walk, req, false);

if (may_use_simd()) {
if (crypto_simd_usable()) {
while (walk.nbytes) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;

@@ -313,7 +314,7 @@ static int ccm_decrypt(struct aead_request *req)

err = skcipher_walk_aead_decrypt(&walk, req, false);

if (may_use_simd()) {
if (crypto_simd_usable()) {
while (walk.nbytes) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;

5 changes: 3 additions & 2 deletions arch/arm64/crypto/aes-ce-glue.c
@@ -12,6 +12,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
@@ -52,7 +53,7 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);

if (!may_use_simd()) {
if (!crypto_simd_usable()) {
__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
return;
}
@@ -66,7 +67,7 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);

if (!may_use_simd()) {
if (!crypto_simd_usable()) {
__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
return;
}
6 changes: 3 additions & 3 deletions arch/arm64/crypto/aes-glue.c
@@ -405,7 +405,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

if (!may_use_simd())
if (!crypto_simd_usable())
return aes_ctr_encrypt_fallback(ctx, req);

return ctr_encrypt(req);
@@ -642,7 +642,7 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
{
int rounds = 6 + ctx->key_length / 4;

if (may_use_simd()) {
if (crypto_simd_usable()) {
kernel_neon_begin();
aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
enc_after);
@@ -707,7 +707,7 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out)
struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);
mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);

memcpy(out, ctx->dg, AES_BLOCK_SIZE);
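
The one-line change in cbcmac_final() makes the "encrypt before" argument conditional on
ctx->len, so the pending digest block only receives its final encryption when partial
data is actually buffered, presumably so that zero-length messages match the generic
cbcmac template. For comparison, a paraphrased sketch of that template's final step (an
assumption based on crypto/ccm.c, with hypothetical context-struct field names):

    /* Sketch: only a buffered partial block gets the last encryption;
     * otherwise dg already holds the finished MAC. */
    static int cbcmac_template_final_sketch(struct crypto_cipher *tfm,
                                            struct cbcmac_desc_ctx *ctx,
                                            u8 *out, unsigned int bs)
    {
            if (ctx->len)
                    crypto_cipher_encrypt_one(tfm, ctx->dg, ctx->dg);

            memcpy(out, ctx->dg, bs);
            return 0;
    }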

4 changes: 3 additions & 1 deletion arch/arm64/crypto/aes-neonbs-glue.c
@@ -288,7 +288,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);

if (!may_use_simd())
if (!crypto_simd_usable())
return aes_ctr_encrypt_fallback(&ctx->fallback, req);

return ctr_encrypt(req);
@@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
int err;

err = skcipher_walk_virt(&walk, req, false);
if (err)
return err;

kernel_neon_begin();
neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);