diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-31 09:45:28 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-07-31 09:45:28 -0700 |
| commit | 44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec (patch) | |
| tree | 504034f60c5510ebeb2c0d1d93a68fba999f2896 /crypto/ahash.c | |
| parent | b4efd62564e96d1edb99eb00dd0ff620dbd1afab (diff) | |
| parent | bf24d64268544379d9a9b5b8efc2bb03967703b3 (diff) | |
Merge tag 'v6.17-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"API:
- Allow hash drivers without fallbacks (e.g., hardware key)
Algorithms:
- Add hmac hardware key support (phmac) on s390
- Re-enable sha384 in FIPS mode
- Disable sha1 in FIPS mode
- Convert zstd to acomp
Drivers:
- Lower priority of qat skcipher and aead
- Convert aspeed to partial block API
- Add iMX8QXP support in caam
- Add rate limiting support for GEN6 devices in qat
- Enable telemetry for GEN6 devices in qat
- Implement full backlog mode for hisilicon/sec2"
* tag 'v6.17-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (116 commits)
crypto: keembay - Use min() to simplify ocs_create_linked_list_from_sg()
crypto: hisilicon/hpre - fix dma unmap sequence
crypto: qat - make adf_dev_autoreset() static
crypto: ccp - reduce stack usage in ccp_run_aes_gcm_cmd
crypto: qat - refactor ring-related debug functions
crypto: qat - fix seq_file position update in adf_ring_next()
crypto: qat - fix DMA direction for compression on GEN2 devices
crypto: jitter - replace ARRAY_SIZE definition with header include
crypto: engine - remove {prepare,unprepare}_crypt_hardware callbacks
crypto: engine - remove request batching support
crypto: qat - flush misc workqueue during device shutdown
crypto: qat - enable rate limiting feature for GEN6 devices
crypto: qat - add compression slice count for rate limiting
crypto: qat - add get_svc_slice_cnt() in device data structure
crypto: qat - add adf_rl_get_num_svc_aes() in rate limiting
crypto: qat - relocate service related functions
crypto: qat - consolidate service enums
crypto: qat - add decompression service for rate limiting
crypto: qat - validate service in rate limiting sysfs api
crypto: hisilicon/sec2 - implement full backlog mode for sec
...
Diffstat (limited to 'crypto/ahash.c')
| -rw-r--r-- | crypto/ahash.c | 39 |
1 file changed, 17 insertions(+), 22 deletions(-)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index bc84a07c924c..a227793d2c5b 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -29,19 +29,6 @@

 #define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e

-struct crypto_hash_walk {
-	const char *data;
-
-	unsigned int offset;
-	unsigned int flags;
-
-	struct page *pg;
-	unsigned int entrylen;
-
-	unsigned int total;
-	struct scatterlist *sg;
-};
-
 static int ahash_def_finup(struct ahash_request *req);

 static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm)
@@ -112,8 +99,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 	return hash_walk_next(walk);
 }

-static int crypto_hash_walk_first(struct ahash_request *req,
-				  struct crypto_hash_walk *walk)
+int crypto_hash_walk_first(struct ahash_request *req,
+			   struct crypto_hash_walk *walk)
 {
 	walk->total = req->nbytes;
 	walk->entrylen = 0;
@@ -133,8 +120,9 @@ static int crypto_hash_walk_first(struct ahash_request *req,

 	return hash_walk_new_entry(walk);
 }
+EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

-static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 {
 	if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
 		return err;
@@ -160,11 +148,7 @@ static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)

 	return hash_walk_new_entry(walk);
 }
-
-static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
-{
-	return !(walk->entrylen | walk->total);
-}
+EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

 /*
  * For an ahash tfm that is using an shash algorithm (instead of an ahash
@@ -347,6 +331,12 @@ static int ahash_do_req_chain(struct ahash_request *req,
 	if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
 		return -ENOSYS;

+	if (!crypto_ahash_need_fallback(tfm))
+		return -ENOSYS;
+
+	if (crypto_hash_no_export_core(tfm))
+		return -ENOSYS;
+
 	{
 		u8 state[HASH_MAX_STATESIZE];
@@ -954,6 +944,10 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
 	    base->cra_reqsize > MAX_SYNC_HASH_REQSIZE)
 		return -EINVAL;

+	if (base->cra_flags & CRYPTO_ALG_NEED_FALLBACK &&
+	    base->cra_flags & CRYPTO_ALG_NO_FALLBACK)
+		return -EINVAL;
+
 	err = hash_prepare_alg(&alg->halg);
 	if (err)
 		return err;
@@ -962,7 +956,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
 	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

 	if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) &
-	    (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT))
+	    (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT) &&
+	    !(base->cra_flags & CRYPTO_ALG_NO_FALLBACK))
 		base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK;

 	if (!alg->setkey)