author     Linus Torvalds <torvalds@linux-foundation.org>   2025-07-31 09:45:28 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2025-07-31 09:45:28 -0700
commit     44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec (patch)
tree       504034f60c5510ebeb2c0d1d93a68fba999f2896 /include/crypto
parent     b4efd62564e96d1edb99eb00dd0ff620dbd1afab (diff)
parent     bf24d64268544379d9a9b5b8efc2bb03967703b3 (diff)
Merge tag 'v6.17-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Allow hash drivers without fallbacks (e.g., hardware key)

  Algorithms:
   - Add hmac hardware key support (phmac) on s390
   - Re-enable sha384 in FIPS mode
   - Disable sha1 in FIPS mode
   - Convert zstd to acomp

  Drivers:
   - Lower priority of qat skcipher and aead
   - Convert aspeed to partial block API
   - Add iMX8QXP support in caam
   - Add rate limiting support for GEN6 devices in qat
   - Enable telemetry for GEN6 devices in qat
   - Implement full backlog mode for hisilicon/sec2"

* tag 'v6.17-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (116 commits)
  crypto: keembay - Use min() to simplify ocs_create_linked_list_from_sg()
  crypto: hisilicon/hpre - fix dma unmap sequence
  crypto: qat - make adf_dev_autoreset() static
  crypto: ccp - reduce stack usage in ccp_run_aes_gcm_cmd
  crypto: qat - refactor ring-related debug functions
  crypto: qat - fix seq_file position update in adf_ring_next()
  crypto: qat - fix DMA direction for compression on GEN2 devices
  crypto: jitter - replace ARRAY_SIZE definition with header include
  crypto: engine - remove {prepare,unprepare}_crypt_hardware callbacks
  crypto: engine - remove request batching support
  crypto: qat - flush misc workqueue during device shutdown
  crypto: qat - enable rate limiting feature for GEN6 devices
  crypto: qat - add compression slice count for rate limiting
  crypto: qat - add get_svc_slice_cnt() in device data structure
  crypto: qat - add adf_rl_get_num_svc_aes() in rate limiting
  crypto: qat - relocate service related functions
  crypto: qat - consolidate service enums
  crypto: qat - add decompression service for rate limiting
  crypto: qat - validate service in rate limiting sysfs api
  crypto: hisilicon/sec2 - implement full backlog mode for sec
  ...
Diffstat (limited to 'include/crypto')
-rw-r--r--  include/crypto/engine.h               1
-rw-r--r--  include/crypto/internal/acompress.h   5
-rw-r--r--  include/crypto/internal/engine.h     15
-rw-r--r--  include/crypto/internal/hash.h       36
4 files changed, 37 insertions, 20 deletions
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 545dbefe3e13..2e60344437da 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -76,7 +76,6 @@ int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
- int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen);
void crypto_engine_exit(struct crypto_engine *engine);
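
With the batch callback dropped, crypto_engine_alloc_init_and_set() takes only the device, retry_support, rt and qlen. A minimal sketch of the updated call site; my_probe() and the chosen queue length are illustrative, not part of this patch:

#include <linux/platform_device.h>
#include <crypto/engine.h>

/* Hypothetical driver probe showing the new four-argument signature. */
static int my_probe(struct platform_device *pdev)
{
	struct crypto_engine *engine;

	/* retry_support = true, non-RT pump thread, queue length of 16 */
	engine = crypto_engine_alloc_init_and_set(&pdev->dev, true, false, 16);
	if (!engine)
		return -ENOMEM;

	return crypto_engine_start(engine);
}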
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index ffffd88bbbad..2d97440028ff 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -63,10 +63,7 @@ struct crypto_acomp_stream {
struct crypto_acomp_streams {
/* These must come first because of struct scomp_alg. */
void *(*alloc_ctx)(void);
- union {
- void (*free_ctx)(void *);
- void (*cfree_ctx)(const void *);
- };
+ void (*free_ctx)(void *);
struct crypto_acomp_stream __percpu *streams;
struct work_struct stream_work;
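
With the union gone, free_ctx keeps the plain void * signature. A hedged sketch of how an algorithm might populate the structure; struct my_ctx, my_alloc_ctx() and my_free_ctx() are illustrative placeholders, and the ERR_PTR-on-failure convention is assumed from existing scomp-style allocators:

#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/internal/acompress.h>

struct my_ctx {
	u8 workspace[4096];	/* hypothetical per-stream scratch space */
};

static void *my_alloc_ctx(void)
{
	void *ctx = kzalloc(sizeof(struct my_ctx), GFP_KERNEL);

	return ctx ?: ERR_PTR(-ENOMEM);
}

static void my_free_ctx(void *ctx)
{
	kfree(ctx);
}

static struct crypto_acomp_streams my_streams = {
	.alloc_ctx = my_alloc_ctx,
	.free_ctx  = my_free_ctx,
};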
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index b6a4ea2240fc..f19ef376833f 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -21,7 +21,6 @@ struct device;
/*
* struct crypto_engine - crypto hardware engine
* @name: the engine name
- * @idling: the engine is entering idle state
* @busy: request pump is busy
* @running: the engine is on working
* @retry_support: indication that the hardware allows re-execution
@@ -31,14 +30,6 @@ struct device;
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
@@ -46,7 +37,6 @@ struct device;
*/
struct crypto_engine {
char name[ENGINE_NAME_LEN];
- bool idling;
bool busy;
bool running;
@@ -58,11 +48,6 @@ struct crypto_engine {
struct crypto_queue queue;
struct device *dev;
- int (*prepare_crypt_hardware)(struct crypto_engine *engine);
- int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*do_batch_requests)(struct crypto_engine *engine);
-
-
struct kthread_worker *kworker;
struct kthread_work pump_requests;
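
With the prepare/unprepare callbacks and the batch hook removed from struct crypto_engine, any hardware power-up or teardown is left to the driver's own do_one_request handler. A hedged sketch under that assumption; all my_* names are hypothetical, and the synchronous finalize-from-the-handler pattern shown is only one way drivers complete requests:

#include <crypto/engine.h>

static void my_hw_enable(void)  { /* hypothetical: clocks/power on */ }
static void my_hw_disable(void) { /* hypothetical: clocks/power off */ }

static int my_process_request(struct skcipher_request *req)
{
	return 0;	/* hypothetical: program and run the hardware */
}

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);
	int err;

	my_hw_enable();			/* driver-local prep, no engine callback */
	err = my_process_request(req);
	my_hw_disable();		/* driver-local teardown */

	/* synchronous completion: hand the result back to the engine */
	crypto_finalize_skcipher_request(engine, req, err);
	return 0;
}

static struct skcipher_engine_alg my_alg = {
	/* .base skcipher_alg fields (cra_name, setkey, encrypt, ...) omitted */
	.op.do_one_request = my_do_one_request,
};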
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 0f85c543f80b..6ec5f2f37ccb 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -30,6 +30,20 @@
__##name##_req, (req))
struct ahash_request;
+struct scatterlist;
+
+struct crypto_hash_walk {
+ const char *data;
+
+ unsigned int offset;
+ unsigned int flags;
+
+ struct page *pg;
+ unsigned int entrylen;
+
+ unsigned int total;
+ struct scatterlist *sg;
+};
struct ahash_instance {
void (*free)(struct ahash_instance *inst);
@@ -61,6 +75,15 @@ struct crypto_shash_spawn {
struct crypto_spawn base;
};
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
+int crypto_hash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk);
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+ return !(walk->entrylen | walk->total);
+}
+
int crypto_register_ahash(struct ahash_alg *alg);
void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
@@ -91,6 +114,12 @@ static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg)
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
+static inline bool crypto_hash_no_export_core(struct crypto_ahash *tfm)
+{
+ return crypto_hash_alg_common(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+}
+
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
@@ -167,6 +196,13 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
tfm->reqsize = reqsize;
}
+static inline bool crypto_ahash_tested(struct crypto_ahash *tfm)
+{
+ struct crypto_tfm *tfm_base = crypto_ahash_tfm(tfm);
+
+ return tfm_base->__crt_alg->cra_flags & CRYPTO_ALG_TESTED;
+}
+
static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash,
unsigned int reqsize)
{
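
The hash-walk helpers declared in this header are typically driven in a loop: crypto_hash_walk_first() maps the first chunk of the request's scatterlist, and crypto_hash_walk_done() takes the per-chunk result and returns the size of the next chunk (0 when the walk is finished, negative on error). A minimal sketch with a hypothetical my_feed_block() consumer:

#include <crypto/internal/hash.h>

/* Hypothetical: push 'len' bytes into a hash engine; returns 0 on success. */
static int my_feed_block(const char *data, unsigned int len, bool last)
{
	return 0;
}

static int my_ahash_update(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, nbytes)) {
		/* walk.data is the mapped chunk; walk_last flags the final one */
		nbytes = my_feed_block(walk.data, nbytes,
				       crypto_hash_walk_last(&walk));
	}

	return nbytes;
}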