Compare commits

...

4 Commits

Author SHA1 Message Date
Herbert Xu
e8d6fc996f crypto: simd - Return EAGAIN on skcipher stack requests
The simd algorithm is mostly synchronous.  So instead of refusing
all stack requests, allow the synchronous ones to proceed and only
reject the truly asynchronous ones.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2025-03-29 20:00:35 +08:00
Herbert Xu
124d98f8d8 crypto: skcipher - Add folio support
Add support for encrypting/decrypting from and to a folio.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2025-03-29 19:53:12 +08:00
Herbert Xu
2542110b76 crypto: aesni - Use SKCIPHER_REQUEST_ON_STACK
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2025-03-29 19:53:12 +08:00
Herbert Xu
bbd2086c43 crypto: skcipher - Add SKCIPHER_REQUEST_ON_STACK
Add SKCIPHER_REQUEST_ON_STACK which places a generic skcipher request
on the stack.  If an operation on it cannot complete synchronously, it
will fail with EAGAIN.  The request should then be converted to a
dynamically allocated one using SKCIPHER_REQUEST_CLONE.  If memory
allocation fails the request will automatically switch to a fallback
that is synchronous.

If a stack request is given to skcipher_request_free it will simply
be zeroed.

Finally add skcipher_request_alloc_extra which gives the user extra
memory to use in conjunction with the request.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2025-03-29 19:52:51 +08:00
7 changed files with 269 additions and 70 deletions

View File

@@ -247,12 +247,11 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
struct scatterlist *src = req->src, *dst = req->dst;
struct scatterlist sg_src[2], sg_dst[2];
struct skcipher_request subreq;
SKCIPHER_REQUEST_ON_STACK(subreq, tfm);
struct skcipher_walk walk;
int err;
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
skcipher_request_set_callback(subreq, skcipher_request_flags(req),
NULL, NULL);
if (req->cryptlen <= AES_BLOCK_SIZE) {
@@ -262,29 +261,29 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
}
if (cbc_blocks > 0) {
skcipher_request_set_crypt(&subreq, req->src, req->dst,
skcipher_request_set_crypt(subreq, req->src, req->dst,
cbc_blocks * AES_BLOCK_SIZE,
req->iv);
err = cbc_encrypt(&subreq);
err = cbc_encrypt(subreq);
if (err)
return err;
if (req->cryptlen == AES_BLOCK_SIZE)
return 0;
dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
dst = src = scatterwalk_ffwd(sg_src, req->src, subreq->cryptlen);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst,
subreq.cryptlen);
subreq->cryptlen);
}
/* handle ciphertext stealing */
skcipher_request_set_crypt(&subreq, src, dst,
skcipher_request_set_crypt(subreq, src, dst,
req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
err = skcipher_walk_virt(&walk, subreq, false);
if (err)
return err;
@@ -303,12 +302,11 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
struct scatterlist *src = req->src, *dst = req->dst;
struct scatterlist sg_src[2], sg_dst[2];
struct skcipher_request subreq;
SKCIPHER_REQUEST_ON_STACK(subreq, tfm);
struct skcipher_walk walk;
int err;
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
skcipher_request_set_callback(subreq, skcipher_request_flags(req),
NULL, NULL);
if (req->cryptlen <= AES_BLOCK_SIZE) {
@@ -318,29 +316,29 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
}
if (cbc_blocks > 0) {
skcipher_request_set_crypt(&subreq, req->src, req->dst,
skcipher_request_set_crypt(subreq, req->src, req->dst,
cbc_blocks * AES_BLOCK_SIZE,
req->iv);
err = cbc_decrypt(&subreq);
err = cbc_decrypt(subreq);
if (err)
return err;
if (req->cryptlen == AES_BLOCK_SIZE)
return 0;
dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
dst = src = scatterwalk_ffwd(sg_src, req->src, subreq->cryptlen);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst,
subreq.cryptlen);
subreq->cryptlen);
}
/* handle ciphertext stealing */
skcipher_request_set_crypt(&subreq, src, dst,
skcipher_request_set_crypt(subreq, src, dst,
req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
err = skcipher_walk_virt(&walk, subreq, false);
if (err)
return err;
@@ -423,7 +421,7 @@ xts_crypt_slowpath(struct skcipher_request *req, xts_crypt_func crypt_func)
const struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct scatterlist sg_src[2], sg_dst[2];
struct skcipher_request subreq;
SKCIPHER_REQUEST_ON_STACK(subreq, tfm);
struct skcipher_walk walk;
struct scatterlist *src, *dst;
int err;
@@ -435,14 +433,13 @@ xts_crypt_slowpath(struct skcipher_request *req, xts_crypt_func crypt_func)
* which is required for ciphertext stealing.
*/
if (tail) {
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq,
skcipher_request_set_callback(subreq,
skcipher_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(&subreq, req->src, req->dst,
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->cryptlen - tail - AES_BLOCK_SIZE,
req->iv);
req = &subreq;
req = subreq;
}
err = skcipher_walk_virt(&walk, req, false);

View File

@@ -66,15 +66,17 @@ static int simd_skcipher_encrypt(struct skcipher_request *req)
struct skcipher_request *subreq;
struct crypto_skcipher *child;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) {
if (skcipher_req_on_stack(req))
return -EAGAIN;
child = &ctx->cryptd_tfm->base;
} else
child = cryptd_skcipher_child(ctx->cryptd_tfm);
subreq = skcipher_request_ctx(req);
*subreq = *req;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
child = cryptd_skcipher_child(ctx->cryptd_tfm);
skcipher_request_set_tfm(subreq, child);
return crypto_skcipher_encrypt(subreq);
@@ -91,9 +93,11 @@ static int simd_skcipher_decrypt(struct skcipher_request *req)
*subreq = *req;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) {
if (skcipher_req_on_stack(req))
return -EAGAIN;
child = &ctx->cryptd_tfm->base;
else
} else
child = cryptd_skcipher_child(ctx->cryptd_tfm);
skcipher_request_set_tfm(subreq, child);
@@ -388,7 +392,7 @@ static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg,
drvname) >= CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_MOSTLY_SYNC |
(ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
alg->base.cra_priority = ialg->base.cra_priority;
alg->base.cra_blocksize = ialg->base.cra_blocksize;

View File

@@ -39,6 +39,14 @@ static const struct crypto_type crypto_skcipher_type;
static int skcipher_walk_next(struct skcipher_walk *walk);
static inline bool skcipher_has_fb(struct crypto_skcipher *skcipher)
{
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
return skcipher_is_async(skcipher) &&
!(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
}
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
@@ -439,6 +447,9 @@ int crypto_skcipher_encrypt(struct skcipher_request *req)
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
if (skcipher_req_on_stack(req) && skcipher_is_async(tfm) &&
!skcipher_is_mostly_sync(tfm))
return -EAGAIN;
if (alg->co.base.cra_type != &crypto_skcipher_type)
return crypto_lskcipher_encrypt_sg(req);
return alg->encrypt(req);
@@ -452,6 +463,9 @@ int crypto_skcipher_decrypt(struct skcipher_request *req)
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
if (skcipher_req_on_stack(req) && skcipher_is_async(tfm) &&
!skcipher_is_mostly_sync(tfm))
return -EAGAIN;
if (alg->co.base.cra_type != &crypto_skcipher_type)
return crypto_lskcipher_decrypt_sg(req);
return alg->decrypt(req);
@@ -521,15 +535,22 @@ static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
alg->exit(skcipher);
if (alg->exit)
alg->exit(skcipher);
if (skcipher_has_fb(skcipher))
crypto_free_sync_skcipher(skcipher->fb);
}
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
struct crypto_sync_skcipher *fb = NULL;
int err;
skcipher_set_needkey(skcipher);
skcipher->fb = container_of(skcipher, struct crypto_sync_skcipher, base);
if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
unsigned am = crypto_skcipher_alignmask(skcipher);
@@ -543,13 +564,28 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
return crypto_init_lskcipher_ops_sg(tfm);
}
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
if (skcipher_has_fb(skcipher)) {
fb = crypto_alloc_sync_skcipher(crypto_skcipher_alg_name(skcipher), 0, 0);
if (IS_ERR(fb))
return PTR_ERR(fb);
if (alg->init)
return alg->init(skcipher);
skcipher->fb = fb;
}
skcipher->base.exit = crypto_skcipher_exit_tfm;
if (!alg->init)
return 0;
err = alg->init(skcipher);
if (err)
goto out_free_fb;
return 0;
out_free_fb:
crypto_free_sync_skcipher(fb);
return err;
}
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
@@ -885,6 +921,46 @@ err_free_inst:
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
void skcipher_request_free(struct skcipher_request *req)
{
bool stack;
if (!req)
return;
stack = skcipher_req_on_stack(req);
skcipher_request_zero(req);
if (stack)
return;
kfree(req);
}
EXPORT_SYMBOL_GPL(skcipher_request_free);
static inline void skcipher_request_set_fallback(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
skcipher_request_set_sync_tfm(req, tfm->fb);
}
struct skcipher_request *skcipher_request_clone(struct skcipher_request *req,
size_t total, gfp_t gfp)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_request *nreq;
nreq = kmalloc(total, gfp);
if (!nreq) {
skcipher_request_set_fallback(req);
return req;
}
memcpy(nreq, req, total);
skcipher_request_set_tfm(nreq, tfm);
skcipher_request_set_callback(nreq, req->base.flags, req->base.complete, req->base.data);
return nreq;
}
EXPORT_SYMBOL_GPL(skcipher_request_clone);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");

View File

@@ -16,11 +16,20 @@
* Copyright (c) 2010, Intel Corporation.
*/
#include <crypto/acompress.h>
#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/drbg.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/kpp.h>
#include <crypto/rng.h>
#include <crypto/sig.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/once.h>
#include <linux/prandom.h>
@@ -28,14 +37,6 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <crypto/rng.h>
#include <crypto/drbg.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/acompress.h>
#include <crypto/sig.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include "internal.h"
@@ -3035,7 +3036,7 @@ static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec,
req->dst != tsgls->dst.sgl_ptr ||
crypto_skcipher_reqtfm(req) != tfm ||
req->base.complete != crypto_req_done ||
req->base.flags != req_flags ||
skcipher_request_flags(req) ^ req_flags ||
req->base.data != &wait) {
pr_err("alg: skcipher: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
@@ -3051,7 +3052,7 @@ static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec,
pr_err("alg: skcipher: changed 'req->base.tfm'\n");
if (req->base.complete != crypto_req_done)
pr_err("alg: skcipher: changed 'req->base.complete'\n");
if (req->base.flags != req_flags)
if (skcipher_request_flags(req) ^ req_flags)
pr_err("alg: skcipher: changed 'req->base.flags'\n");
if (req->base.data != &wait)
pr_err("alg: skcipher: changed 'req->base.data'\n");

View File

@@ -260,7 +260,7 @@ static inline void *skcipher_request_ctx_dma(struct skcipher_request *req)
static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
return req->base.flags;
return crypto_request_flags(&req->base);
}
/* Helpers for simple block cipher modes of operation */

View File

@@ -11,6 +11,7 @@
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -26,8 +27,6 @@
/* Set this bit if the skcipher operation is not final. */
#define CRYPTO_SKCIPHER_REQ_NOTFINAL 0x00000002
struct scatterlist;
/**
* struct skcipher_request - Symmetric key cipher request
* @cryptlen: Number of bytes to encrypt or decrypt
@@ -45,12 +44,16 @@ struct skcipher_request {
struct scatterlist *src;
struct scatterlist *dst;
struct scatterlist src0;
struct scatterlist dst0;
struct crypto_async_request base;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct crypto_skcipher {
struct crypto_sync_skcipher *fb;
unsigned int reqsize;
struct crypto_tfm base;
@@ -218,13 +221,25 @@ struct lskcipher_alg {
* all users have the correct skcipher tfm for doing on-stack requests.
*/
#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, _tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
MAX_SYNC_SKCIPHER_REQSIZE \
] CRYPTO_MINALIGN_ATTR; \
struct skcipher_request *name = \
(((struct skcipher_request *)__##name##_desc)->base.tfm = \
crypto_sync_skcipher_tfm((_tfm)), \
(void *)__##name##_desc)
SKCIPHER_REQUEST_ON_STACK(name, &(_tfm)->base, 0)
#define SKCIPHER_REQUEST_ON_STACK_1(name, tfm, extra) \
char __##name##_req[ALIGN(sizeof(struct skcipher_request) + \
MAX_SYNC_SKCIPHER_REQSIZE, \
CRYPTO_MINALIGN) + \
extra] CRYPTO_MINALIGN_ATTR; \
struct skcipher_request *name = skcipher_request_on_stack_init( \
__##name##_req, (tfm))
#define SKCIPHER_REQUEST_ON_STACK_0(name, tfm) \
SKCIPHER_REQUEST_ON_STACK_1(name, tfm, 0)
#define SKCIPHER_REQUEST_ON_STACK(name, tfm, ...) \
CONCATENATE(SKCIPHER_REQUEST_ON_STACK_, COUNT_ARGS(__VA_ARGS__))( \
name, tfm, ##__VA_ARGS__)
#define SKCIPHER_REQUEST_CLONE(name, gfp) \
skcipher_request_clone(name, sizeof(__##name##_req), gfp)
/**
* DOC: Symmetric Key Cipher API
@@ -818,6 +833,7 @@ static inline void skcipher_request_set_tfm(struct skcipher_request *req,
struct crypto_skcipher *tfm)
{
req->base.tfm = crypto_skcipher_tfm(tfm);
req->base.flags &= ~CRYPTO_TFM_REQ_ON_STACK;
}
static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req,
@@ -843,28 +859,46 @@ static inline struct skcipher_request *skcipher_request_cast(
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct skcipher_request *skcipher_request_alloc_noprof(
struct crypto_skcipher *tfm, gfp_t gfp)
static inline struct skcipher_request *skcipher_request_alloc_extra_noprof(
struct crypto_skcipher *tfm, size_t extra, gfp_t gfp)
{
struct skcipher_request *req;
unsigned int len;
req = kmalloc_noprof(sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(tfm), gfp);
len = ALIGN(sizeof(*req) + crypto_skcipher_reqsize(tfm), CRYPTO_MINALIGN);
if (check_add_overflow(len, extra, &len))
return NULL;
req = kmalloc_noprof(len, gfp);
if (likely(req))
skcipher_request_set_tfm(req, tfm);
return req;
}
#define skcipher_request_alloc(...) alloc_hooks(skcipher_request_alloc_noprof(__VA_ARGS__))
#define skcipher_request_alloc(tfm, gfp) alloc_hooks(skcipher_request_alloc_extra_noprof(tfm, 0, gfp))
/**
* skcipher_request_free() - zeroize and free request data structure
* @req: request data structure cipher handle to be freed
* skcipher_request_alloc_extra() - allocate request with extra memory
* @tfm: cipher handle to be registered with the request
* @extra: amount of extra memory
* @gfp: memory allocation flag that is handed to kmalloc by the API call.
*
* Allocate the request data structure with extra memory that must be used
* with the skcipher encrypt and decrypt API calls. During the allocation,
* the provided skcipher handle is registered in the request data structure.
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline void skcipher_request_free(struct skcipher_request *req)
#define skcipher_request_alloc_extra(...) alloc_hooks(skcipher_request_alloc_extra_noprof(__VA_ARGS__))
static inline void *skcipher_request_extra(struct skcipher_request *req)
{
kfree_sensitive(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
size_t len;
len = ALIGN(sizeof(*req) + crypto_skcipher_reqsize(tfm), CRYPTO_MINALIGN);
return (void *)((char *)req + len);
}
static inline void skcipher_request_zero(struct skcipher_request *req)
@@ -874,6 +908,12 @@ static inline void skcipher_request_zero(struct skcipher_request *req)
memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm));
}
/**
* skcipher_request_free() - zeroize and free request data structure
* @req: request data structure cipher handle to be freed
*/
void skcipher_request_free(struct skcipher_request *req);
/**
* skcipher_request_set_callback() - set asynchronous callback function
* @req: request handle
@@ -904,9 +944,7 @@ static inline void skcipher_request_set_callback(struct skcipher_request *req,
crypto_completion_t compl,
void *data)
{
req->base.complete = compl;
req->base.data = data;
req->base.flags = flags;
crypto_request_set_callback(&req->base, flags, compl, data);
}
/**
@@ -936,5 +974,55 @@ static inline void skcipher_request_set_crypt(
req->iv = iv;
}
static inline void skcipher_request_set_folio(
struct skcipher_request *req,
struct folio *src, size_t soff, struct folio *dst, size_t doff,
unsigned int cryptlen, void *iv)
{
req->src = &req->src0;
req->dst = &req->dst0;
sg_init_table(req->src, 1);
sg_set_folio(req->src, src, cryptlen, soff);
sg_init_table(req->dst, 1);
sg_set_folio(req->dst, dst, cryptlen, doff);
req->cryptlen = cryptlen;
req->iv = iv;
}
static inline bool skcipher_is_async(struct crypto_skcipher *tfm)
{
return crypto_tfm_is_async(crypto_skcipher_tfm(tfm));
}
static inline bool skcipher_is_mostly_sync(struct crypto_skcipher *tfm)
{
return crypto_tfm_is_mostly_sync(crypto_skcipher_tfm(tfm));
}
static inline struct skcipher_request *skcipher_request_on_stack_init(
char *buf, struct crypto_skcipher *tfm)
{
struct skcipher_request *req;
req = (void *)buf;
skcipher_request_set_tfm(req, tfm);
req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
return req;
}
static inline const char *crypto_skcipher_alg_name(struct crypto_skcipher *tfm)
{
return crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
}
struct skcipher_request *skcipher_request_clone(struct skcipher_request *req,
size_t total, gfp_t gfp);
static inline bool skcipher_req_on_stack(struct skcipher_request *req)
{
return crypto_req_on_stack(&req->base);
}
#endif /* _CRYPTO_SKCIPHER_H */

View File

@@ -128,6 +128,12 @@
/* Set if the algorithm supports request chains and virtual addresses. */
#define CRYPTO_ALG_REQ_CHAIN 0x00040000
/*
* Set if the algorithm is mostly synchronous and can reject on-stack
* requests with EAGAIN when truly asynchronous.
*/
#define CRYPTO_ALG_MOSTLY_SYNC 0x00080000
/*
* Transform masks and values (for crt_flags).
*/
@@ -491,5 +497,32 @@ static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
}
static inline bool crypto_req_on_stack(struct crypto_async_request *req)
{
return req->flags & CRYPTO_TFM_REQ_ON_STACK;
}
static inline void crypto_request_set_callback(
struct crypto_async_request *req, u32 flags,
crypto_completion_t compl, void *data)
{
u32 keep = CRYPTO_TFM_REQ_ON_STACK;
req->complete = compl;
req->data = data;
req->flags &= keep;
req->flags |= flags & ~keep;
}
static inline u32 crypto_request_flags(struct crypto_async_request *req)
{
return req->flags & ~CRYPTO_TFM_REQ_ON_STACK;
}
static inline bool crypto_tfm_is_mostly_sync(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_MOSTLY_SYNC;
}
#endif /* _LINUX_CRYPTO_H */