Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Merge the crypto tree to pull in the qat adf_init_pf_wq change.
commit 6f6438975d
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 		req = cast_mcryptd_ctx_to_req(req_ctx);
 		if (irqs_disabled())
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 		else {
 			local_bh_disable();
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 			local_bh_enable();
 		}
 	}
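
Note on the hunk above: the fix swaps the outer request context (rctx) for the per-entry context (req_ctx) when flushing completed jobs, since each queued entry may belong to a different request. A minimal sketch of the restored pattern, using illustrative names (queued_ctx, ctx_list, tmp) rather than the actual mcryptd internals:

	/* Complete every queued context through its own callback,
	 * not through the caller's context. */
	list_for_each_entry_safe(queued_ctx, tmp, &ctx_list, waiter) {
		req = cast_mcryptd_ctx_to_req(queued_ctx);
		if (irqs_disabled())
			queued_ctx->complete(&req->base, ret);
		else {
			local_bh_disable();
			queued_ctx->complete(&req->base, ret);
			local_bh_enable();
		}
	}
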
@@ -387,16 +387,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
 	req_ctx->child_req.src = req->src;
 	req_ctx->child_req.src_len = req->src_len;
 	req_ctx->child_req.dst = req_ctx->out_sg;
-	req_ctx->child_req.dst_len = ctx->key_size - 1;
+	req_ctx->child_req.dst_len = ctx->key_size;

-	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+	req_ctx->out_buf = kmalloc(ctx->key_size,
 				   (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 				   GFP_KERNEL : GFP_ATOMIC);
 	if (!req_ctx->out_buf)
 		return -ENOMEM;

 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-			    ctx->key_size - 1, NULL);
+			    ctx->key_size, NULL);

 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -595,16 +595,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
 	req_ctx->child_req.src = req->src;
 	req_ctx->child_req.src_len = req->src_len;
 	req_ctx->child_req.dst = req_ctx->out_sg;
-	req_ctx->child_req.dst_len = ctx->key_size - 1;
+	req_ctx->child_req.dst_len = ctx->key_size;

-	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+	req_ctx->out_buf = kmalloc(ctx->key_size,
 				   (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 				   GFP_KERNEL : GFP_ATOMIC);
 	if (!req_ctx->out_buf)
 		return -ENOMEM;

 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-			    ctx->key_size - 1, NULL);
+			    ctx->key_size, NULL);

 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
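
Note on the two pkcs1pad hunks above: both drop the same off-by-one. A PKCS#1 v1.5 block for a k-byte modulus is EM = 0x00 || BT || PS || 0x00 || D with len(EM) == k, and the underlying RSA implementation may write up to k bytes of output, so a destination sized key_size - 1 can make a conforming backend fail. A sketch of the corrected sizing (gfp here stands in for the flag selection shown in the diff):

	size_t dst_len = ctx->key_size;      /* full modulus size, not key_size - 1 */
	u8 *out_buf = kmalloc(dst_len, gfp); /* must hold every byte the RSA op writes */
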
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
 	struct ccp_aes_cmac_exp_ctx state;

+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
 	state.null_msg = rctx->null_msg;
 	memcpy(state.iv, rctx->iv, sizeof(state.iv));
 	state.buf_count = rctx->buf_count;
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
 	struct ccp_sha_exp_ctx state;

+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
 	state.type = rctx->type;
 	state.msg_bits = rctx->msg_bits;
 	state.first = rctx->first;
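
Note on the two ccp export hunks above: both close the same information leak. The on-stack export struct contains compiler-inserted padding, and copying it out without zeroing first discloses uninitialized kernel stack bytes to userspace. A minimal sketch of the pattern, with illustrative structure and field names rather than the real ccp types:

	#include <linux/string.h>	/* memset, memcpy */

	struct exp_state {
		u8 flag;		/* compiler pads after this member */
		u64 count;
	};

	static void safe_export(void *out, u8 flag, u64 count)
	{
		struct exp_state state;

		memset(&state, 0, sizeof(state));	/* zeroes the padding too */
		state.flag = flag;
		state.count = count;
		memcpy(out, &state, sizeof(state));
	}
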
@@ -146,6 +146,8 @@ int adf_init_aer(void);
 void adf_exit_aer(void);
 int adf_init_vf_wq(void);
 void adf_exit_vf_wq(void);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
 int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
 void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
 int adf_send_admin_init(struct adf_accel_dev *accel_dev);
@@ -471,6 +471,9 @@ static int __init adf_register_ctl_device_driver(void)
 	if (adf_init_aer())
 		goto err_aer;

+	if (adf_init_pf_wq())
+		goto err_pf_wq;
+
 	if (adf_init_vf_wq())
 		goto err_vf_wq;

@@ -482,6 +485,8 @@ static int __init adf_register_ctl_device_driver(void)
 err_crypto_register:
 	adf_exit_vf_wq();
 err_vf_wq:
+	adf_exit_pf_wq();
+err_pf_wq:
 	adf_exit_aer();
 err_aer:
 	adf_chr_drv_destroy();
@@ -495,6 +500,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
 	adf_chr_drv_destroy();
 	adf_exit_aer();
 	adf_exit_vf_wq();
+	adf_exit_pf_wq();
 	qat_crypto_unregister();
 	adf_clean_vf_map(false);
 	mutex_destroy(&adf_ctl_lock);
@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
 	int i;
 	u32 reg;

-	/* Workqueue for PF2VF responses */
-	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
-	if (!pf2vf_resp_wq)
-		return -ENOMEM;
-
 	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
 	     i++, vf_info++) {
 		/* This ptr will be populated when VFs will be created */
@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)

 	kfree(accel_dev->pf.vf_info);
 	accel_dev->pf.vf_info = NULL;
-
-	if (pf2vf_resp_wq) {
-		destroy_workqueue(pf2vf_resp_wq);
-		pf2vf_resp_wq = NULL;
-	}
 }
 EXPORT_SYMBOL_GPL(adf_disable_sriov);

@@ -298,3 +288,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 	return numvfs;
 }
 EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+	/* Workqueue for PF2VF responses */
+	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+
+	return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+	if (pf2vf_resp_wq) {
+		destroy_workqueue(pf2vf_resp_wq);
+		pf2vf_resp_wq = NULL;
+	}
+}
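
Note on the qat hunks above: they move the PF2VF response workqueue from the per-device SR-IOV enable/disable path to module init/exit, so repeated enable/disable cycles can no longer create and destroy the queue underneath in-flight users. A sketch of the resulting lifetime, with illustrative names rather than the qat symbols:

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *resp_wq;	/* one per module, not per device */

	static int __init example_init(void)
	{
		resp_wq = create_workqueue("example_resp_wq");
		return resp_wq ? 0 : -ENOMEM;
	}

	static void __exit example_exit(void)
	{
		if (resp_wq)
			destroy_workqueue(resp_wq);
	}
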
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
 		ptr->eptr = upper_32_bits(dma_addr);
 }

+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+			     struct talitos_ptr *src_ptr, bool is_sec1)
+{
+	dst_ptr->ptr = src_ptr->ptr;
+	if (!is_sec1)
+		dst_ptr->eptr = src_ptr->eptr;
+}
+
 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
 			       bool is_sec1)
 {
@@ -1093,21 +1101,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
 			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
 						       : DMA_TO_DEVICE);
-
 	/* hmac data */
 	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
 	if (sg_count > 1 &&
 	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
 					 areq->assoclen,
 					 &edesc->link_tbl[tbl_off])) > 1) {
-		tbl_off += ret;
-
 		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
 			       sizeof(struct talitos_ptr), 0);
 		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
+
+		tbl_off += ret;
 	} else {
 		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
 		desc->ptr[1].j_extent = 0;
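
Note on the hunk above: the link-table pointer for desc->ptr[1] must be computed from tbl_off before the offset is advanced past the freshly written entries; the old code incremented first and so pointed past them. In sketch form (illustrative local variable):

	dma_addr_t entry = edesc->dma_link_tbl +
			   tbl_off * sizeof(struct talitos_ptr);
	to_talitos_ptr(&desc->ptr[1], entry, 0);	/* old offset: start of entries */
	tbl_off += ret;					/* advance only afterwards */
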
@@ -1136,11 +1143,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
 		sg_link_tbl_len += authsize;

-	if (sg_count > 1 &&
-	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
-					 sg_link_tbl_len,
-					 &edesc->link_tbl[tbl_off])) > 1) {
-		tbl_off += ret;
+	if (sg_count == 1) {
+		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+			       areq->assoclen, 0);
+	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+						areq->assoclen, sg_link_tbl_len,
+						&edesc->link_tbl[tbl_off])) >
+		   1) {
 		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
 		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
 			       tbl_off *
@@ -1148,8 +1157,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len,
 					   DMA_BIDIRECTIONAL);
-	} else
-		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+		tbl_off += ret;
+	} else {
+		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+	}

 	/* cipher out */
 	desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1161,11 +1172,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,

 	edesc->icv_ool = false;

-	if (sg_count > 1 &&
-	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
-					      areq->assoclen, cryptlen,
-					      &edesc->link_tbl[tbl_off])) > 1) {
+	if (sg_count == 1) {
+		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+			       areq->assoclen, 0);
+	} else if ((sg_count =
+		    sg_to_link_tbl_offset(areq->dst, sg_count,
+					  areq->assoclen, cryptlen,
+					  &edesc->link_tbl[tbl_off])) > 1) {
 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

 		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1188,8 +1201,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);

 		edesc->icv_ool = true;
-	} else
-		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+	} else {
+		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+	}

 	/* iv out */
 	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2691,21 +2705,11 @@ struct talitos_crypto_alg {
 	struct talitos_alg_template algt;
 };

-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+			       struct talitos_crypto_alg *talitos_alg)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct talitos_crypto_alg *talitos_alg;
-	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct talitos_private *priv;

-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
-		talitos_alg = container_of(__crypto_ahash_alg(alg),
-					   struct talitos_crypto_alg,
-					   algt.alg.hash);
-	else
-		talitos_alg = container_of(alg, struct talitos_crypto_alg,
-					   algt.alg.crypto);
-
 	/* update context with ptr to dev */
 	ctx->dev = talitos_alg->dev;

@@ -2723,10 +2727,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }

+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+		talitos_alg = container_of(__crypto_ahash_alg(alg),
+					   struct talitos_crypto_alg,
+					   algt.alg.hash);
+	else
+		talitos_alg = container_of(alg, struct talitos_crypto_alg,
+					   algt.alg.crypto);
+
+	return talitos_init_common(ctx, talitos_alg);
+}
+
 static int talitos_cra_init_aead(struct crypto_aead *tfm)
 {
-	talitos_cra_init(crypto_aead_tfm(tfm));
-	return 0;
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+	talitos_alg = container_of(alg, struct talitos_crypto_alg,
+				   algt.alg.aead);
+
+	return talitos_init_common(ctx, talitos_alg);
 }

 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
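
Note on the last two hunks: the old talitos_cra_init() is split into talitos_init_common() plus thin per-type wrappers, because an AEAD transform's algorithm must be recovered via crypto_aead_alg() and container_of() on the embedded aead member; going through crypto_aead_tfm()->__crt_alg lands at the wrong offset inside the driver's wrapper struct. A sketch of the recovery pattern with illustrative type names, not the talitos ones:

	#include <crypto/internal/aead.h>

	struct my_alg_wrapper {
		struct aead_alg aead;	/* embedded AEAD ops */
		struct device *dev;	/* driver-private data */
	};

	static int my_aead_init(struct crypto_aead *tfm)
	{
		struct aead_alg *alg = crypto_aead_alg(tfm);
		struct my_alg_wrapper *w =
			container_of(alg, struct my_alg_wrapper, aead);

		/* w->dev is now valid; the legacy crypto_alg path is not. */
		return 0;
	}
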