mirror of https://github.com/FEX-Emu/linux.git
synced 2025-01-03 07:41:40 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Merge the crypto tree to pick up the arm64 output IV patch.
commit 34cb582139
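
The headline change in this merge is the arm64 fix: the CBC and CTR entry points in aes-modes.S now write the updated IV (respectively the next counter value) back through the IV pointer in x5, honouring the skcipher iv_out requirement. Without that write-back, splitting one message across several calls gives different output than a single call over the whole buffer. A minimal C model of the contract follows; it is a sketch, not kernel code, and aes_encrypt_block is a hypothetical one-block primitive.

	/* CBC over nblocks; on return, iv[] holds the last ciphertext
	 * block -- the iv_out contract the arm64 patch restores. */
	#include <stdint.h>
	#include <string.h>

	#define BLK 16

	void aes_encrypt_block(const uint8_t key[BLK], const uint8_t in[BLK],
			       uint8_t out[BLK]);	/* hypothetical */

	void cbc_encrypt(const uint8_t key[BLK], uint8_t iv[BLK],
			 const uint8_t *pt, uint8_t *ct, size_t nblocks)
	{
		uint8_t blk[BLK];

		while (nblocks--) {
			for (int i = 0; i < BLK; i++)
				blk[i] = pt[i] ^ iv[i];	 /* xor with iv */
			aes_encrypt_block(key, blk, ct); /* encrypt block */
			memcpy(iv, ct, BLK);		 /* return iv */
			pt += BLK;
			ct += BLK;
		}
	}

With the write-back in place, cbc_encrypt(key, iv, pt, ct, n) followed by cbc_encrypt(key, iv, pt + 16 * n, ct + 16 * n, m) matches one (n + m)-block call.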
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
 	cbz	w6, .Lcbcencloop

 	ld1	{v0.16b}, [x5]			/* get iv */
-	enc_prepare	w3, x2, x5
+	enc_prepare	w3, x2, x6

 .Lcbcencloop:
 	ld1	{v1.16b}, [x1], #16		/* get next pt block */
 	eor	v0.16b, v0.16b, v1.16b		/* ..and xor with iv */
-	encrypt_block	v0, w3, x2, x5, w6
+	encrypt_block	v0, w3, x2, x6, w7
 	st1	{v0.16b}, [x0], #16
 	subs	w4, w4, #1
 	bne	.Lcbcencloop
+	st1	{v0.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_encrypt)

@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	cbz	w6, .LcbcdecloopNx

 	ld1	{v7.16b}, [x5]			/* get iv */
-	dec_prepare	w3, x2, x5
+	dec_prepare	w3, x2, x6

 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
 	ld1	{v1.16b}, [x1], #16		/* get next ct block */
 	mov	v0.16b, v1.16b			/* ...and copy to v0 */
-	decrypt_block	v0, w3, x2, x5, w6
+	decrypt_block	v0, w3, x2, x6, w7
 	eor	v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
 	mov	v7.16b, v1.16b			/* ct is next iv */
 	st1	{v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	bne	.Lcbcdecloop
 .Lcbcdecout:
 	FRAME_POP
+	st1	{v7.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_decrypt)

@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)

 AES_ENTRY(aes_ctr_encrypt)
 	FRAME_PUSH
-	cbnz	w6, .Lctrfirst		/* 1st time around? */
-	umov	x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev	x5, x5
-#if INTERLEAVE >= 2
-	cmn	w5, w4			/* 32 bit overflow? */
-	bcs	.Lctrinc
-	add	x5, x5, #1		/* increment BE ctr */
-	b	.LctrincNx
-#else
-	b	.Lctrinc
-#endif
-.Lctrfirst:
+	cbz	w6, .Lctrnotfirst	/* 1st time around? */
 	enc_prepare	w3, x2, x6
 	ld1	{v4.16b}, [x5]
-	umov	x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev	x5, x5
+
+.Lctrnotfirst:
+	umov	x8, v4.d[1]		/* keep swabbed ctr in reg */
+	rev	x8, x8
 #if INTERLEAVE >= 2
-	cmn	w5, w4			/* 32 bit overflow? */
+	cmn	w8, w4			/* 32 bit overflow? */
 	bcs	.Lctrloop
 .LctrloopNx:
 	subs	w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
 	mov	v0.8b, v4.8b
 	mov	v1.8b, v4.8b
-	rev	x7, x5
-	add	x5, x5, #1
+	rev	x7, x8
+	add	x8, x8, #1
 	ins	v0.d[1], x7
-	rev	x7, x5
-	add	x5, x5, #1
+	rev	x7, x8
+	add	x8, x8, #1
 	ins	v1.d[1], x7
 	ld1	{v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
 	do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
 	st1	{v0.16b-v1.16b}, [x0], #32
 #else
 	ldr	q8, =0x30000000200000001	/* addends 1,2,3[,0] */
-	dup	v7.4s, w5
+	dup	v7.4s, w8
 	mov	v0.16b, v4.16b
 	add	v7.4s, v7.4s, v8.4s
 	mov	v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
 	eor	v2.16b, v7.16b, v2.16b
 	eor	v3.16b, v5.16b, v3.16b
 	st1	{v0.16b-v3.16b}, [x0], #64
-	add	x5, x5, #INTERLEAVE
+	add	x8, x8, #INTERLEAVE
 #endif
-	cbz	w4, .LctroutNx
-.LctrincNx:
-	rev	x7, x5
+	rev	x7, x8
 	ins	v4.d[1], x7
+	cbz	w4, .Lctrout
 	b	.LctrloopNx
-.LctroutNx:
-	sub	x5, x5, #1
-	rev	x7, x5
-	ins	v4.d[1], x7
-	b	.Lctrout
 .Lctr1x:
 	adds	w4, w4, #INTERLEAVE
 	beq	.Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
 	mov	v0.16b, v4.16b
 	encrypt_block	v0, w3, x2, x6, w7
+
+	adds	x8, x8, #1		/* increment BE ctr */
+	rev	x7, x8
+	ins	v4.d[1], x7
+	bcs	.Lctrcarry		/* overflow? */
+
+.Lctrcarrydone:
 	subs	w4, w4, #1
 	bmi	.Lctrhalfblock		/* blocks < 0 means 1/2 block */
 	ld1	{v3.16b}, [x1], #16
 	eor	v3.16b, v0.16b, v3.16b
 	st1	{v3.16b}, [x0], #16
-	beq	.Lctrout
-.Lctrinc:
-	adds	x5, x5, #1		/* increment BE ctr */
-	rev	x7, x5
-	ins	v4.d[1], x7
-	bcc	.Lctrloop		/* no overflow? */
+	bne	.Lctrloop
+
+.Lctrout:
+	st1	{v4.16b}, [x5]		/* return next CTR value */
+	FRAME_POP
+	ret
+
+.Lctrhalfblock:
+	ld1	{v3.8b}, [x1]
+	eor	v3.8b, v0.8b, v3.8b
+	st1	{v3.8b}, [x0]
+	FRAME_POP
+	ret
+
+.Lctrcarry:
 	umov	x7, v4.d[0]		/* load upper word of ctr */
 	rev	x7, x7			/* ... to handle the carry */
 	add	x7, x7, #1
 	rev	x7, x7
 	ins	v4.d[0], x7
-	b	.Lctrloop
-.Lctrhalfblock:
-	ld1	{v3.8b}, [x1]
-	eor	v3.8b, v0.8b, v3.8b
-	st1	{v3.8b}, [x0]
-.Lctrout:
-	FRAME_POP
-	ret
+	b	.Lctrcarrydone
 AES_ENDPROC(aes_ctr_encrypt)
 	.ltorg
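
The rewritten CTR tail above keeps the byte-swapped low counter word in x8, increments it per block, and takes the .Lctrcarry path when the low 64-bit half wraps so the carry propagates into the upper half; .Lctrout then stores v4 as the next counter value. A C model of that big-endian 128-bit increment, assuming GCC/Clang's __builtin_bswap64 (my sketch, not the kernel's code):

	#include <stdint.h>

	/* Increment a 16-byte big-endian counter held as two 64-bit
	 * halves, mirroring rev/adds on x8 and the .Lctrcarry path. */
	static void ctr_increment(uint64_t *hi_be, uint64_t *lo_be)
	{
		uint64_t lo = __builtin_bswap64(*lo_be); /* rev x8, x8 */

		lo++;					 /* adds x8, x8, #1 */
		*lo_be = __builtin_bswap64(lo);
		if (lo == 0) {				 /* bcs .Lctrcarry */
			uint64_t hi = __builtin_bswap64(*hi_be);
			*hi_be = __builtin_bswap64(hi + 1);
		}
	}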

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
@@ -1024,7 +1024,8 @@ struct {
 	const char *basename;
 	struct simd_skcipher_alg *simd;
 } aesni_simd_skciphers2[] = {
-#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
+#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
+    IS_BUILTIN(CONFIG_CRYPTO_PCBC)
 	{
 		.algname	= "pcbc(aes)",
 		.drvname	= "pcbc-aes-aesni",
@@ -1088,9 +1089,9 @@ static void aesni_free_simds(void)
 		    aesni_simd_skciphers[i]; i++)
 		simd_skcipher_free(aesni_simd_skciphers[i]);

-	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
-		    aesni_simd_skciphers2[i].simd; i++)
-		simd_skcipher_free(aesni_simd_skciphers2[i].simd);
+	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
+		if (aesni_simd_skciphers2[i].simd)
+			simd_skcipher_free(aesni_simd_skciphers2[i].simd);
 }

 static int __init aesni_init(void)
@@ -1171,7 +1172,7 @@ static int __init aesni_init(void)
 		simd = simd_skcipher_create_compat(algname, drvname, basename);
 		err = PTR_ERR(simd);
 		if (IS_ERR(simd))
-			goto unregister_simds;
+			continue;

 		aesni_simd_skciphers2[i].simd = simd;
 	}
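
The aesni change handles CONFIG_CRYPTO_PCBC=m with a built-in aesni: the pcbc template is not yet available when aesni's built-in init runs, so pcbc-aes-aesni registration fails and, via goto unregister_simds, used to take every other aesni skcipher down with it. The fix compiles the entry only when the template is guaranteed reachable and skips individual failures with continue. The kconfig helpers used in the new guard behave as follows (semantics per include/linux/kconfig.h; the summary comment is mine):

	/*
	 * IS_BUILTIN(CONFIG_FOO) - 1 iff CONFIG_FOO=y
	 * IS_ENABLED(CONFIG_FOO) - 1 iff CONFIG_FOO=y or CONFIG_FOO=m
	 * defined(MODULE)        - this file is being built as a module
	 *
	 * So the pcbc entry is kept when aesni is itself modular and pcbc
	 * exists in any form (module dependencies order the loads), or
	 * when pcbc is built in.
	 */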

diff --git a/crypto/algapi.c b/crypto/algapi.c
@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
 	struct crypto_larval *larval;
 	int err;

+	alg->cra_flags &= ~CRYPTO_ALG_DEAD;
 	err = crypto_check_alg(alg);
 	if (err)
 		return err;

diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
@@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 unlock:
 	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
 		af_alg_free_sg(&rsgl->sgl);
+		list_del(&rsgl->list);
 		if (rsgl != &ctx->first_rsgl)
 			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-		list_del(&rsgl->list);
 	}
 	INIT_LIST_HEAD(&ctx->list);
 	aead_wmem_wakeup(sk);
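
The algif_aead hunk is a use-after-free fix: the old loop freed rsgl with sock_kfree_s() and then ran list_del(&rsgl->list) on the freed memory for every entry except the embedded first_rsgl. Unlinking before the conditional free is the usual pattern with list_for_each_entry_safe; a generic sketch (not the algif_aead code itself):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		struct list_head list;
	};

	static void drain(struct list_head *head)
	{
		struct item *it, *tmp;

		list_for_each_entry_safe(it, tmp, head, list) {
			list_del(&it->list);	/* unlink while still alive */
			kfree(it);		/* only then release it */
		}
	}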

diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
 static void ccp5_config(struct ccp_device *ccp)
 {
 	/* Public side */
-	iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+	iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
 }

 static void ccp5other_config(struct ccp_device *ccp)

diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
@@ -238,6 +238,7 @@ struct ccp_dma_chan {
 	struct ccp_device *ccp;

 	spinlock_t lock;
+	struct list_head created;
 	struct list_head pending;
 	struct list_head active;
 	struct list_head complete;

diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
 	ccp_free_desc_resources(chan->ccp, &chan->complete);
 	ccp_free_desc_resources(chan->ccp, &chan->active);
 	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);

 	spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
 	spin_lock_irqsave(&chan->lock, flags);

 	cookie = dma_cookie_assign(tx_desc);
+	list_del(&desc->entry);
 	list_add_tail(&desc->entry, &chan->pending);

 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,

 	spin_lock_irqsave(&chan->lock, sflags);

-	list_add_tail(&desc->entry, &chan->pending);
+	list_add_tail(&desc->entry, &chan->created);

 	spin_unlock_irqrestore(&chan->lock, sflags);

@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
 	/*TODO: Purge the complete list? */
 	ccp_free_desc_resources(chan->ccp, &chan->active);
 	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);

 	spin_unlock_irqrestore(&chan->lock, flags);

@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 		chan->ccp = ccp;

 		spin_lock_init(&chan->lock);
+		INIT_LIST_HEAD(&chan->created);
 		INIT_LIST_HEAD(&chan->pending);
 		INIT_LIST_HEAD(&chan->active);
 		INIT_LIST_HEAD(&chan->complete);
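
The ccp-dmaengine changes give descriptors a home for their whole lifetime: ccp_create_desc() now parks new descriptors on the created list instead of pending, ccp_tx_submit() moves them across (hence the added list_del() before list_add_tail()), and the free/terminate paths reap created as well, so a descriptor that was allocated but never submitted can no longer leak. The resulting lifecycle, as I read the diff:

	/*
	 *   ccp_create_desc()  ->  chan->created
	 *   ccp_tx_submit()    ->  list_del() from created,
	 *                          list_add_tail() onto chan->pending
	 *   hardware progress  ->  pending -> active -> complete
	 *   free / terminate   ->  ccp_free_desc_resources() on all
	 *                          lists, created included
	 */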

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 	case CRYPTO_ALG_TYPE_AEAD:
 		ctx_req.req.aead_req = (struct aead_request *)req;
 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
-		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
 		if (ctx_req.ctx.reqctx->skb) {
 			kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
 	unsigned int kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,

 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
+
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err)
 			return ERR_PTR(err);
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
 		null = 1;
 		assoclen = 0;
 	}
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;

@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
 	unsigned int dst_size = 0, kctx_len;
 	unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
 	sub_type = get_aead_subtype(tfm);
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
+
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err) {
 			pr_err("AAD copy to destination buffer fails\n");
 			return ERR_PTR(err);
 		}
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;

@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = AES_BLOCK_SIZE;
 	unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;

-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err)
 			return ERR_PTR(err);
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}

 	if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		crypt_len = AES_BLOCK_SIZE;
 	else
 		crypt_len = req->cryptlen;
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;

@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
 	} else {
 		aes_gcm_empty_pld_pad(req->dst, authsize - 1);
-		write_sg_to_skb(skb, &frags, dst, crypt_len);
+		write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
+
 	}

 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	unsigned int ck_size;
 	int ret = 0, key_ctx_size = 0;

-	if (get_aead_subtype(aead) ==
-	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+	    keylen > 3) {
 		keylen -= 4;	/* nonce/salt is present in the last 4 bytes */
 		memcpy(aeadctx->salt, key + keylen, 4);
 	}
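
The common thread in the chcr_algo hunks is DMA map/unmap symmetry. The scatterlists produced by scatterwalk_ffwd() used to live in on-stack src_sg/dst_sg arrays inside the create_*_wr() functions, while the completion path in chcr_handle_resp() unmapped req->dst, a different list. Stashing the forwarded lists in the long-lived per-request context (reqctx->dst plus the srcffwd/dstffwd backing storage added to chcr_crypto.h below) means the pointer handed to dma_unmap_sg() is exactly the one that was mapped. A sketch of the restored invariant:

	/*
	 *   submit:      reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
	 *                                      req->dst, req->assoclen);
	 *                ... map_writesg_phys_cpl(dev, ..., reqctx->dst, ...);
	 *
	 *   completion:  dma_unmap_sg(dev, reqctx->dst,
	 *                             reqctx->dst_nents, DMA_FROM_DEVICE);
	 *
	 * The on-stack sg arrays are gone by completion time; reqctx is not.
	 */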

diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
 int assign_chcr_device(struct chcr_dev **dev)
 {
 	struct uld_ctx *u_ctx;
+	int ret = -ENXIO;

 	/*
	 * Which device to use if multiple devices are available TODO
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev)
 	 * must go to the same device to maintain the ordering.
 	 */
 	mutex_lock(&dev_mutex); /* TODO ? */
-	u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
-	if (!u_ctx) {
-		mutex_unlock(&dev_mutex);
-		return -ENXIO;
+	list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+		if (u_ctx && u_ctx->dev) {
+			*dev = u_ctx->dev;
+			ret = 0;
+			break;
 	}

-	*dev = u_ctx->dev;
 	mutex_unlock(&dev_mutex);
-	return 0;
+	return ret;
 }

 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)

 static int __init chcr_crypto_init(void)
 {
-	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
 		pr_err("ULD register fail: No chcr crypto support in cxgb4");
-		return -1;
-	}

 	return 0;
 }

diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
@@ -158,6 +158,9 @@ struct ablk_ctx {
 };
 struct chcr_aead_reqctx {
 	struct sk_buff	*skb;
+	struct scatterlist	*dst;
+	struct scatterlist	srcffwd[2];
+	struct scatterlist	dstffwd[2];
 	short int dst_nents;
 	u16 verify;
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];