bpf: fix checksum fixups on bpf_skb_store_bytes
bpf_skb_store_bytes() invocations above L2 header need BPF_F_RECOMPUTE_CSUM
flag for updates, so that CHECKSUM_COMPLETE will be fixed up along the way.
We ran into an issue with bpf_skb_store_bytes() when doing a single-byte update
on the IPv6 hop limit despite using the BPF_F_RECOMPUTE_CSUM flag; a simple ping
via ICMPv6 triggered a hw csum failure as a result. The underlying problem has
been tracked down to a buffer alignment issue: the csum_partial() computations
invoked via the skb_postpull_rcsum() / skb_postpush_rcsum() pair produced a
wrong result, since they operated on an odd address for the hop limit while the
other computations were done on an even address. This mix does not work as-is
with the skb_postpull_rcsum() / skb_postpush_rcsum() pair, as it expects at
least half-word alignment of input buffers, which is normally the case. Thus,
instead of having these helpers use csum_sub() and (implicitly) csum_add(), we
need to use csum_block_sub() and csum_block_add(), respectively. For unaligned
offsets, these rotate the sum to align it to a half-word boundary again;
otherwise they work the same as csum_sub() and csum_add().
Adding __skb_postpull_rcsum() and __skb_postpush_rcsum() variants that take the
offset as an input, and adapting bpf_skb_store_bytes() to use them, fixes the hw
csum failures. The skb_postpull_rcsum() and skb_postpush_rcsum() helpers pass a
constant offset of 0, so the compiler optimizes the offset & 1 test away and
generates the same code as with csum_sub()/csum_add().
Fixes: 608cd71a9c ("tc: bpf: generalize pedit action")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 479ffcccef (parent a2bfe6bf09)
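The change below relies on a property of the Internet checksum: the one's-complement sum of a block that starts at an odd byte offset contributes byte-swapped to the total, which is what csum_block_add()/csum_block_sub() compensate for. The following is a minimal user-space sketch of that property, not kernel code; the helper names (fold16, demo_csum_partial, demo_csum_add, demo_csum_block_add) and the sample bytes are made up for illustration.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator down to a 16-bit one's-complement sum. */
static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement sum over a byte range, bytes paired big-endian. */
static uint16_t demo_csum_partial(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	return fold16(sum);
}

/* Plain combine of two partial sums, csum_add()-style. */
static uint16_t demo_csum_add(uint16_t a, uint16_t b)
{
	return fold16((uint32_t)a + b);
}

/* Offset-aware combine, csum_block_add()-style: swap the bytes of the
 * partial sum when the block it covers starts at an odd offset.
 */
static uint16_t demo_csum_block_add(uint16_t a, uint16_t b, size_t off)
{
	if (off & 1)
		b = (uint16_t)(b << 8 | b >> 8);
	return demo_csum_add(a, b);
}

int main(void)
{
	/* Arbitrary packet bytes, split at the odd offset 7 (think: a
	 * single-byte store on the IPv6 hop limit).
	 */
	const uint8_t pkt[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x08,
				0x3a, 0x40, 0x01 };
	const size_t off = 7;
	uint16_t whole = demo_csum_partial(pkt, sizeof(pkt));
	uint16_t head  = demo_csum_partial(pkt, off);
	uint16_t tail  = demo_csum_partial(pkt + off, sizeof(pkt) - off);

	printf("whole     = 0x%04x\n", whole);
	printf("plain add = 0x%04x (wrong)\n", demo_csum_add(head, tail));
	printf("block add = 0x%04x (matches)\n",
	       demo_csum_block_add(head, tail, off));
	assert(demo_csum_block_add(head, tail, off) == whole);
	return 0;
}

The in-kernel csum_block_add() achieves the same effect on the unfolded 32-bit sum by rotating it by eight bits when the offset is odd; with a constant offset of 0, that test is optimized away, which is why the plain skb_postpull_rcsum()/skb_postpush_rcsum() wrappers generate the same code as before.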
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2847,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
 	       __skb_linearize(skb) : 0;
 }
 
+static __always_inline void
+__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+		     unsigned int off)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_block_sub(skb->csum,
+					   csum_partial(start, len, 0), off);
+	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		 skb_checksum_start_offset(skb) < 0)
+		skb->ip_summed = CHECKSUM_NONE;
+}
+
 /**
  *	skb_postpull_rcsum - update checksum for received skb after pull
  *	@skb: buffer to update
@@ -2857,36 +2869,38 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
  *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
  *	CHECKSUM_NONE so that it can be recomputed from scratch.
  */
 static inline void skb_postpull_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
-	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
-		 skb_checksum_start_offset(skb) < 0)
-		skb->ip_summed = CHECKSUM_NONE;
+	__skb_postpull_rcsum(skb, start, len, 0);
 }
 
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+static __always_inline void
+__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+		     unsigned int off)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_block_add(skb->csum,
+					   csum_partial(start, len, 0), off);
+}
 
 /**
  *	skb_postpush_rcsum - update checksum for received skb after push
  *	@skb: buffer to update
  *	@start: start of data after push
  *	@len: length of data pushed
  *
  *	After doing a push on a received packet, you need to call this to
  *	update the CHECKSUM_COMPLETE checksum.
  */
 static inline void skb_postpush_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
-	/* For performing the reverse operation to skb_postpull_rcsum(),
-	 * we can instead of ...
-	 *
-	 *   skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
-	 *
-	 * ... just use this equivalent version here to save a few
-	 * instructions. Feeding csum of 0 in csum_partial() and later
-	 * on adding skb->csum is equivalent to feed skb->csum in the
-	 * first place.
-	 */
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->csum = csum_partial(start, len, skb->csum);
+	__skb_postpush_rcsum(skb, start, len, 0);
 }
 
+unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+
 /**
  *	skb_push_rcsum - push skb and update receive checksum
  *	@skb: buffer to update
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1401,7 +1401,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 		return -EFAULT;
 
 	if (flags & BPF_F_RECOMPUTE_CSUM)
-		skb_postpull_rcsum(skb, ptr, len);
+		__skb_postpull_rcsum(skb, ptr, len, offset);
 
 	memcpy(ptr, from, len);
 
@@ -1410,7 +1410,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 		skb_store_bits(skb, offset, ptr, len);
 
 	if (flags & BPF_F_RECOMPUTE_CSUM)
-		skb_postpush_rcsum(skb, ptr, len);
+		__skb_postpush_rcsum(skb, ptr, len, offset);
 	if (flags & BPF_F_INVALIDATE_HASH)
 		skb_clear_hash(skb);
 