tcp: avoid order-1 allocations on wifi and tx path
Marc Merlin reported many order-1 allocation failures in the TX path on his wireless setup, which don't make any sense with an MTU=1500 network and non-SG-capable hardware.

After investigation, it turns out TCP uses sk_stream_alloc_skb() and, by convention, skb_tailroom(skb) to know how many bytes of data payload can be put in this skb (for non-SG-capable devices).

Note: these skbs use kmalloc-4096 (MTU=1500 + MAX_HEADER + sizeof(struct skb_shared_info) being above 2048).

Later, the mac80211 layer needs to add some bytes at the tail of the skb (IEEE80211_ENCRYPT_TAILROOM = 18 bytes) and, since no more tailroom is available, has to call pskb_expand_head() and request order-1 allocations.

This patch changes sk_stream_alloc_skb() so that only sk->sk_prot->max_header bytes of headroom are reserved, and uses a new skb field, avail_size, to hold the data payload limit.

This way, order-0 allocations done by the TCP stack can leave more than 2 KB of tailroom, and no further allocation is performed in the mac80211 layer (or any other layer needing some tailroom).

avail_size is unioned with mark/dropcount, since mark will be set later in the IP stack for output packets. Therefore, skb size is unchanged.

Reported-by: Marc MERLIN <marc@merlins.org>
Tested-by: Marc MERLIN <marc@merlins.org>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 87151b8689
commit a21d45726a
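To make the arithmetic in the commit message concrete, here is a minimal userspace sketch (plain C, not kernel code). The payload size, the max_header reservation, the skb_shared_info overhead and the 64-byte alignment below are assumed ballpark figures for one possible configuration, not values taken from this tree.

/* Minimal sketch of the allocation arithmetic from the commit message.
 * All constants are assumptions (rough ballpark figures for one config). */
#include <stdio.h>

#define PAYLOAD      1448U  /* assumed: one MSS worth of data with MTU 1500            */
#define HEADER_ROOM   304U  /* assumed: sk->sk_prot->max_header for TCP                */
#define SHINFO        320U  /* assumed: approx. sizeof(struct skb_shared_info)         */
#define ALIGN64(x)   (((x) + 63U) & ~63U)

int main(void)
{
	/* sk_stream_alloc_skb() asks for PAYLOAD + HEADER_ROOM bytes of data area;
	 * the shared info is placed at the very end of the allocation.           */
	unsigned int request = ALIGN64(PAYLOAD + HEADER_ROOM) + SHINFO;
	unsigned int slab = 32;

	while (slab < request)          /* kmalloc rounds up to a power of two */
		slab *= 2;

	unsigned int data_area = slab - SHINFO;

	printf("requested %u bytes -> kmalloc-%u\n", request, slab);

	/* Old scheme: skb_reserve(skb, skb_tailroom(skb) - size) turned every
	 * spare byte into headroom, so a full skb had no tailroom left and
	 * mac80211 had to pskb_expand_head() it with an order-1 allocation.  */
	printf("old: tailroom once the skb is full = 0 bytes\n");

	/* New scheme: reserve only max_header and record the payload budget in
	 * skb->avail_size; everything else stays available as tailroom.       */
	printf("new: tailroom once the skb is full = %u bytes\n",
	       data_area - HEADER_ROOM - PAYLOAD);
	return 0;
}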
include/linux/skbuff.h
@@ -481,6 +481,7 @@ struct sk_buff {
 	union {
 		__u32		mark;
 		__u32		dropcount;
+		__u32		avail_size;
 	};
 
 	sk_buff_data_t		transport_header;
@@ -1365,6 +1366,18 @@ static inline int skb_tailroom(const struct sk_buff *skb)
 	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
 }
 
+/**
+ *	skb_availroom - bytes at buffer end
+ *	@skb: buffer to check
+ *
+ *	Return the number of bytes of free space at the tail of an sk_buff
+ *	allocated by sk_stream_alloc()
+ */
+static inline int skb_availroom(const struct sk_buff *skb)
+{
+	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
+}
+
 /**
  *	skb_reserve - adjust headroom
  *	@skb: buffer to alter
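skb_availroom() reports how much of the payload budget recorded by sk_stream_alloc_skb() is still unused, while skb_tailroom() keeps reporting the physical free space at the end of the buffer. A standalone illustration, with a stand-in struct and made-up values (this is not kernel code):

/* Standalone illustration of skb_tailroom() vs. the new skb_availroom()
 * for a linear TCP skb; the struct and the numbers are made up.        */
#include <stdio.h>

struct fake_skb {
	unsigned int len;        /* payload bytes already in the skb             */
	unsigned int tail, end;  /* offsets bounding the free space at the tail  */
	unsigned int avail_size; /* payload budget set by sk_stream_alloc_skb()  */
};

static int tailroom(const struct fake_skb *skb)
{
	return skb->end - skb->tail;        /* physical free space */
}

static int availroom(const struct fake_skb *skb)
{
	return skb->avail_size - skb->len;  /* budget TCP may still consume */
}

int main(void)
{
	/* budget of one MSS (1448), 1000 bytes queued, ~2 KB spare at the tail */
	struct fake_skb skb = {
		.len = 1000, .tail = 1000, .end = 3000, .avail_size = 1448,
	};

	printf("tailroom  = %d bytes (left for lower layers such as mac80211)\n",
	       tailroom(&skb));
	printf("availroom = %d bytes (what tcp_sendmsg() may still append)\n",
	       availroom(&skb));
	return 0;
}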
net/ipv4/tcp.c
@@ -701,11 +701,12 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 	if (skb) {
 		if (sk_wmem_schedule(sk, skb->truesize)) {
+			skb_reserve(skb, sk->sk_prot->max_header);
 			/*
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
 			 */
-			skb_reserve(skb, skb_tailroom(skb) - size);
+			skb->avail_size = size;
 			return skb;
 		}
 		__kfree_skb(skb);
@@ -995,10 +996,9 @@ new_segment:
 				copy = seglen;
 
 			/* Where to copy to? */
-			if (skb_tailroom(skb) > 0) {
+			if (skb_availroom(skb) > 0) {
 				/* We have some space in skb head. Superb! */
-				if (copy > skb_tailroom(skb))
-					copy = skb_tailroom(skb);
+				copy = min_t(int, copy, skb_availroom(skb));
 				err = skb_add_data_nocache(sk, skb, from, copy);
 				if (err)
 					goto do_fault;
net/ipv4/tcp_output.c
@@ -2060,7 +2060,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 		/* Punt if not enough space exists in the first SKB for
 		 * the data in the second
 		 */
-		if (skb->len > skb_tailroom(to))
+		if (skb->len > skb_availroom(to))
 			break;
 
 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))