x86: Align skb w/ start of cacheline on newer core 2/Xeon Arch
x86 architectures can handle unaligned accesses in hardware, and it has been shown that unaligned DMA accesses can be expensive on Nehalem architectures. As such we should override NET_IP_ALIGN to resolve this issue.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit ea812ca1b0
parent cb836a977f
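For context: the generic fallback for NET_IP_ALIGN lives in include/linux/skbuff.h behind an #ifndef guard, which is what lets an architecture header such as the one patched below pin the value for that arch. Roughly (not part of this commit, shown only to illustrate the override mechanism):

#ifndef NET_IP_ALIGN
/* Generic default: shift the IP header to a 4-byte boundary. */
#define NET_IP_ALIGN	2
#endif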
@@ -457,4 +457,13 @@ static inline void rdtsc_barrier(void)
 	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
 }
 
+#ifdef CONFIG_MCORE2
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be quite expensive on some Nehalem processors.
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN 0
+#endif
 #endif /* _ASM_X86_SYSTEM_H */
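To see why the value matters, here is a minimal sketch of the RX-buffer allocation pattern that consumes NET_IP_ALIGN in network drivers; example_alloc_rx_skb is a hypothetical helper, not code from this commit. With the generic value of 2 the skb data pointer is shifted so the IP header lands on a 4-byte boundary; with the x86 override of 0 the reserve becomes a no-op and the buffer keeps its cacheline-aligned start, which is the behaviour this commit targets on Core 2/Nehalem.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical RX allocation helper showing the usual NET_IP_ALIGN usage. */
static struct sk_buff *example_alloc_rx_skb(struct net_device *dev,
					     unsigned int len)
{
	struct sk_buff *skb;

	/* Allocate room for the frame plus the alignment pad. */
	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	/* Shift skb->data by NET_IP_ALIGN bytes; a no-op when it is 0. */
	skb_reserve(skb, NET_IP_ALIGN);

	return skb;
}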