mirror of
https://github.com/FEX-Emu/linux.git
synced 2024-12-27 20:07:09 +00:00
d433673e5f
This change is primarily a preparation to ease the extension of memory limit tracking. The change does reduce the number of atomic operations during freeing of a frag queue. This does introduce some performance improvement, as these atomic operations are at the core of the performance problems seen on NUMA systems. Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
110 lines
2.8 KiB
C
110 lines
2.8 KiB
C
#ifndef __NET_FRAG_H__
|
|
#define __NET_FRAG_H__
|
|
|
|
/*
 * Per-network-namespace fragment reassembly state: active queue count,
 * LRU list used for eviction, the memory accounting counter, and the
 * per-namespace sysctl knobs.
 */
struct netns_frags {
	int			nqueues;	/* number of reassembly queues in this namespace */
	struct list_head	lru_list;	/* queues ordered for LRU eviction */

	/* It's important for performance to keep lru_list and mem on
	 * separate cachelines
	 */
	atomic_t		mem ____cacheline_aligned_in_smp;	/* memory used by this namespace's frags */
	/* sysctls */
	int			timeout;	/* reassembly timeout (sysctl-tunable) */
	int			high_thresh;	/* upper memory threshold (sysctl) — presumably eviction trigger; see inet_frag_evictor() */
	int			low_thresh;	/* lower memory threshold (sysctl) */
};
|
|
|
|
/*
 * One in-flight datagram reassembly: the received fragment chain plus
 * the bookkeeping needed to time it out, evict it, and refcount it.
 */
struct inet_frag_queue {
	spinlock_t		lock;		/* protects this queue's state */
	struct timer_list	timer;		/* when will this queue expire? */
	struct list_head	lru_list;	/* lru list member */
	struct hlist_node	list;		/* member of an inet_frags hash bucket */
	atomic_t		refcnt;		/* reference count; dropped via inet_frag_put() */
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail;	/* tail of the fragments list */
	ktime_t			stamp;		/* timestamp of the queue */
	int			len;		/* total length of orig datagram */
	int			meat;		/* presumably bytes received so far — confirm against reassembly code */
	__u8			last_in;	/* first/last segment arrived? (flags below) */

#define INET_FRAG_COMPLETE	4	/* reassembly finished (or queue killed) */
#define INET_FRAG_FIRST_IN	2	/* first fragment has arrived */
#define INET_FRAG_LAST_IN	1	/* last fragment has arrived */

	u16			max_size;	/* NOTE(review): likely largest fragment size seen — verify against callers */

	struct netns_frags	*net;	/* owning namespace state; used for memory accounting */
};
|
|
|
|
#define INETFRAGS_HASHSZ 64
|
|
|
|
/*
 * Per-protocol fragment handling descriptor (one instance each for
 * IPv4, IPv6 and netfilter reassembly): the global hash of queues plus
 * the protocol-specific callbacks that parameterize the shared code.
 */
struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];	/* global hash of inet_frag_queue */
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;	/* interval for rehashing (secret_timer) */
	struct timer_list	secret_timer;		/* timer driving periodic rehash */
	u32			rnd;			/* hash seed */
	int			qsize;			/* size of a protocol's queue structure */

	/* Protocol-supplied callbacks. */
	unsigned int		(*hashfn)(struct inet_frag_queue *);	/* hash a queue into hash[] */
	bool			(*match)(struct inet_frag_queue *q, void *arg);	/* does q match lookup key arg? */
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);	/* init protocol part of a new queue */
	void			(*destructor)(struct inet_frag_queue *);	/* tear down protocol part */
	void			(*skb_free)(struct sk_buff *);	/* protocol hook to free a fragment skb */
	void			(*frag_expire)(unsigned long data);	/* timer callback on queue timeout */
};
|
|
|
|
/* Register / unregister a protocol's fragment descriptor. */
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

/* Set up / tear down per-namespace fragment state. */
void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

/* Remove a queue from the hash/LRU so no new lookups find it. */
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
/* Free a dead queue and its fragments; called once refcnt hits zero. */
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
/* Evict queues to relieve memory pressure; returns amount evicted. */
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
/* Look up (or create) the queue matching key; drops f->lock on return. */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
|
|
|
|
static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
|
|
{
|
|
if (atomic_dec_and_test(&q->refcnt))
|
|
inet_frag_destroy(q, f, NULL);
|
|
}
|
|
|
|
/* Memory Tracking Functions. */
|
|
|
|
/* Memory Tracking Functions. */

/* Current fragment memory usage of namespace @nf. */
static inline int frag_mem_limit(struct netns_frags *nf)
{
	return atomic_read(&nf->mem);
}
|
|
|
|
static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
|
|
{
|
|
atomic_sub(i, &q->net->mem);
|
|
}
|
|
|
|
static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
|
|
{
|
|
atomic_add(i, &q->net->mem);
|
|
}
|
|
|
|
/* Reset the fragment memory accounting of namespace @nf to zero. */
static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	atomic_set(&nf->mem, 0);
}
|
|
|
|
/*
 * Total fragment memory usage of namespace @nf.  Currently identical to
 * frag_mem_limit(); kept separate as preparation for extending the
 * memory limit tracking (per the introducing commit), where "sum" may
 * become more expensive than a single read.
 */
static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	return atomic_read(&nf->mem);
}
|
|
|
|
#endif
|