net: netif_rx() must disable preemption

Eric Paris reported that netif_rx() is calling smp_processor_id() from
preemptible context, in particular when the caller is
ip_dev_loopback_xmit().

The RPS commit added this smp_processor_id() call; this patch makes sure
preemption is disabled. Since get_rps_cpu() wants rcu_read_lock() held
anyway, we can take it a bit earlier.

Reported-by: Eric Paris <eparis@redhat.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Eric Dumazet 2010-04-15 00:14:07 -07:00 committed by David S. Miller
parent fea0691526
commit b0e28f1eff

View File

@ -2206,6 +2206,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/*
* get_rps_cpu is called from netif_receive_skb and returns the target
* CPU from the RPS map of the receiving queue for a given skb.
* rcu_read_lock must be held on entry.
*/
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
{
@ -2217,8 +2218,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
u8 ip_proto;
u32 addr1, addr2, ports, ihl;
rcu_read_lock();
if (skb_rx_queue_recorded(skb)) {
u16 index = skb_get_rx_queue(skb);
if (unlikely(index >= dev->num_rx_queues)) {
@ -2296,7 +2295,6 @@ got_hash:
}
done:
rcu_read_unlock();
return cpu;
}
@ -2392,7 +2390,7 @@ enqueue:
int netif_rx(struct sk_buff *skb)
{
int cpu;
int ret;
/* if netpoll wants it, pretend we never saw it */
if (netpoll_rx(skb))
@ -2402,14 +2400,21 @@ int netif_rx(struct sk_buff *skb)
net_timestamp(skb);
#ifdef CONFIG_RPS
cpu = get_rps_cpu(skb->dev, skb);
if (cpu < 0)
cpu = smp_processor_id();
#else
cpu = smp_processor_id();
#endif
{
int cpu;
return enqueue_to_backlog(skb, cpu);
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb);
if (cpu < 0)
cpu = smp_processor_id();
ret = enqueue_to_backlog(skb, cpu);
rcu_read_unlock();
}
#else
ret = enqueue_to_backlog(skb, get_cpu());
put_cpu();
#endif
return ret;
}
EXPORT_SYMBOL(netif_rx);