Merge branch 'sfc-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-2.6

David S. Miller 2011-04-12 17:10:52 -07:00
commit c0212fb146
7 changed files with 30 additions and 31 deletions

View File

@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
  * processing to finish, then directly poll (and ack ) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {
@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 	BUG_ON(channel->channel >= efx->n_channels);
 	BUG_ON(!channel->enabled);
+	BUG_ON(!efx->loopback_selftest);
 	/* Disable interrupts and wait for ISRs to complete */
 	efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
-	if (efx_dev_registered(efx))
+	if (efx_dev_registered(efx) && !efx->port_inhibited)
 		netif_tx_wake_all_queues(efx->net_dev);
 	efx_for_each_channel(channel, efx)

View File

@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
 	spin_lock_irqsave(&efx->biu_lock, flags);
 	value->u32[0] = _efx_readd(efx, reg + 0);
+	rmb();
 	value->u32[1] = _efx_readd(efx, reg + 4);
 	value->u32[2] = _efx_readd(efx, reg + 8);
 	value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
 	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
 	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+	rmb();
 	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
 	spin_unlock_irqrestore(&efx->biu_lock, flags);

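The rmb() added in each hunk above orders the partial 32-bit reads so that the first dword is read before the remaining ones. As a loose userspace analogue of that read-ordering idea (an illustration only, not the driver's MMIO code; the names lo, hi, publish and read_back are invented here), C11 acquire/release can express "read the ordering word first, then the rest":

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative analogue of ordered partial reads: a 64-bit value is
 * published as two 32-bit halves, and the reader must observe the half
 * that carries the ordering (here 'lo') before reading the other one,
 * much as efx_reado() reads dword 0, issues rmb(), then reads the rest. */
static _Atomic uint32_t lo;	/* written last, with release semantics */
static uint32_t hi;		/* ordinary data ordered by 'lo' */

static void publish(uint32_t hi_val, uint32_t lo_val)
{
	hi = hi_val;
	atomic_store_explicit(&lo, lo_val, memory_order_release);
}

static uint64_t read_back(void)
{
	/* the acquire load plays the role of "read first dword, then rmb()" */
	uint32_t l = atomic_load_explicit(&lo, memory_order_acquire);
	uint32_t h = hi;

	return ((uint64_t)h << 32) | l;
}

int main(void)
{
	publish(0x00c0ffee, 0xdeadbeef);
	printf("0x%016llx\n", (unsigned long long)read_back());
	return 0;
}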
View File

@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@ struct efx_channel {
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
-	unsigned int magic_count;
 	unsigned int irq_count;
 	unsigned int irq_mod_score;

View File

@@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
 static inline efx_qword_t *efx_event(struct efx_channel *channel,
 				     unsigned int index)
 {
-	return ((efx_qword_t *) (channel->eventq.addr)) + index;
+	return ((efx_qword_t *) (channel->eventq.addr)) +
+		(index & channel->eventq_mask);
 }
 /* See if an event is present
@@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
 	efx_dword_t reg;
 	struct efx_nic *efx = channel->efx;
-	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+			     channel->eventq_read_ptr & channel->eventq_mask);
 	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
 			 channel->channel);
 }
@@ -908,7 +910,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
 	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
 	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
-		++channel->magic_count;
+		; /* ignore */
 	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
 		/* The queue must be empty, so we won't receive any rx
 		 * events, so efx_process_channel() won't refill the
@@ -1015,8 +1017,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
 		/* Clear this event by marking it all ones */
 		EFX_SET_QWORD(*p_event);
-		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & channel->eventq_mask;
+		++read_ptr;
 		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
@@ -1060,6 +1061,13 @@ out:
 	return spent;
 }
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
 /* Allocate buffer table entries for event queue */
 int efx_nic_probe_eventq(struct efx_channel *channel)
@@ -1165,7 +1173,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
 	do {
 		efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1205,7 +1213,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
 		 * it's ok to throw away every non-flush event */
 		EFX_SET_QWORD(*event);
-		read_ptr = (read_ptr + 1) & channel->eventq_mask;
+		++read_ptr;
 	} while (read_ptr != end_ptr);
 	channel->eventq_read_ptr = read_ptr;

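Taken together, the hunks above stop masking the event-queue read pointer on every increment: channel->eventq_read_ptr now runs freely and is masked only at the point of use, in efx_event() and when the value is written back to the FRF_AZ_EVQ_RPTR register. A minimal standalone sketch of that pattern for a power-of-two ring (the struct ring type and names below are illustrative, not the driver's):

#include <stdio.h>

#define RING_SIZE 8u			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	unsigned int entry[RING_SIZE];
	unsigned int read_ptr;		/* free-running, never masked on increment */
};

/* Mask only where the index is used, as efx_event() now does. */
static unsigned int *ring_entry(struct ring *r, unsigned int index)
{
	return &r->entry[index & RING_MASK];
}

int main(void)
{
	struct ring r = { .read_ptr = 0 };
	unsigned int i;

	for (i = 0; i < RING_SIZE; i++)
		r.entry[i] = 100 + i;

	/* Consume more entries than the ring holds: the counter wraps past
	 * RING_SIZE, the masked lookup stays in bounds, and an observer can
	 * still detect progress by comparing raw (unmasked) pointer values. */
	for (i = 0; i < 10; i++) {
		printf("slot %u -> %u\n", r.read_ptr & RING_MASK,
		       *ring_entry(&r, r.read_ptr));
		++r.read_ptr;		/* no mask, as in efx_nic_process_eventq() */
	}
	return 0;
}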
View File

@@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
 extern void efx_nic_remove_eventq(struct efx_channel *channel);
 extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
 extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);

View File

@@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 static int efx_test_interrupts(struct efx_nic *efx,
 			       struct efx_self_tests *tests)
 {
-	struct efx_channel *channel;
 	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
 	tests->interrupt = -1;
@@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
 	efx->last_irq_cpu = -1;
 	smp_wmb();
-	/* ACK each interrupting event queue. Receiving an interrupt due to
-	 * traffic before a test event is raised is considered a pass */
-	efx_for_each_channel(channel, efx) {
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-		if (efx->last_irq_cpu >= 0)
-			goto success;
-	}
 	efx_nic_generate_interrupt(efx);
 	/* Wait for arrival of test interrupt. */
@@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 			       struct efx_self_tests *tests)
 {
 	struct efx_nic *efx = channel->efx;
-	unsigned int magic_count, count;
+	unsigned int read_ptr, count;
 	tests->eventq_dma[channel->channel] = -1;
 	tests->eventq_int[channel->channel] = -1;
 	tests->eventq_poll[channel->channel] = -1;
-	magic_count = channel->magic_count;
+	read_ptr = channel->eventq_read_ptr;
 	channel->efx->last_irq_cpu = -1;
 	smp_wmb();
@@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 	do {
 		schedule_timeout_uninterruptible(HZ / 100);
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-		if (channel->magic_count != magic_count)
+		if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
 			goto eventq_ok;
 	} while (++count < 2);
@@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 	}
 	/* Check to see if event was received even if interrupt wasn't */
-	efx_process_channel_now(channel);
-	if (channel->magic_count != magic_count) {
+	if (efx_nic_event_present(channel)) {
 		netif_err(efx, drv, efx->net_dev,
 			  "channel %d event was generated, but "
 			  "failed to trigger an interrupt\n", channel->channel);
@@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	__efx_reconfigure_port(efx);
 	mutex_unlock(&efx->mac_lock);
+	netif_tx_wake_all_queues(efx->net_dev);
 	return rc_test;
 }

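With magic_count gone, the event-queue self-test above no longer calls efx_process_channel_now() to drain events itself; it simply watches channel->eventq_read_ptr advance (read with ACCESS_ONCE so the load is not hoisted out of the loop) and then uses efx_nic_event_present() to distinguish "event delivered but no interrupt" from "no event at all". A rough userspace sketch of that polling idea (illustrative only; the ONCE macro, fake_irq() and the timings are inventions, not driver code):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the kernel's ACCESS_ONCE(): force a real load each time. */
#define ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned int read_ptr;		/* advanced by the "event handler" */

static void *fake_irq(void *arg)
{
	(void)arg;
	usleep(10 * 1000);		/* pretend the NIC raised a test event */
	ONCE(read_ptr) = read_ptr + 1;	/* handler consumes it and moves on */
	return NULL;
}

int main(void)
{
	unsigned int start = ONCE(read_ptr);
	unsigned int count = 0;
	pthread_t t;

	pthread_create(&t, NULL, fake_irq, NULL);

	/* Poll for progress instead of processing events ourselves,
	 * as the reworked efx_test_eventq_irq() does. */
	do {
		usleep(1000);
		if (ONCE(read_ptr) != start) {
			puts("eventq ok");
			break;
		}
	} while (++count < 100);

	pthread_join(t, NULL);
	return 0;
}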
View File

@@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	 * queue state. */
 	smp_mb();
 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-	    likely(efx->port_enabled)) {
+	    likely(efx->port_enabled) &&
+	    likely(!efx->port_inhibited)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));