mirror of
https://github.com/FEX-Emu/linux.git
synced 2025-01-15 06:00:41 +00:00
Merge branch 'amd-xgbe-next'
Tom Lendacky says: ==================== amd-xgbe: AMD XGBE driver updates 2015-05-12 The following series of patches includes functional updates and changes to the driver. - Add additional statistics to be collected and reported - Use the netif_* functions for issuing some debug and informational driver messages - Rx path SKB allocation cleanup/simplification - Remove stand-alone phylib driver and incorporate function into the nic driver - Simplify device tree support while maintaining backwards compatibility - Fix the flow control negotiation logic to properly configure flow control - Remove the checking and setting of the device dma_mask field This patch series is based on net-next. Changes in v2: - Change from using the netif_msg_*/netdev_* combination for issuing messages to the more concise netif_* ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
54eac85057
@ -1,48 +0,0 @@
|
||||
* AMD 10GbE PHY driver (amd-xgbe-phy)
|
||||
|
||||
Required properties:
|
||||
- compatible: Should be "amd,xgbe-phy-seattle-v1a" and
|
||||
"ethernet-phy-ieee802.3-c45"
|
||||
- reg: Address and length of the register sets for the device
|
||||
- SerDes Rx/Tx registers
|
||||
- SerDes integration registers (1/2)
|
||||
- SerDes integration registers (2/2)
|
||||
- interrupt-parent: Should be the phandle for the interrupt controller
|
||||
that services interrupts for this device
|
||||
- interrupts: Should contain the amd-xgbe-phy interrupt.
|
||||
|
||||
Optional properties:
|
||||
- amd,speed-set: Speed capabilities of the device
|
||||
0 - 1GbE and 10GbE (default)
|
||||
1 - 2.5GbE and 10GbE
|
||||
|
||||
The following optional properties are represented by an array with each
|
||||
value corresponding to a particular speed. The first array value represents
|
||||
the setting for the 1GbE speed, the second value for the 2.5GbE speed and
|
||||
the third value for the 10GbE speed. All three values are required if the
|
||||
property is used.
|
||||
- amd,serdes-blwc: Baseline wandering correction enablement
|
||||
0 - Off
|
||||
1 - On
|
||||
- amd,serdes-cdr-rate: CDR rate speed selection
|
||||
- amd,serdes-pq-skew: PQ (data sampling) skew
|
||||
- amd,serdes-tx-amp: TX amplitude boost
|
||||
- amd,serdes-dfe-tap-config: DFE taps available to run
|
||||
- amd,serdes-dfe-tap-enable: DFE taps to enable
|
||||
|
||||
Example:
|
||||
xgbe_phy@e1240800 {
|
||||
compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
|
||||
reg = <0 0xe1240800 0 0x00400>,
|
||||
<0 0xe1250000 0 0x00060>,
|
||||
<0 0xe1250080 0 0x00004>;
|
||||
interrupt-parent = <&gic>;
|
||||
interrupts = <0 323 4>;
|
||||
amd,speed-set = <0>;
|
||||
amd,serdes-blwc = <1>, <1>, <0>;
|
||||
amd,serdes-cdr-rate = <2>, <2>, <7>;
|
||||
amd,serdes-pq-skew = <10>, <10>, <30>;
|
||||
amd,serdes-tx-amp = <15>, <15>, <10>;
|
||||
amd,serdes-dfe-tap-config = <3>, <3>, <1>;
|
||||
amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
|
||||
};
|
@ -5,12 +5,16 @@ Required properties:
|
||||
- reg: Address and length of the register sets for the device
|
||||
- MAC registers
|
||||
- PCS registers
|
||||
- SerDes Rx/Tx registers
|
||||
- SerDes integration registers (1/2)
|
||||
- SerDes integration registers (2/2)
|
||||
- interrupt-parent: Should be the phandle for the interrupt controller
|
||||
that services interrupts for this device
|
||||
- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
|
||||
listed is required and is the general device interrupt. If the optional
|
||||
amd,per-channel-interrupt property is specified, then one additional
|
||||
interrupt for each DMA channel supported by the device should be specified
|
||||
interrupt for each DMA channel supported by the device should be specified.
|
||||
The last interrupt listed should be the PCS auto-negotiation interrupt.
|
||||
- clocks:
|
||||
- DMA clock for the amd-xgbe device (used for calculating the
|
||||
correct Rx interrupt watchdog timer value on a DMA channel
|
||||
@ -19,7 +23,6 @@ Required properties:
|
||||
- clock-names: Should be the names of the clocks
|
||||
- "dma_clk" for the DMA clock
|
||||
- "ptp_clk" for the PTP clock
|
||||
- phy-handle: See ethernet.txt file in the same directory
|
||||
- phy-mode: See ethernet.txt file in the same directory
|
||||
|
||||
Optional properties:
|
||||
@ -29,19 +32,46 @@ Optional properties:
|
||||
- amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
|
||||
a unique interrupt for each DMA channel - this requires an additional
|
||||
interrupt be configured for each DMA channel
|
||||
- amd,speed-set: Speed capabilities of the device
|
||||
0 - 1GbE and 10GbE (default)
|
||||
1 - 2.5GbE and 10GbE
|
||||
|
||||
The following optional properties are represented by an array with each
|
||||
value corresponding to a particular speed. The first array value represents
|
||||
the setting for the 1GbE speed, the second value for the 2.5GbE speed and
|
||||
the third value for the 10GbE speed. All three values are required if the
|
||||
property is used.
|
||||
- amd,serdes-blwc: Baseline wandering correction enablement
|
||||
0 - Off
|
||||
1 - On
|
||||
- amd,serdes-cdr-rate: CDR rate speed selection
|
||||
- amd,serdes-pq-skew: PQ (data sampling) skew
|
||||
- amd,serdes-tx-amp: TX amplitude boost
|
||||
- amd,serdes-dfe-tap-config: DFE taps available to run
|
||||
- amd,serdes-dfe-tap-enable: DFE taps to enable
|
||||
|
||||
Example:
|
||||
xgbe@e0700000 {
|
||||
compatible = "amd,xgbe-seattle-v1a";
|
||||
reg = <0 0xe0700000 0 0x80000>,
|
||||
<0 0xe0780000 0 0x80000>;
|
||||
<0 0xe0780000 0 0x80000>,
|
||||
<0 0xe1240800 0 0x00400>,
|
||||
<0 0xe1250000 0 0x00060>,
|
||||
<0 0xe1250080 0 0x00004>;
|
||||
interrupt-parent = <&gic>;
|
||||
interrupts = <0 325 4>,
|
||||
<0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
|
||||
<0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>,
|
||||
<0 323 4>;
|
||||
amd,per-channel-interrupt;
|
||||
clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
|
||||
clock-names = "dma_clk", "ptp_clk";
|
||||
phy-handle = <&phy>;
|
||||
phy-mode = "xgmii";
|
||||
mac-address = [ 02 a1 a2 a3 a4 a5 ];
|
||||
amd,speed-set = <0>;
|
||||
amd,serdes-blwc = <1>, <1>, <0>;
|
||||
amd,serdes-cdr-rate = <2>, <2>, <7>;
|
||||
amd,serdes-pq-skew = <10>, <10>, <30>;
|
||||
amd,serdes-tx-amp = <15>, <15>, <10>;
|
||||
amd,serdes-dfe-tap-config = <3>, <3>, <1>;
|
||||
amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
|
||||
};
|
||||
|
@ -652,7 +652,6 @@ M: Tom Lendacky <thomas.lendacky@amd.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
F: drivers/net/ethernet/amd/xgbe/
|
||||
F: drivers/net/phy/amd-xgbe-phy.c
|
||||
|
||||
AMS (Apple Motion Sensor) DRIVER
|
||||
M: Michael Hanselmann <linux-kernel@hansmi.ch>
|
||||
|
@ -179,10 +179,8 @@ config SUNLANCE
|
||||
|
||||
config AMD_XGBE
|
||||
tristate "AMD 10GbE Ethernet driver"
|
||||
depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
|
||||
depends on ((OF_NET && OF_ADDRESS) || ACPI) && HAS_IOMEM && HAS_DMA
|
||||
depends on ARM64 || COMPILE_TEST
|
||||
select PHYLIB
|
||||
select AMD_XGBE_PHY
|
||||
select BITREVERSE
|
||||
select CRC32
|
||||
select PTP_1588_CLOCK
|
||||
|
@ -857,6 +857,48 @@
|
||||
*/
|
||||
#define PCS_MMD_SELECT 0xff
|
||||
|
||||
/* SerDes integration register offsets */
|
||||
#define SIR0_KR_RT_1 0x002c
|
||||
#define SIR0_STATUS 0x0040
|
||||
#define SIR1_SPEED 0x0000
|
||||
|
||||
/* SerDes integration register entry bit positions and sizes */
|
||||
#define SIR0_KR_RT_1_RESET_INDEX 11
|
||||
#define SIR0_KR_RT_1_RESET_WIDTH 1
|
||||
#define SIR0_STATUS_RX_READY_INDEX 0
|
||||
#define SIR0_STATUS_RX_READY_WIDTH 1
|
||||
#define SIR0_STATUS_TX_READY_INDEX 8
|
||||
#define SIR0_STATUS_TX_READY_WIDTH 1
|
||||
#define SIR1_SPEED_CDR_RATE_INDEX 12
|
||||
#define SIR1_SPEED_CDR_RATE_WIDTH 4
|
||||
#define SIR1_SPEED_DATARATE_INDEX 4
|
||||
#define SIR1_SPEED_DATARATE_WIDTH 2
|
||||
#define SIR1_SPEED_PLLSEL_INDEX 3
|
||||
#define SIR1_SPEED_PLLSEL_WIDTH 1
|
||||
#define SIR1_SPEED_RATECHANGE_INDEX 6
|
||||
#define SIR1_SPEED_RATECHANGE_WIDTH 1
|
||||
#define SIR1_SPEED_TXAMP_INDEX 8
|
||||
#define SIR1_SPEED_TXAMP_WIDTH 4
|
||||
#define SIR1_SPEED_WORDMODE_INDEX 0
|
||||
#define SIR1_SPEED_WORDMODE_WIDTH 3
|
||||
|
||||
/* SerDes RxTx register offsets */
|
||||
#define RXTX_REG6 0x0018
|
||||
#define RXTX_REG20 0x0050
|
||||
#define RXTX_REG22 0x0058
|
||||
#define RXTX_REG114 0x01c8
|
||||
#define RXTX_REG129 0x0204
|
||||
|
||||
/* SerDes RxTx register entry bit positions and sizes */
|
||||
#define RXTX_REG6_RESETB_RXD_INDEX 8
|
||||
#define RXTX_REG6_RESETB_RXD_WIDTH 1
|
||||
#define RXTX_REG20_BLWC_ENA_INDEX 2
|
||||
#define RXTX_REG20_BLWC_ENA_WIDTH 1
|
||||
#define RXTX_REG114_PQ_REG_INDEX 9
|
||||
#define RXTX_REG114_PQ_REG_WIDTH 7
|
||||
#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
|
||||
#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
|
||||
|
||||
/* Descriptor/Packet entry bit positions and sizes */
|
||||
#define RX_PACKET_ERRORS_CRC_INDEX 2
|
||||
#define RX_PACKET_ERRORS_CRC_WIDTH 1
|
||||
@ -973,10 +1015,47 @@
|
||||
#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
|
||||
|
||||
/* MDIO undefined or vendor specific registers */
|
||||
#ifndef MDIO_PMA_10GBR_PMD_CTRL
|
||||
#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
|
||||
#endif
|
||||
|
||||
#ifndef MDIO_PMA_10GBR_FECCTRL
|
||||
#define MDIO_PMA_10GBR_FECCTRL 0x00ab
|
||||
#endif
|
||||
|
||||
#ifndef MDIO_AN_XNP
|
||||
#define MDIO_AN_XNP 0x0016
|
||||
#endif
|
||||
|
||||
#ifndef MDIO_AN_LPX
|
||||
#define MDIO_AN_LPX 0x0019
|
||||
#endif
|
||||
|
||||
#ifndef MDIO_AN_COMP_STAT
|
||||
#define MDIO_AN_COMP_STAT 0x0030
|
||||
#endif
|
||||
|
||||
#ifndef MDIO_AN_INTMASK
|
||||
#define MDIO_AN_INTMASK 0x8001
|
||||
#endif
|
||||
|
||||
#ifndef MDIO_AN_INT
|
||||
#define MDIO_AN_INT 0x8002
|
||||
#endif
|
||||
|
||||
#ifndef MDIO_CTRL1_SPEED1G
|
||||
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
|
||||
#endif
|
||||
|
||||
/* MDIO mask values */
|
||||
#define XGBE_XNP_MCF_NULL_MESSAGE 0x001
|
||||
#define XGBE_XNP_ACK_PROCESSED BIT(12)
|
||||
#define XGBE_XNP_MP_FORMATTED BIT(13)
|
||||
#define XGBE_XNP_NP_EXCHANGE BIT(15)
|
||||
|
||||
#define XGBE_KR_TRAINING_START BIT(0)
|
||||
#define XGBE_KR_TRAINING_ENABLE BIT(1)
|
||||
|
||||
/* Bit setting and getting macros
|
||||
* The get macro will extract the current bit field value from within
|
||||
* the variable
|
||||
@ -1118,6 +1197,82 @@ do { \
|
||||
#define XPCS_IOREAD(_pdata, _off) \
|
||||
ioread32((_pdata)->xpcs_regs + (_off))
|
||||
|
||||
/* Macros for building, reading or writing register values or bits
|
||||
* within the register values of SerDes integration registers.
|
||||
*/
|
||||
#define XSIR_GET_BITS(_var, _prefix, _field) \
|
||||
GET_BITS((_var), \
|
||||
_prefix##_##_field##_INDEX, \
|
||||
_prefix##_##_field##_WIDTH)
|
||||
|
||||
#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
|
||||
SET_BITS((_var), \
|
||||
_prefix##_##_field##_INDEX, \
|
||||
_prefix##_##_field##_WIDTH, (_val))
|
||||
|
||||
#define XSIR0_IOREAD(_pdata, _reg) \
|
||||
ioread16((_pdata)->sir0_regs + _reg)
|
||||
|
||||
#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
|
||||
GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
|
||||
_reg##_##_field##_INDEX, \
|
||||
_reg##_##_field##_WIDTH)
|
||||
|
||||
#define XSIR0_IOWRITE(_pdata, _reg, _val) \
|
||||
iowrite16((_val), (_pdata)->sir0_regs + _reg)
|
||||
|
||||
#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
|
||||
do { \
|
||||
u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
|
||||
SET_BITS(reg_val, \
|
||||
_reg##_##_field##_INDEX, \
|
||||
_reg##_##_field##_WIDTH, (_val)); \
|
||||
XSIR0_IOWRITE((_pdata), _reg, reg_val); \
|
||||
} while (0)
|
||||
|
||||
#define XSIR1_IOREAD(_pdata, _reg) \
|
||||
ioread16((_pdata)->sir1_regs + _reg)
|
||||
|
||||
#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
|
||||
GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
|
||||
_reg##_##_field##_INDEX, \
|
||||
_reg##_##_field##_WIDTH)
|
||||
|
||||
#define XSIR1_IOWRITE(_pdata, _reg, _val) \
|
||||
iowrite16((_val), (_pdata)->sir1_regs + _reg)
|
||||
|
||||
#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
|
||||
do { \
|
||||
u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
|
||||
SET_BITS(reg_val, \
|
||||
_reg##_##_field##_INDEX, \
|
||||
_reg##_##_field##_WIDTH, (_val)); \
|
||||
XSIR1_IOWRITE((_pdata), _reg, reg_val); \
|
||||
} while (0)
|
||||
|
||||
/* Macros for building, reading or writing register values or bits
|
||||
* within the register values of SerDes RxTx registers.
|
||||
*/
|
||||
#define XRXTX_IOREAD(_pdata, _reg) \
|
||||
ioread16((_pdata)->rxtx_regs + _reg)
|
||||
|
||||
#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
|
||||
GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
|
||||
_reg##_##_field##_INDEX, \
|
||||
_reg##_##_field##_WIDTH)
|
||||
|
||||
#define XRXTX_IOWRITE(_pdata, _reg, _val) \
|
||||
iowrite16((_val), (_pdata)->rxtx_regs + _reg)
|
||||
|
||||
#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
|
||||
do { \
|
||||
u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
|
||||
SET_BITS(reg_val, \
|
||||
_reg##_##_field##_INDEX, \
|
||||
_reg##_##_field##_WIDTH, (_val)); \
|
||||
XRXTX_IOWRITE((_pdata), _reg, reg_val); \
|
||||
} while (0)
|
||||
|
||||
/* Macros for building, reading or writing register values or bits
|
||||
* using MDIO. Different from above because of the use of standardized
|
||||
* Linux include values. No shifting is performed with the bit
|
||||
|
@ -150,9 +150,12 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
|
||||
tc_ets = 0;
|
||||
tc_ets_weight = 0;
|
||||
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
|
||||
DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
|
||||
ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
|
||||
DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
|
||||
netif_dbg(pdata, drv, netdev,
|
||||
"TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
|
||||
ets->tc_tx_bw[i], ets->tc_rx_bw[i],
|
||||
ets->tc_tsa[i]);
|
||||
netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
|
||||
ets->prio_tc[i]);
|
||||
|
||||
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
|
||||
(i >= pdata->hw_feat.tc_cnt))
|
||||
@ -214,8 +217,9 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
|
||||
{
|
||||
struct xgbe_prv_data *pdata = netdev_priv(netdev);
|
||||
|
||||
DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
|
||||
pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
|
||||
netif_dbg(pdata, drv, netdev,
|
||||
"cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
|
||||
pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
|
||||
|
||||
if (!pdata->pfc) {
|
||||
pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
|
||||
@ -238,9 +242,10 @@ static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
|
||||
|
||||
static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
|
||||
{
|
||||
struct xgbe_prv_data *pdata = netdev_priv(netdev);
|
||||
u8 support = xgbe_dcb_getdcbx(netdev);
|
||||
|
||||
DBGPR(" DCBX=%#hhx\n", dcbx);
|
||||
netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);
|
||||
|
||||
if (dcbx & ~support)
|
||||
return 1;
|
||||
|
@ -208,8 +208,9 @@ static int xgbe_init_ring(struct xgbe_prv_data *pdata,
|
||||
if (!ring->rdata)
|
||||
return -ENOMEM;
|
||||
|
||||
DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
|
||||
ring->rdesc, ring->rdesc_dma, ring->rdata);
|
||||
netif_dbg(pdata, drv, pdata->netdev,
|
||||
"rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
|
||||
ring->rdesc, &ring->rdesc_dma, ring->rdata);
|
||||
|
||||
DBGPR("<--xgbe_init_ring\n");
|
||||
|
||||
@ -226,7 +227,9 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
|
||||
|
||||
channel = pdata->channel;
|
||||
for (i = 0; i < pdata->channel_count; i++, channel++) {
|
||||
DBGPR(" %s - tx_ring:\n", channel->name);
|
||||
netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
|
||||
channel->name);
|
||||
|
||||
ret = xgbe_init_ring(pdata, channel->tx_ring,
|
||||
pdata->tx_desc_count);
|
||||
if (ret) {
|
||||
@ -235,12 +238,14 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
|
||||
goto err_ring;
|
||||
}
|
||||
|
||||
DBGPR(" %s - rx_ring:\n", channel->name);
|
||||
netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
|
||||
channel->name);
|
||||
|
||||
ret = xgbe_init_ring(pdata, channel->rx_ring,
|
||||
pdata->rx_desc_count);
|
||||
if (ret) {
|
||||
netdev_alert(pdata->netdev,
|
||||
"error initializing Tx ring\n");
|
||||
"error initializing Rx ring\n");
|
||||
goto err_ring;
|
||||
}
|
||||
}
|
||||
@ -476,8 +481,6 @@ static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
|
||||
|
||||
if (rdata->state_saved) {
|
||||
rdata->state_saved = 0;
|
||||
rdata->state.incomplete = 0;
|
||||
rdata->state.context_next = 0;
|
||||
rdata->state.skb = NULL;
|
||||
rdata->state.len = 0;
|
||||
rdata->state.error = 0;
|
||||
@ -518,8 +521,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
|
||||
rdata = XGBE_GET_DESC_DATA(ring, cur_index);
|
||||
|
||||
if (tso) {
|
||||
DBGPR(" TSO packet\n");
|
||||
|
||||
/* Map the TSO header */
|
||||
skb_dma = dma_map_single(pdata->dev, skb->data,
|
||||
packet->header_len, DMA_TO_DEVICE);
|
||||
@ -529,6 +530,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
|
||||
}
|
||||
rdata->skb_dma = skb_dma;
|
||||
rdata->skb_dma_len = packet->header_len;
|
||||
netif_dbg(pdata, tx_queued, pdata->netdev,
|
||||
"skb header: index=%u, dma=%pad, len=%u\n",
|
||||
cur_index, &skb_dma, packet->header_len);
|
||||
|
||||
offset = packet->header_len;
|
||||
|
||||
@ -550,8 +554,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
|
||||
}
|
||||
rdata->skb_dma = skb_dma;
|
||||
rdata->skb_dma_len = len;
|
||||
DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
|
||||
cur_index, skb_dma, len);
|
||||
netif_dbg(pdata, tx_queued, pdata->netdev,
|
||||
"skb data: index=%u, dma=%pad, len=%u\n",
|
||||
cur_index, &skb_dma, len);
|
||||
|
||||
datalen -= len;
|
||||
offset += len;
|
||||
@ -563,7 +568,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
|
||||
}
|
||||
|
||||
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
DBGPR(" mapping frag %u\n", i);
|
||||
netif_dbg(pdata, tx_queued, pdata->netdev,
|
||||
"mapping frag %u\n", i);
|
||||
|
||||
frag = &skb_shinfo(skb)->frags[i];
|
||||
offset = 0;
|
||||
@ -582,8 +588,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
|
||||
rdata->skb_dma = skb_dma;
|
||||
rdata->skb_dma_len = len;
|
||||
rdata->mapped_as_page = 1;
|
||||
DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
|
||||
cur_index, skb_dma, len);
|
||||
netif_dbg(pdata, tx_queued, pdata->netdev,
|
||||
"skb frag: index=%u, dma=%pad, len=%u\n",
|
||||
cur_index, &skb_dma, len);
|
||||
|
||||
datalen -= len;
|
||||
offset += len;
|
||||
|
@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
|
||||
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
|
||||
return 0;
|
||||
|
||||
DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
|
||||
netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
|
||||
enable ? "entering" : "leaving");
|
||||
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
|
||||
|
||||
return 0;
|
||||
@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
|
||||
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
|
||||
return 0;
|
||||
|
||||
DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
|
||||
netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
|
||||
enable ? "entering" : "leaving");
|
||||
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
|
||||
|
||||
return 0;
|
||||
@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
|
||||
mac_addr[0] = ha->addr[4];
|
||||
mac_addr[1] = ha->addr[5];
|
||||
|
||||
DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
|
||||
*mac_reg);
|
||||
netif_dbg(pdata, drv, pdata->netdev,
|
||||
"adding mac address %pM at %#x\n",
|
||||
ha->addr, *mac_reg);
|
||||
|
||||
XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
|
||||
}
|
||||
@ -907,23 +910,6 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
|
||||
else
|
||||
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
|
||||
|
||||
/* If the PCS is changing modes, match the MAC speed to it */
|
||||
if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
|
||||
((mmd_address & 0xffff) == MDIO_CTRL2)) {
|
||||
struct phy_device *phydev = pdata->phydev;
|
||||
|
||||
if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
|
||||
/* KX mode */
|
||||
if (phydev->supported & SUPPORTED_1000baseKX_Full)
|
||||
xgbe_set_gmii_speed(pdata);
|
||||
else
|
||||
xgbe_set_gmii_2500_speed(pdata);
|
||||
} else {
|
||||
/* KR mode */
|
||||
xgbe_set_xgmii_speed(pdata);
|
||||
}
|
||||
}
|
||||
|
||||
/* The PCS registers are accessed using mmio. The underlying APB3
|
||||
* management interface uses indirect addressing to access the MMD
|
||||
* register sets. This requires accessing of the PCS register in two
|
||||
@ -1322,7 +1308,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
|
||||
for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
|
||||
switch (ets->tc_tsa[i]) {
|
||||
case IEEE_8021QAZ_TSA_STRICT:
|
||||
DBGPR(" TC%u using SP\n", i);
|
||||
netif_dbg(pdata, drv, pdata->netdev,
|
||||
"TC%u using SP\n", i);
|
||||
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
|
||||
MTL_TSA_SP);
|
||||
break;
|
||||
@ -1330,7 +1317,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
|
||||
weight = total_weight * ets->tc_tx_bw[i] / 100;
|
||||
weight = clamp(weight, min_weight, total_weight);
|
||||
|
||||
DBGPR(" TC%u using DWRR (weight %u)\n", i, weight);
|
||||
netif_dbg(pdata, drv, pdata->netdev,
|
||||
"TC%u using DWRR (weight %u)\n", i, weight);
|
||||
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
|
||||
MTL_TSA_ETS);
|
||||
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
|
||||
@ -1359,7 +1347,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
|
||||
}
|
||||
mask &= 0xff;
|
||||
|
||||
DBGPR(" TC%u PFC mask=%#x\n", tc, mask);
|
||||
netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
|
||||
tc, mask);
|
||||
reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
|
||||
reg_val = XGMAC_IOREAD(pdata, reg);
|
||||
|
||||
@ -1457,8 +1446,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
|
||||
/* Create a context descriptor if this is a TSO packet */
|
||||
if (tso_context || vlan_context) {
|
||||
if (tso_context) {
|
||||
DBGPR(" TSO context descriptor, mss=%u\n",
|
||||
packet->mss);
|
||||
netif_dbg(pdata, tx_queued, pdata->netdev,
|
||||
"TSO context descriptor, mss=%u\n",
|
||||
packet->mss);
|
||||
|
||||
/* Set the MSS size */
|
||||
XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
|
||||
@ -1476,8 +1466,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
|
||||
}
|
||||
|
||||
if (vlan_context) {
|
||||
DBGPR(" VLAN context descriptor, ctag=%u\n",
|
||||
packet->vlan_ctag);
|
||||
netif_dbg(pdata, tx_queued, pdata->netdev,
|
||||
"VLAN context descriptor, ctag=%u\n",
|
||||
packet->vlan_ctag);
|
||||
|
||||
/* Mark it as a CONTEXT descriptor */
|
||||
XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
|
||||
@ -1533,6 +1524,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
|
||||
packet->tcp_payload_len);
|
||||
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
|
||||
packet->tcp_header_len / 4);
|
||||
|
||||
pdata->ext_stats.tx_tso_packets++;
|
||||
} else {
|
||||
/* Enable CRC and Pad Insertion */
|
||||
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
|
||||
@ -1594,9 +1587,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
|
||||
rdesc = rdata->rdesc;
|
||||
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
|
||||
|
||||
#ifdef XGMAC_ENABLE_TX_DESC_DUMP
|
||||
xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
|
||||
#endif
|
||||
if (netif_msg_tx_queued(pdata))
|
||||
xgbe_dump_tx_desc(pdata, ring, start_index,
|
||||
packet->rdesc_count, 1);
|
||||
|
||||
/* Make sure ownership is written to the descriptor */
|
||||
dma_wmb();
|
||||
@ -1618,11 +1611,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
|
||||
|
||||
static int xgbe_dev_read(struct xgbe_channel *channel)
|
||||
{
|
||||
struct xgbe_prv_data *pdata = channel->pdata;
|
||||
struct xgbe_ring *ring = channel->rx_ring;
|
||||
struct xgbe_ring_data *rdata;
|
||||
struct xgbe_ring_desc *rdesc;
|
||||
struct xgbe_packet_data *packet = &ring->packet_data;
|
||||
struct net_device *netdev = channel->pdata->netdev;
|
||||
struct net_device *netdev = pdata->netdev;
|
||||
unsigned int err, etlt, l34t;
|
||||
|
||||
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
|
||||
@ -1637,9 +1631,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
|
||||
/* Make sure descriptor fields are read after reading the OWN bit */
|
||||
dma_rmb();
|
||||
|
||||
#ifdef XGMAC_ENABLE_RX_DESC_DUMP
|
||||
xgbe_dump_rx_desc(ring, rdesc, ring->cur);
|
||||
#endif
|
||||
if (netif_msg_rx_status(pdata))
|
||||
xgbe_dump_rx_desc(pdata, ring, ring->cur);
|
||||
|
||||
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
|
||||
/* Timestamp Context Descriptor */
|
||||
@ -1661,9 +1654,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
|
||||
CONTEXT_NEXT, 1);
|
||||
|
||||
/* Get the header length */
|
||||
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
|
||||
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
|
||||
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
|
||||
RX_NORMAL_DESC2, HL);
|
||||
if (rdata->rx.hdr_len)
|
||||
pdata->ext_stats.rx_split_header_packets++;
|
||||
}
|
||||
|
||||
/* Get the RSS hash */
|
||||
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
|
||||
@ -1700,14 +1696,14 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
|
||||
INCOMPLETE, 0);
|
||||
|
||||
/* Set checksum done indicator as appropriate */
|
||||
if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
|
||||
if (netdev->features & NETIF_F_RXCSUM)
|
||||
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
|
||||
CSUM_DONE, 1);
|
||||
|
||||
/* Check for errors (only valid in last descriptor) */
|
||||
err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
|
||||
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
|
||||
DBGPR(" err=%u, etlt=%#x\n", err, etlt);
|
||||
netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
|
||||
|
||||
if (!err || !etlt) {
|
||||
/* No error if err is 0 or etlt is 0 */
|
||||
@ -1718,7 +1714,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
|
||||
packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
|
||||
RX_NORMAL_DESC0,
|
||||
OVT);
|
||||
DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
|
||||
netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
|
||||
packet->vlan_ctag);
|
||||
}
|
||||
} else {
|
||||
if ((etlt == 0x05) || (etlt == 0x06))
|
||||
@ -2026,9 +2023,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
|
||||
for (i = 0; i < pdata->tx_q_count; i++)
|
||||
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
|
||||
|
||||
netdev_notice(pdata->netdev,
|
||||
"%d Tx hardware queues, %d byte fifo per queue\n",
|
||||
pdata->tx_q_count, ((fifo_size + 1) * 256));
|
||||
netif_info(pdata, drv, pdata->netdev,
|
||||
"%d Tx hardware queues, %d byte fifo per queue\n",
|
||||
pdata->tx_q_count, ((fifo_size + 1) * 256));
|
||||
}
|
||||
|
||||
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
|
||||
@ -2042,9 +2039,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
|
||||
for (i = 0; i < pdata->rx_q_count; i++)
|
||||
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
|
||||
|
||||
netdev_notice(pdata->netdev,
|
||||
"%d Rx hardware queues, %d byte fifo per queue\n",
|
||||
pdata->rx_q_count, ((fifo_size + 1) * 256));
|
||||
netif_info(pdata, drv, pdata->netdev,
|
||||
"%d Rx hardware queues, %d byte fifo per queue\n",
|
||||
pdata->rx_q_count, ((fifo_size + 1) * 256));
|
||||
}
|
||||
|
||||
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
|
||||
@ -2063,14 +2060,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
|
||||
|
||||
for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
|
||||
for (j = 0; j < qptc; j++) {
|
||||
DBGPR(" TXq%u mapped to TC%u\n", queue, i);
|
||||
netif_dbg(pdata, drv, pdata->netdev,
|
||||
"TXq%u mapped to TC%u\n", queue, i);
|
||||
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
|
||||
Q2TCMAP, i);
|
||||
pdata->q2tc_map[queue++] = i;
|
||||
}
|
||||
|
||||
if (i < qptc_extra) {
|
||||
DBGPR(" TXq%u mapped to TC%u\n", queue, i);
|
||||
netif_dbg(pdata, drv, pdata->netdev,
|
||||
"TXq%u mapped to TC%u\n", queue, i);
|
||||
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
|
||||
Q2TCMAP, i);
|
||||
pdata->q2tc_map[queue++] = i;
|
||||
@ -2088,13 +2087,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
|
||||
for (i = 0, prio = 0; i < prio_queues;) {
|
||||
mask = 0;
|
||||
for (j = 0; j < ppq; j++) {
|
||||
DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
|
||||
netif_dbg(pdata, drv, pdata->netdev,
|
||||
"PRIO%u mapped to RXq%u\n", prio, i);
|
||||
mask |= (1 << prio);
|
||||
pdata->prio2q_map[prio++] = i;
|
||||
}
|
||||
|
||||
if (i < ppq_extra) {
|
||||
DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
|
||||
netif_dbg(pdata, drv, pdata->netdev,
|
||||
"PRIO%u mapped to RXq%u\n", prio, i);
|
||||
mask |= (1 << prio);
|
||||
pdata->prio2q_map[prio++] = i;
|
||||
}
|
||||
|
@ -183,9 +183,10 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
|
||||
channel->rx_ring = rx_ring++;
|
||||
}
|
||||
|
||||
DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
|
||||
channel->name, channel->queue_index, channel->dma_regs,
|
||||
channel->dma_irq, channel->tx_ring, channel->rx_ring);
|
||||
netif_dbg(pdata, drv, pdata->netdev,
|
||||
"%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
|
||||
channel->name, channel->dma_regs, channel->dma_irq,
|
||||
channel->tx_ring, channel->rx_ring);
|
||||
}
|
||||
|
||||
pdata->channel = channel_mem;
|
||||
@ -235,7 +236,8 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
|
||||
struct xgbe_prv_data *pdata = channel->pdata;
|
||||
|
||||
if (count > xgbe_tx_avail_desc(ring)) {
|
||||
DBGPR(" Tx queue stopped, not enough descriptors available\n");
|
||||
netif_info(pdata, drv, pdata->netdev,
|
||||
"Tx queue stopped, not enough descriptors available\n");
|
||||
netif_stop_subqueue(pdata->netdev, channel->queue_index);
|
||||
ring->tx.queue_stopped = 1;
|
||||
|
||||
@ -330,7 +332,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
|
||||
if (!dma_isr)
|
||||
goto isr_done;
|
||||
|
||||
DBGPR(" DMA_ISR = %08x\n", dma_isr);
|
||||
netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
|
||||
|
||||
for (i = 0; i < pdata->channel_count; i++) {
|
||||
if (!(dma_isr & (1 << i)))
|
||||
@ -339,7 +341,8 @@ static irqreturn_t xgbe_isr(int irq, void *data)
|
||||
channel = pdata->channel + i;
|
||||
|
||||
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
|
||||
DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
|
||||
netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
|
||||
i, dma_ch_isr);
|
||||
|
||||
/* The TI or RI interrupt bits may still be set even if using
|
||||
* per channel DMA interrupts. Check to be sure those are not
|
||||
@ -386,8 +389,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
|
||||
}
|
||||
}
|
||||
|
||||
DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
|
||||
|
||||
isr_done:
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -436,43 +437,61 @@ static void xgbe_tx_timer(unsigned long data)
|
||||
DBGPR("<--xgbe_tx_timer\n");
|
||||
}
|
||||
|
||||
static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
|
||||
static void xgbe_service(struct work_struct *work)
|
||||
{
|
||||
struct xgbe_prv_data *pdata = container_of(work,
|
||||
struct xgbe_prv_data,
|
||||
service_work);
|
||||
|
||||
pdata->phy_if.phy_status(pdata);
|
||||
}
|
||||
|
||||
static void xgbe_service_timer(unsigned long data)
|
||||
{
|
||||
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
|
||||
|
||||
schedule_work(&pdata->service_work);
|
||||
|
||||
mod_timer(&pdata->service_timer, jiffies + HZ);
|
||||
}
|
||||
|
||||
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
|
||||
{
|
||||
struct xgbe_channel *channel;
|
||||
unsigned int i;
|
||||
|
||||
DBGPR("-->xgbe_init_tx_timers\n");
|
||||
setup_timer(&pdata->service_timer, xgbe_service_timer,
|
||||
(unsigned long)pdata);
|
||||
|
||||
channel = pdata->channel;
|
||||
for (i = 0; i < pdata->channel_count; i++, channel++) {
|
||||
if (!channel->tx_ring)
|
||||
break;
|
||||
|
||||
DBGPR(" %s adding tx timer\n", channel->name);
|
||||
setup_timer(&channel->tx_timer, xgbe_tx_timer,
|
||||
(unsigned long)channel);
|
||||
}
|
||||
|
||||
DBGPR("<--xgbe_init_tx_timers\n");
|
||||
}
|
||||
|
||||
static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
|
||||
static void xgbe_start_timers(struct xgbe_prv_data *pdata)
|
||||
{
|
||||
mod_timer(&pdata->service_timer, jiffies + HZ);
|
||||
}
|
||||
|
||||
static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
|
||||
{
|
||||
struct xgbe_channel *channel;
|
||||
unsigned int i;
|
||||
|
||||
DBGPR("-->xgbe_stop_tx_timers\n");
|
||||
del_timer_sync(&pdata->service_timer);
|
||||
|
||||
channel = pdata->channel;
|
||||
for (i = 0; i < pdata->channel_count; i++, channel++) {
|
||||
if (!channel->tx_ring)
|
||||
break;
|
||||
|
||||
DBGPR(" %s deleting tx timer\n", channel->name);
|
||||
del_timer_sync(&channel->tx_timer);
|
||||
}
|
||||
|
||||
DBGPR("<--xgbe_stop_tx_timers\n");
|
||||
}
|
||||
|
||||
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
|
||||
@ -759,112 +778,12 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
|
||||
DBGPR("<--xgbe_free_rx_data\n");
|
||||
}
|
||||
|
||||
static void xgbe_adjust_link(struct net_device *netdev)
|
||||
{
|
||||
struct xgbe_prv_data *pdata = netdev_priv(netdev);
|
||||
struct xgbe_hw_if *hw_if = &pdata->hw_if;
|
||||
struct phy_device *phydev = pdata->phydev;
|
||||
int new_state = 0;
|
||||
|
||||
if (!phydev)
|
||||
return;
|
||||
|
||||
if (phydev->link) {
|
||||
/* Flow control support */
|
||||
if (pdata->pause_autoneg) {
|
||||
if (phydev->pause || phydev->asym_pause) {
|
||||
pdata->tx_pause = 1;
|
||||
pdata->rx_pause = 1;
|
||||
} else {
|
||||
pdata->tx_pause = 0;
|
||||
pdata->rx_pause = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (pdata->tx_pause != pdata->phy_tx_pause) {
|
||||
hw_if->config_tx_flow_control(pdata);
|
||||
pdata->phy_tx_pause = pdata->tx_pause;
|
||||
}
|
||||
|
||||
if (pdata->rx_pause != pdata->phy_rx_pause) {
|
||||
hw_if->config_rx_flow_control(pdata);
|
||||
pdata->phy_rx_pause = pdata->rx_pause;
|
||||
}
|
||||
|
||||
/* Speed support */
|
||||
if (phydev->speed != pdata->phy_speed) {
|
||||
new_state = 1;
|
||||
|
||||
switch (phydev->speed) {
|
||||
case SPEED_10000:
|
||||
hw_if->set_xgmii_speed(pdata);
|
||||
break;
|
||||
|
||||
case SPEED_2500:
|
||||
hw_if->set_gmii_2500_speed(pdata);
|
||||
break;
|
||||
|
||||
case SPEED_1000:
|
||||
hw_if->set_gmii_speed(pdata);
|
||||
break;
|
||||
}
|
||||
pdata->phy_speed = phydev->speed;
|
||||
}
|
||||
|
||||
if (phydev->link != pdata->phy_link) {
|
||||
new_state = 1;
|
||||
pdata->phy_link = 1;
|
||||
}
|
||||
} else if (pdata->phy_link) {
|
||||
new_state = 1;
|
||||
pdata->phy_link = 0;
|
||||
pdata->phy_speed = SPEED_UNKNOWN;
|
||||
}
|
||||
|
||||
if (new_state)
|
||||
phy_print_status(phydev);
|
||||
}
|
||||
|
||||
static int xgbe_phy_init(struct xgbe_prv_data *pdata)
|
||||
{
|
||||
struct net_device *netdev = pdata->netdev;
|
||||
struct phy_device *phydev = pdata->phydev;
|
||||
int ret;
|
||||
|
||||
pdata->phy_link = -1;
|
||||
pdata->phy_speed = SPEED_UNKNOWN;
|
||||
pdata->phy_tx_pause = pdata->tx_pause;
|
||||
pdata->phy_rx_pause = pdata->rx_pause;
|
||||
|
||||
ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
|
||||
pdata->phy_mode);
|
||||
if (ret) {
|
||||
netdev_err(netdev, "phy_connect_direct failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!phydev->drv || (phydev->drv->phy_id == 0)) {
|
||||
netdev_err(netdev, "phy_id not valid\n");
|
||||
ret = -ENODEV;
|
||||
goto err_phy_connect;
|
||||
}
|
||||
DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
|
||||
dev_name(&phydev->dev), phydev->link);
|
||||
|
||||
return 0;
|
||||
|
||||
err_phy_connect:
|
||||
phy_disconnect(phydev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
|
||||
{
|
||||
if (!pdata->phydev)
|
||||
return;
|
||||
|
||||
phy_disconnect(pdata->phydev);
|
||||
return pdata->phy_if.phy_reset(pdata);
|
||||
}
|
||||
|
||||
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
|
||||
@ -889,13 +808,14 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
|
||||
|
||||
netif_tx_stop_all_queues(netdev);
|
||||
|
||||
xgbe_stop_timers(pdata);
|
||||
flush_workqueue(pdata->dev_workqueue);
|
||||
|
||||
hw_if->powerdown_tx(pdata);
|
||||
hw_if->powerdown_rx(pdata);
|
||||
|
||||
xgbe_napi_disable(pdata, 0);
|
||||
|
||||
phy_stop(pdata->phydev);
|
||||
|
||||
pdata->power_down = 1;
|
||||
|
||||
spin_unlock_irqrestore(&pdata->lock, flags);
|
||||
@ -924,8 +844,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
|
||||
|
||||
pdata->power_down = 0;
|
||||
|
||||
phy_start(pdata->phydev);
|
||||
|
||||
xgbe_napi_enable(pdata, 0);
|
||||
|
||||
hw_if->powerup_tx(pdata);
|
||||
@ -936,6 +854,8 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
|
||||
|
||||
netif_tx_start_all_queues(netdev);
|
||||
|
||||
xgbe_start_timers(pdata);
|
||||
|
||||
spin_unlock_irqrestore(&pdata->lock, flags);
|
||||
|
||||
DBGPR("<--xgbe_powerup\n");
|
||||
@ -946,6 +866,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
|
||||
static int xgbe_start(struct xgbe_prv_data *pdata)
|
||||
{
|
||||
struct xgbe_hw_if *hw_if = &pdata->hw_if;
|
||||
struct xgbe_phy_if *phy_if = &pdata->phy_if;
|
||||
struct net_device *netdev = pdata->netdev;
|
||||
int ret;
|
||||
|
||||
@ -953,7 +874,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
|
||||
|
||||
hw_if->init(pdata);
|
||||
|
||||
phy_start(pdata->phydev);
|
||||
ret = phy_if->phy_start(pdata);
|
||||
if (ret)
|
||||
goto err_phy;
|
||||
|
||||
xgbe_napi_enable(pdata, 1);
|
||||
|
||||
@ -964,10 +887,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
|
||||
hw_if->enable_tx(pdata);
|
||||
hw_if->enable_rx(pdata);
|
||||
|
||||
xgbe_init_tx_timers(pdata);
|
||||
|
||||
netif_tx_start_all_queues(netdev);
|
||||
|
||||
xgbe_start_timers(pdata);
|
||||
schedule_work(&pdata->service_work);
|
||||
|
||||
DBGPR("<--xgbe_start\n");
|
||||
|
||||
return 0;
|
||||
@ -975,8 +899,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
|
||||
err_napi:
|
||||
xgbe_napi_disable(pdata, 1);
|
||||
|
||||
phy_stop(pdata->phydev);
|
||||
phy_if->phy_stop(pdata);
|
||||
|
||||
err_phy:
|
||||
hw_if->exit(pdata);
|
||||
|
||||
return ret;
|
||||
@ -985,6 +910,7 @@ err_napi:
|
||||
static void xgbe_stop(struct xgbe_prv_data *pdata)
|
||||
{
|
||||
struct xgbe_hw_if *hw_if = &pdata->hw_if;
|
||||
struct xgbe_phy_if *phy_if = &pdata->phy_if;
|
||||
struct xgbe_channel *channel;
|
||||
struct net_device *netdev = pdata->netdev;
|
||||
struct netdev_queue *txq;
|
||||
@ -994,7 +920,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
|
||||
|
||||
netif_tx_stop_all_queues(netdev);
|
||||
|
||||
xgbe_stop_tx_timers(pdata);
|
||||
xgbe_stop_timers(pdata);
|
||||
flush_workqueue(pdata->dev_workqueue);
|
||||
|
||||
hw_if->disable_tx(pdata);
|
||||
hw_if->disable_rx(pdata);
|
||||
@ -1003,7 +930,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
|
||||
|
||||
xgbe_napi_disable(pdata, 1);
|
||||
|
||||
phy_stop(pdata->phydev);
|
||||
phy_if->phy_stop(pdata);
|
||||
|
||||
hw_if->exit(pdata);
|
||||
|
||||
@ -1374,7 +1301,7 @@ static int xgbe_open(struct net_device *netdev)
|
||||
ret = clk_prepare_enable(pdata->sysclk);
|
||||
if (ret) {
|
||||
netdev_alert(netdev, "dma clk_prepare_enable failed\n");
|
||||
goto err_phy_init;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = clk_prepare_enable(pdata->ptpclk);
|
||||
@ -1399,14 +1326,17 @@ static int xgbe_open(struct net_device *netdev)
|
||||
if (ret)
|
||||
goto err_channels;
|
||||
|
||||
/* Initialize the device restart and Tx timestamp work struct */
|
||||
INIT_WORK(&pdata->service_work, xgbe_service);
|
||||
INIT_WORK(&pdata->restart_work, xgbe_restart);
|
||||
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
|
||||
xgbe_init_timers(pdata);
|
||||
|
||||
ret = xgbe_start(pdata);
|
||||
if (ret)
|
||||
goto err_rings;
|
||||
|
||||
clear_bit(XGBE_DOWN, &pdata->dev_state);
|
||||
|
||||
DBGPR("<--xgbe_open\n");
|
||||
|
||||
return 0;
|
||||
@ -1423,9 +1353,6 @@ err_ptpclk:
|
||||
err_sysclk:
|
||||
clk_disable_unprepare(pdata->sysclk);
|
||||
|
||||
err_phy_init:
|
||||
xgbe_phy_exit(pdata);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1449,8 +1376,7 @@ static int xgbe_close(struct net_device *netdev)
|
||||
clk_disable_unprepare(pdata->ptpclk);
|
||||
clk_disable_unprepare(pdata->sysclk);
|
||||
|
||||
/* Release the phy */
|
||||
xgbe_phy_exit(pdata);
|
||||
set_bit(XGBE_DOWN, &pdata->dev_state);
|
||||
|
||||
DBGPR("<--xgbe_close\n");
|
||||
|
||||
@ -1478,7 +1404,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
ret = NETDEV_TX_OK;
|
||||
|
||||
if (skb->len == 0) {
|
||||
netdev_err(netdev, "empty skb received from stack\n");
|
||||
netif_err(pdata, tx_err, netdev,
|
||||
"empty skb received from stack\n");
|
||||
dev_kfree_skb_any(skb);
|
||||
goto tx_netdev_return;
|
||||
}
|
||||
@ -1494,7 +1421,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
|
||||
ret = xgbe_prep_tso(skb, packet);
|
||||
if (ret) {
|
||||
netdev_err(netdev, "error processing TSO packet\n");
|
||||
netif_err(pdata, tx_err, netdev,
|
||||
"error processing TSO packet\n");
|
||||
dev_kfree_skb_any(skb);
|
||||
goto tx_netdev_return;
|
||||
}
|
||||
@ -1513,9 +1441,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
/* Configure required descriptor fields for transmission */
|
||||
hw_if->dev_xmit(channel);
|
||||
|
||||
#ifdef XGMAC_ENABLE_TX_PKT_DUMP
|
||||
xgbe_print_pkt(netdev, skb, true);
|
||||
#endif
|
||||
if (netif_msg_pktdata(pdata))
|
||||
xgbe_print_pkt(netdev, skb, true);
|
||||
|
||||
/* Stop the queue in advance if there may not be enough descriptors */
|
||||
xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
|
||||
@ -1710,7 +1637,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
|
||||
(pdata->q2tc_map[queue] == i))
|
||||
queue++;
|
||||
|
||||
DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1);
|
||||
netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
|
||||
i, offset, queue - 1);
|
||||
netdev_set_tc_queue(netdev, i, queue - offset, offset);
|
||||
offset = queue;
|
||||
}
|
||||
@ -1820,9 +1748,10 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
|
||||
lower_32_bits(rdata->rdesc_dma));
|
||||
}
|
||||
|
||||
static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
|
||||
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
|
||||
struct napi_struct *napi,
|
||||
struct xgbe_ring_data *rdata,
|
||||
unsigned int *len)
|
||||
unsigned int len)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
u8 *packet;
|
||||
@ -1832,14 +1761,31 @@ static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
|
||||
if (!skb)
|
||||
return NULL;
|
||||
|
||||
/* Start with the header buffer which may contain just the header
|
||||
* or the header plus data
|
||||
*/
|
||||
dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
|
||||
rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
|
||||
|
||||
packet = page_address(rdata->rx.hdr.pa.pages) +
|
||||
rdata->rx.hdr.pa.pages_offset;
|
||||
copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
|
||||
copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
|
||||
copy_len = min(rdata->rx.hdr.dma_len, copy_len);
|
||||
skb_copy_to_linear_data(skb, packet, copy_len);
|
||||
skb_put(skb, copy_len);
|
||||
|
||||
*len -= copy_len;
|
||||
len -= copy_len;
|
||||
if (len) {
|
||||
/* Add the remaining data as a frag */
|
||||
dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
|
||||
rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
|
||||
|
||||
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
|
||||
rdata->rx.buf.pa.pages,
|
||||
rdata->rx.buf.pa.pages_offset,
|
||||
len, rdata->rx.buf.dma_len);
|
||||
rdata->rx.buf.pa.pages = NULL;
|
||||
}
|
||||
|
||||
return skb;
|
||||
}
|
||||
@ -1877,9 +1823,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
|
||||
* bit */
|
||||
dma_rmb();
|
||||
|
||||
#ifdef XGMAC_ENABLE_TX_DESC_DUMP
|
||||
xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
|
||||
#endif
|
||||
if (netif_msg_tx_done(pdata))
|
||||
xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
|
||||
|
||||
if (hw_if->is_last_desc(rdesc)) {
|
||||
tx_packets += rdata->tx.packets;
|
||||
@ -1922,7 +1867,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
|
||||
struct sk_buff *skb;
|
||||
struct skb_shared_hwtstamps *hwtstamps;
|
||||
unsigned int incomplete, error, context_next, context;
|
||||
unsigned int len, put_len, max_len;
|
||||
unsigned int len, rdesc_len, max_len;
|
||||
unsigned int received = 0;
|
||||
int packet_count = 0;
|
||||
|
||||
@ -1932,6 +1877,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
|
||||
if (!ring)
|
||||
return 0;
|
||||
|
||||
incomplete = 0;
|
||||
context_next = 0;
|
||||
|
||||
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
|
||||
|
||||
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
|
||||
@ -1941,15 +1889,11 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
|
||||
|
||||
/* First time in loop see if we need to restore state */
|
||||
if (!received && rdata->state_saved) {
|
||||
incomplete = rdata->state.incomplete;
|
||||
context_next = rdata->state.context_next;
|
||||
skb = rdata->state.skb;
|
||||
error = rdata->state.error;
|
||||
len = rdata->state.len;
|
||||
} else {
|
||||
memset(packet, 0, sizeof(*packet));
|
||||
incomplete = 0;
|
||||
context_next = 0;
|
||||
skb = NULL;
|
||||
error = 0;
|
||||
len = 0;
|
||||
@ -1983,29 +1927,23 @@ read_again:
|
||||
|
||||
if (error || packet->errors) {
|
||||
if (packet->errors)
|
||||
DBGPR("Error in received packet\n");
|
||||
netif_err(pdata, rx_err, netdev,
|
||||
"error in received packet\n");
|
||||
dev_kfree_skb(skb);
|
||||
goto next_packet;
|
||||
}
|
||||
|
||||
if (!context) {
|
||||
put_len = rdata->rx.len - len;
|
||||
len += put_len;
|
||||
/* Length is cumulative, get this descriptor's length */
|
||||
rdesc_len = rdata->rx.len - len;
|
||||
len += rdesc_len;
|
||||
|
||||
if (!skb) {
|
||||
dma_sync_single_for_cpu(pdata->dev,
|
||||
rdata->rx.hdr.dma,
|
||||
rdata->rx.hdr.dma_len,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
skb = xgbe_create_skb(napi, rdata, &put_len);
|
||||
if (!skb) {
|
||||
if (rdesc_len && !skb) {
|
||||
skb = xgbe_create_skb(pdata, napi, rdata,
|
||||
rdesc_len);
|
||||
if (!skb)
|
||||
error = 1;
|
||||
goto skip_data;
|
||||
}
|
||||
}
|
||||
|
||||
if (put_len) {
|
||||
} else if (rdesc_len) {
|
||||
dma_sync_single_for_cpu(pdata->dev,
|
||||
rdata->rx.buf.dma,
|
||||
rdata->rx.buf.dma_len,
|
||||
@ -2014,12 +1952,12 @@ read_again:
|
||||
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
|
||||
rdata->rx.buf.pa.pages,
|
||||
rdata->rx.buf.pa.pages_offset,
|
||||
put_len, rdata->rx.buf.dma_len);
|
||||
rdesc_len,
|
||||
rdata->rx.buf.dma_len);
|
||||
rdata->rx.buf.pa.pages = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
skip_data:
|
||||
if (incomplete || context_next)
|
||||
goto read_again;
|
||||
|
||||
@ -2033,14 +1971,14 @@ skip_data:
|
||||
max_len += VLAN_HLEN;
|
||||
|
||||
if (skb->len > max_len) {
|
||||
DBGPR("packet length exceeds configured MTU\n");
|
||||
netif_err(pdata, rx_err, netdev,
|
||||
"packet length exceeds configured MTU\n");
|
||||
dev_kfree_skb(skb);
|
||||
goto next_packet;
|
||||
}
|
||||
|
||||
#ifdef XGMAC_ENABLE_RX_PKT_DUMP
|
||||
xgbe_print_pkt(netdev, skb, false);
|
||||
#endif
|
||||
if (netif_msg_pktdata(pdata))
|
||||
xgbe_print_pkt(netdev, skb, false);
|
||||
|
||||
skb_checksum_none_assert(skb);
|
||||
if (XGMAC_GET_BITS(packet->attributes,
|
||||
@ -2082,8 +2020,6 @@ next_packet:
|
||||
if (received && (incomplete || context_next)) {
|
||||
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
|
||||
rdata->state_saved = 1;
|
||||
rdata->state.incomplete = incomplete;
|
||||
rdata->state.context_next = context_next;
|
||||
rdata->state.skb = skb;
|
||||
rdata->state.len = len;
|
||||
rdata->state.error = error;
|
||||
@ -2164,8 +2100,8 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
|
||||
return processed;
|
||||
}
|
||||
|
||||
void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
|
||||
unsigned int count, unsigned int flag)
|
||||
void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
|
||||
unsigned int idx, unsigned int count, unsigned int flag)
|
||||
{
|
||||
struct xgbe_ring_data *rdata;
|
||||
struct xgbe_ring_desc *rdesc;
|
||||
@ -2173,20 +2109,29 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
|
||||
while (count--) {
|
||||
rdata = XGBE_GET_DESC_DATA(ring, idx);
|
||||
rdesc = rdata->rdesc;
|
||||
pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
|
||||
(flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
|
||||
le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
|
||||
le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
|
||||
netdev_dbg(pdata->netdev,
|
||||
"TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
|
||||
(flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
|
||||
le32_to_cpu(rdesc->desc0),
|
||||
le32_to_cpu(rdesc->desc1),
|
||||
le32_to_cpu(rdesc->desc2),
|
||||
le32_to_cpu(rdesc->desc3));
|
||||
idx++;
|
||||
}
|
||||
}
|
||||
|
||||
void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
|
||||
void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
|
||||
unsigned int idx)
|
||||
{
|
||||
pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
|
||||
le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
|
||||
le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
|
||||
struct xgbe_ring_data *rdata;
|
||||
struct xgbe_ring_desc *rdesc;
|
||||
|
||||
rdata = XGBE_GET_DESC_DATA(ring, idx);
|
||||
rdesc = rdata->rdesc;
|
||||
netdev_dbg(pdata->netdev,
|
||||
"RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
|
||||
idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
|
||||
le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
|
||||
}
|
||||
|
||||
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
|
||||
@ -2196,21 +2141,21 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
|
||||
unsigned char buffer[128];
|
||||
unsigned int i, j;
|
||||
|
||||
netdev_alert(netdev, "\n************** SKB dump ****************\n");
|
||||
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
|
||||
|
||||
netdev_alert(netdev, "%s packet of %d bytes\n",
|
||||
(tx_rx ? "TX" : "RX"), skb->len);
|
||||
netdev_dbg(netdev, "%s packet of %d bytes\n",
|
||||
(tx_rx ? "TX" : "RX"), skb->len);
|
||||
|
||||
netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
|
||||
netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
|
||||
netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
|
||||
netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
|
||||
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
|
||||
netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
|
||||
|
||||
for (i = 0, j = 0; i < skb->len;) {
|
||||
j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
|
||||
buf[i++]);
|
||||
|
||||
if ((i % 32) == 0) {
|
||||
netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
|
||||
netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
|
||||
j = 0;
|
||||
} else if ((i % 16) == 0) {
|
||||
buffer[j++] = ' ';
|
||||
@ -2220,7 +2165,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
|
||||
}
|
||||
}
|
||||
if (i % 32)
|
||||
netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
|
||||
netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
|
||||
|
||||
netdev_alert(netdev, "\n************** SKB dump ****************\n");
|
||||
netdev_dbg(netdev, "\n************** SKB dump ****************\n");
|
||||
}
|
||||
|
@ -133,6 +133,12 @@ struct xgbe_stats {
|
||||
offsetof(struct xgbe_prv_data, mmc_stats._var), \
|
||||
}
|
||||
|
||||
#define XGMAC_EXT_STAT(_string, _var) \
|
||||
{ _string, \
|
||||
FIELD_SIZEOF(struct xgbe_ext_stats, _var), \
|
||||
offsetof(struct xgbe_prv_data, ext_stats._var), \
|
||||
}
|
||||
|
||||
static const struct xgbe_stats xgbe_gstring_stats[] = {
|
||||
XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
|
||||
XGMAC_MMC_STAT("tx_packets", txframecount_gb),
|
||||
@ -140,6 +146,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
|
||||
XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
|
||||
XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
|
||||
XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
|
||||
XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
|
||||
XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
|
||||
XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
|
||||
XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
|
||||
@ -171,6 +178,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
|
||||
XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
|
||||
XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
|
||||
XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
|
||||
XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
|
||||
};
|
||||
|
||||
#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
|
||||
@ -239,9 +247,9 @@ static void xgbe_get_pauseparam(struct net_device *netdev,
|
||||
|
||||
DBGPR("-->xgbe_get_pauseparam\n");
|
||||
|
||||
pause->autoneg = pdata->pause_autoneg;
|
||||
pause->tx_pause = pdata->tx_pause;
|
||||
pause->rx_pause = pdata->rx_pause;
|
||||
pause->autoneg = pdata->phy.pause_autoneg;
|
||||
pause->tx_pause = pdata->phy.tx_pause;
|
||||
pause->rx_pause = pdata->phy.rx_pause;
|
||||
|
||||
DBGPR("<--xgbe_get_pauseparam\n");
|
||||
}
|
||||
@ -250,7 +258,6 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
|
||||
struct ethtool_pauseparam *pause)
|
||||
{
|
||||
struct xgbe_prv_data *pdata = netdev_priv(netdev);
|
||||
struct phy_device *phydev = pdata->phydev;
|
||||
int ret = 0;
|
||||
|
||||
DBGPR("-->xgbe_set_pauseparam\n");
|
||||
@ -258,21 +265,26 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
|
||||
DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
|
||||
pause->autoneg, pause->tx_pause, pause->rx_pause);
|
||||
|
||||
pdata->pause_autoneg = pause->autoneg;
|
||||
if (pause->autoneg) {
|
||||
phydev->advertising |= ADVERTISED_Pause;
|
||||
phydev->advertising |= ADVERTISED_Asym_Pause;
|
||||
if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
|
||||
return -EINVAL;
|
||||
|
||||
} else {
|
||||
phydev->advertising &= ~ADVERTISED_Pause;
|
||||
phydev->advertising &= ~ADVERTISED_Asym_Pause;
|
||||
pdata->phy.pause_autoneg = pause->autoneg;
|
||||
pdata->phy.tx_pause = pause->tx_pause;
|
||||
pdata->phy.rx_pause = pause->rx_pause;
|
||||
|
||||
pdata->tx_pause = pause->tx_pause;
|
||||
pdata->rx_pause = pause->rx_pause;
|
||||
pdata->phy.advertising &= ~ADVERTISED_Pause;
|
||||
pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
|
||||
|
||||
if (pause->rx_pause) {
|
||||
pdata->phy.advertising |= ADVERTISED_Pause;
|
||||
pdata->phy.advertising |= ADVERTISED_Asym_Pause;
|
||||
}
|
||||
|
||||
if (pause->tx_pause)
|
||||
pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
|
||||
|
||||
if (netif_running(netdev))
|
||||
ret = phy_start_aneg(phydev);
|
||||
ret = pdata->phy_if.phy_config_aneg(pdata);
|
||||
|
||||
DBGPR("<--xgbe_set_pauseparam\n");
|
||||
|
||||
@@ -283,36 +295,39 @@ static int xgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
int ret;

DBGPR("-->xgbe_get_settings\n");

if (!pdata->phydev)
return -ENODEV;
cmd->phy_address = pdata->phy.address;

ret = phy_ethtool_gset(pdata->phydev, cmd);
cmd->supported = pdata->phy.supported;
cmd->advertising = pdata->phy.advertising;
cmd->lp_advertising = pdata->phy.lp_advertising;

cmd->autoneg = pdata->phy.autoneg;
ethtool_cmd_speed_set(cmd, pdata->phy.speed);
cmd->duplex = pdata->phy.duplex;

cmd->port = PORT_NONE;
cmd->transceiver = XCVR_INTERNAL;

DBGPR("<--xgbe_get_settings\n");

return ret;
return 0;
}

static int xgbe_set_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct phy_device *phydev = pdata->phydev;
u32 speed;
int ret;

DBGPR("-->xgbe_set_settings\n");

if (!pdata->phydev)
return -ENODEV;

speed = ethtool_cmd_speed(cmd);

if (cmd->phy_address != phydev->addr)
if (cmd->phy_address != pdata->phy.address)
return -EINVAL;

if ((cmd->autoneg != AUTONEG_ENABLE) &&
@@ -333,23 +348,23 @@ static int xgbe_set_settings(struct net_device *netdev,
return -EINVAL;
}

cmd->advertising &= phydev->supported;
cmd->advertising &= pdata->phy.supported;
if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
return -EINVAL;

ret = 0;
phydev->autoneg = cmd->autoneg;
phydev->speed = speed;
phydev->duplex = cmd->duplex;
phydev->advertising = cmd->advertising;
pdata->phy.autoneg = cmd->autoneg;
pdata->phy.speed = speed;
pdata->phy.duplex = cmd->duplex;
pdata->phy.advertising = cmd->advertising;

if (cmd->autoneg == AUTONEG_ENABLE)
phydev->advertising |= ADVERTISED_Autoneg;
pdata->phy.advertising |= ADVERTISED_Autoneg;
else
phydev->advertising &= ~ADVERTISED_Autoneg;
pdata->phy.advertising &= ~ADVERTISED_Autoneg;

if (netif_running(netdev))
ret = phy_start_aneg(phydev);
ret = pdata->phy_if.phy_config_aneg(pdata);

DBGPR("<--xgbe_set_settings\n");

@@ -124,9 +124,11 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/mdio.h>

#include "xgbe.h"
#include "xgbe-common.h"
@@ -136,6 +138,49 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);

static int debug = -1;
module_param(debug, int, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP);

static const u32 xgbe_serdes_blwc[] = {
XGBE_SPEED_1000_BLWC,
XGBE_SPEED_2500_BLWC,
XGBE_SPEED_10000_BLWC,
};

static const u32 xgbe_serdes_cdr_rate[] = {
XGBE_SPEED_1000_CDR,
XGBE_SPEED_2500_CDR,
XGBE_SPEED_10000_CDR,
};

static const u32 xgbe_serdes_pq_skew[] = {
XGBE_SPEED_1000_PQ,
XGBE_SPEED_2500_PQ,
XGBE_SPEED_10000_PQ,
};

static const u32 xgbe_serdes_tx_amp[] = {
XGBE_SPEED_1000_TXAMP,
XGBE_SPEED_2500_TXAMP,
XGBE_SPEED_10000_TXAMP,
};

static const u32 xgbe_serdes_dfe_tap_cfg[] = {
XGBE_SPEED_1000_DFE_TAP_CONFIG,
XGBE_SPEED_2500_DFE_TAP_CONFIG,
XGBE_SPEED_10000_DFE_TAP_CONFIG,
};

static const u32 xgbe_serdes_dfe_tap_ena[] = {
XGBE_SPEED_1000_DFE_TAP_ENABLE,
XGBE_SPEED_2500_DFE_TAP_ENABLE,
XGBE_SPEED_10000_DFE_TAP_ENABLE,
};

static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
@@ -153,8 +198,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
pdata->rx_pause = 1;
pdata->phy_speed = SPEED_UNKNOWN;
pdata->power_down = 0;
pdata->default_autoneg = AUTONEG_ENABLE;
pdata->default_speed = SPEED_10000;

DBGPR("<--xgbe_default_config\n");
}
@@ -162,6 +205,7 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
xgbe_init_function_ptrs_dev(&pdata->hw_if);
xgbe_init_function_ptrs_phy(&pdata->phy_if);
xgbe_init_function_ptrs_desc(&pdata->desc_if);
}

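The hunks that follow appear to come from the nic driver's platform/probe code. The xgbe_serdes_* tables above carry the built-in per-speed SerDes defaults and are indexed by enum xgbe_speed (0 = 1GbE, 1 = 2.5GbE, 2 = 10GbE), matching the three-entry device properties; selecting the value for the active speed is a plain array lookup, as in this hypothetical helper (not driver code):

static u32 xgbe_sketch_blwc_for_speed(struct xgbe_prv_data *pdata,
                                      enum xgbe_speed speed)
{
        /* speed is XGBE_SPEED_1000, XGBE_SPEED_2500 or XGBE_SPEED_10000 */
        return pdata->serdes_blwc[speed];
}
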
@@ -248,23 +292,82 @@ static int xgbe_of_support(struct xgbe_prv_data *pdata)

return 0;
}

static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
struct device_node *phy_node;
struct platform_device *phy_pdev;

phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
if (phy_node) {
/* Old style device tree:
* The XGBE and PHY resources are separate
*/
phy_pdev = of_find_device_by_node(phy_node);
of_node_put(phy_node);
} else {
/* New style device tree:
* The XGBE and PHY resources are grouped together with
* the PHY resources listed last
*/
get_device(dev);
phy_pdev = pdata->pdev;
}

return phy_pdev;
}
#else /* CONFIG_OF */
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
#endif /*CONFIG_OF */

static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
return NULL;
}
#endif /* CONFIG_OF */

static unsigned int xgbe_resource_count(struct platform_device *pdev,
unsigned int type)
{
unsigned int count;
int i;

for (i = 0, count = 0; i < pdev->num_resources; i++) {
struct resource *res = &pdev->resource[i];

if (type == resource_type(res))
count++;
}

return count;
}

static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
{
struct platform_device *phy_pdev;

if (pdata->use_acpi) {
get_device(pdata->dev);
phy_pdev = pdata->pdev;
} else {
phy_pdev = xgbe_of_get_phy_pdev(pdata);
}

return phy_pdev;
}

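xgbe_resource_count() supports the grouped resource layout: when the XGBE and PHY resources share one node, the MAC and PCS regions come first and the three SerDes regions plus the auto-negotiation interrupt come last, so the starting indices are derived by subtraction (with five reg regions, phy_memnum starts at 2). A hypothetical helper condensing what xgbe_probe() does with those counts:

static void xgbe_sketch_phy_resource_start(struct platform_device *pdev,
                                           unsigned int *phy_memnum,
                                           unsigned int *phy_irqnum)
{
        /* the last three MEM resources and the last IRQ belong to the
         * integrated PHY/SerDes
         */
        *phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
        *phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
}
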
static int xgbe_probe(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata;
struct xgbe_hw_if *hw_if;
struct xgbe_desc_if *desc_if;
struct net_device *netdev;
struct device *dev = &pdev->dev;
struct device *dev = &pdev->dev, *phy_dev;
struct platform_device *phy_pdev;
struct resource *res;
const char *phy_mode;
unsigned int i;
unsigned int i, phy_memnum, phy_irqnum;
int ret;

DBGPR("--> xgbe_probe\n");
@@ -289,9 +392,36 @@ static int xgbe_probe(struct platform_device *pdev)
mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);

pdata->msg_enable = netif_msg_init(debug, default_msg_level);

set_bit(XGBE_DOWN, &pdata->dev_state);

/* Check if we should use ACPI or DT */
pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;

phy_pdev = xgbe_get_phy_pdev(pdata);
if (!phy_pdev) {
dev_err(dev, "unable to obtain phy device\n");
ret = -EINVAL;
goto err_phydev;
}
phy_dev = &phy_pdev->dev;

if (pdev == phy_pdev) {
/* New style device tree or ACPI:
* The XGBE and PHY resources are grouped together with
* the PHY resources listed last
*/
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
} else {
/* Old style device tree:
* The XGBE and PHY resources are separate
*/
phy_memnum = 0;
phy_irqnum = 0;
}

/* Set and validate the number of descriptors for a ring */
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
pdata->tx_desc_count = XGBE_TX_DESC_CNT;
@@ -318,7 +448,8 @@ static int xgbe_probe(struct platform_device *pdev)
ret = PTR_ERR(pdata->xgmac_regs);
goto err_io;
}
DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
if (netif_msg_probe(pdata))
dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);

res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
pdata->xpcs_regs = devm_ioremap_resource(dev, res);
@@ -327,7 +458,38 @@ static int xgbe_probe(struct platform_device *pdev)
ret = PTR_ERR(pdata->xpcs_regs);
goto err_io;
}
DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);

res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->rxtx_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->rxtx_regs)) {
dev_err(dev, "rxtx ioremap failed\n");
ret = PTR_ERR(pdata->rxtx_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);

res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->sir0_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->sir0_regs)) {
dev_err(dev, "sir0 ioremap failed\n");
ret = PTR_ERR(pdata->sir0_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);

res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
pdata->sir1_regs = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->sir1_regs)) {
dev_err(dev, "sir1 ioremap failed\n");
ret = PTR_ERR(pdata->sir1_regs);
goto err_io;
}
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);

/* Retrieve the MAC address */
ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
@@ -355,6 +517,115 @@ static int xgbe_probe(struct platform_device *pdev)
if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
pdata->per_channel_irq = 1;

/* Retrieve the PHY speedset */
ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
&pdata->speed_set);
if (ret) {
dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
goto err_io;
}

switch (pdata->speed_set) {
case XGBE_SPEEDSET_1000_10000:
case XGBE_SPEEDSET_2500_10000:
break;
default:
dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
ret = -EINVAL;
goto err_io;
}

/* Retrieve the PHY configuration properties */
if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_BLWC_PROPERTY,
pdata->serdes_blwc,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_BLWC_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
sizeof(pdata->serdes_blwc));
}

if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_CDR_RATE_PROPERTY,
pdata->serdes_cdr_rate,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_CDR_RATE_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
sizeof(pdata->serdes_cdr_rate));
}

if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_PQ_SKEW_PROPERTY,
pdata->serdes_pq_skew,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_PQ_SKEW_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
sizeof(pdata->serdes_pq_skew));
}

if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_TX_AMP_PROPERTY,
pdata->serdes_tx_amp,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_TX_AMP_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
sizeof(pdata->serdes_tx_amp));
}

if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_DFE_CFG_PROPERTY,
pdata->serdes_dfe_tap_cfg,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_DFE_CFG_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
sizeof(pdata->serdes_dfe_tap_cfg));
}

if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
ret = device_property_read_u32_array(phy_dev,
XGBE_DFE_ENA_PROPERTY,
pdata->serdes_dfe_tap_ena,
XGBE_SPEEDS);
if (ret) {
dev_err(dev, "invalid %s property\n",
XGBE_DFE_ENA_PROPERTY);
goto err_io;
}
} else {
memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
sizeof(pdata->serdes_dfe_tap_ena));
}

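Each of the six property blocks above follows the same shape: read a three-entry u32 array from the (possibly combined) PHY device if the property is present, otherwise copy the built-in defaults. A condensed sketch of that pattern; xgbe_sketch_get_serdes_array() is a hypothetical helper, not part of the driver:

static int xgbe_sketch_get_serdes_array(struct device *phy_dev,
                                        const char *prop, u32 *dest,
                                        const u32 *defaults)
{
        if (!device_property_present(phy_dev, prop)) {
                memcpy(dest, defaults, XGBE_SPEEDS * sizeof(*dest));
                return 0;
        }

        return device_property_read_u32_array(phy_dev, prop, dest,
                                              XGBE_SPEEDS);
}
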
/* Obtain device settings unique to ACPI/OF */
if (pdata->use_acpi)
ret = xgbe_acpi_support(pdata);
@@ -382,17 +653,23 @@ static int xgbe_probe(struct platform_device *pdev)
}
pdata->dev_irq = ret;

/* Get the auto-negotiation interrupt */
ret = platform_get_irq(phy_pdev, phy_irqnum++);
if (ret < 0) {
dev_err(dev, "platform_get_irq phy 0 failed\n");
goto err_io;
}
pdata->an_irq = ret;

netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

/* Set all the function pointers */
xgbe_init_all_fptrs(pdata);
hw_if = &pdata->hw_if;
desc_if = &pdata->desc_if;

/* Issue software reset to device */
hw_if->exit(pdata);
pdata->hw_if.exit(pdata);

/* Populate the hardware features */
xgbe_get_all_hw_features(pdata);
@@ -401,8 +678,6 @@ static int xgbe_probe(struct platform_device *pdev)
xgbe_default_config(pdata);

/* Set the DMA mask */
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
ret = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(pdata->hw_feat.dma_width));
if (ret) {
@@ -447,16 +722,8 @@ static int xgbe_probe(struct platform_device *pdev)
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

/* Prepare to regsiter with MDIO */
pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
if (!pdata->mii_bus_id) {
dev_err(dev, "failed to allocate mii bus id\n");
ret = -ENOMEM;
goto err_io;
}
ret = xgbe_mdio_register(pdata);
if (ret)
goto err_bus_id;
/* Call MDIO/PHY initialization routine */
pdata->phy_if.phy_init(pdata);

/* Set device operations */
netdev->netdev_ops = xgbe_get_netdev_ops();
@@ -501,26 +768,52 @@ static int xgbe_probe(struct platform_device *pdev)
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "net device registration failed\n");
goto err_reg_netdev;
goto err_io;
}

/* Create the PHY/ANEG name based on netdev name */
snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
netdev_name(netdev));

/* Create workqueues */
pdata->dev_workqueue =
create_singlethread_workqueue(netdev_name(netdev));
if (!pdata->dev_workqueue) {
netdev_err(netdev, "device workqueue creation failed\n");
ret = -ENOMEM;
goto err_netdev;
}

pdata->an_workqueue =
create_singlethread_workqueue(pdata->an_name);
if (!pdata->an_workqueue) {
netdev_err(netdev, "phy workqueue creation failed\n");
ret = -ENOMEM;
goto err_wq;
}

xgbe_ptp_register(pdata);

xgbe_debugfs_init(pdata);

platform_device_put(phy_pdev);

netdev_notice(netdev, "net device enabled\n");

DBGPR("<-- xgbe_probe\n");

return 0;

err_reg_netdev:
xgbe_mdio_unregister(pdata);
err_wq:
destroy_workqueue(pdata->dev_workqueue);

err_bus_id:
kfree(pdata->mii_bus_id);
err_netdev:
unregister_netdev(netdev);

err_io:
platform_device_put(phy_pdev);

err_phydev:
free_netdev(netdev);

err_alloc:
@@ -540,12 +833,14 @@ static int xgbe_remove(struct platform_device *pdev)

xgbe_ptp_unregister(pdata);

flush_workqueue(pdata->an_workqueue);
destroy_workqueue(pdata->an_workqueue);

flush_workqueue(pdata->dev_workqueue);
destroy_workqueue(pdata->dev_workqueue);

unregister_netdev(netdev);

xgbe_mdio_unregister(pdata);

kfree(pdata->mii_bus_id);

free_netdev(netdev);

DBGPR("<--xgbe_remove\n");
@@ -557,16 +852,17 @@ static int xgbe_remove(struct platform_device *pdev)
static int xgbe_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
int ret;
struct xgbe_prv_data *pdata = netdev_priv(netdev);
int ret = 0;

DBGPR("-->xgbe_suspend\n");

if (!netif_running(netdev)) {
DBGPR("<--xgbe_dev_suspend\n");
return -EINVAL;
}
if (netif_running(netdev))
ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);

ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

DBGPR("<--xgbe_suspend\n");

@@ -576,16 +872,16 @@ static int xgbe_suspend(struct device *dev)
static int xgbe_resume(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
int ret;
struct xgbe_prv_data *pdata = netdev_priv(netdev);
int ret = 0;

DBGPR("-->xgbe_resume\n");

if (!netif_running(netdev)) {
DBGPR("<--xgbe_dev_resume\n");
return -EINVAL;
}
pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
if (netif_running(netdev))
ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);

DBGPR("<--xgbe_resume\n");

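xgbe_suspend() and xgbe_resume() now power the device down or up only when the interface is running and always park or wake the PCS via MDIO_CTRL1_LPOWER. For context, a sketch of how such callbacks are typically wired into a platform driver through dev_pm_ops; the xgbe_pm_ops and xgbe_sketch_driver names here are illustrative, not taken from the driver:

static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);

static struct platform_driver xgbe_sketch_driver = {
        .driver = {
                .name = XGBE_DRV_NAME,
                .pm = &xgbe_pm_ops,
        },
        .probe = xgbe_probe,
        .remove = xgbe_remove,
};
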
File diff suppressed because it is too large
@@ -129,7 +129,7 @@
#include <net/dcbnl.h>

#define XGBE_DRV_NAME "amd-xgbe"
#define XGBE_DRV_VERSION "1.0.0-a"
#define XGBE_DRV_VERSION "1.0.2"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"

/* Descriptor related defines */
@@ -178,14 +178,17 @@
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018

/* MDIO bus phy name */
#define XGBE_PHY_NAME "amd_xgbe_phy"
#define XGBE_PRTAD 0

/* Common property names */
#define XGBE_MAC_ADDR_PROPERTY "mac-address"
#define XGBE_PHY_MODE_PROPERTY "phy-mode"
#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
#define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"

/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
@@ -241,6 +244,49 @@
#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
#define XGBE_RSS_HASH_KEY_TYPE 1

/* Auto-negotiation */
#define XGBE_AN_MS_TIMEOUT 500
#define XGBE_LINK_TIMEOUT 10

#define XGBE_AN_INT_CMPLT 0x01
#define XGBE_AN_INC_LINK 0x02
#define XGBE_AN_PG_RCV 0x04
#define XGBE_AN_INT_MASK 0x07

/* Rate-change complete wait/retry count */
#define XGBE_RATECHANGE_COUNT 500

/* Default SerDes settings */
#define XGBE_SPEED_10000_BLWC 0
#define XGBE_SPEED_10000_CDR 0x7
#define XGBE_SPEED_10000_PLL 0x1
#define XGBE_SPEED_10000_PQ 0x12
#define XGBE_SPEED_10000_RATE 0x0
#define XGBE_SPEED_10000_TXAMP 0xa
#define XGBE_SPEED_10000_WORD 0x7
#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f

#define XGBE_SPEED_2500_BLWC 1
#define XGBE_SPEED_2500_CDR 0x2
#define XGBE_SPEED_2500_PLL 0x0
#define XGBE_SPEED_2500_PQ 0xa
#define XGBE_SPEED_2500_RATE 0x1
#define XGBE_SPEED_2500_TXAMP 0xf
#define XGBE_SPEED_2500_WORD 0x1
#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0

#define XGBE_SPEED_1000_BLWC 1
#define XGBE_SPEED_1000_CDR 0x2
#define XGBE_SPEED_1000_PLL 0x0
#define XGBE_SPEED_1000_PQ 0xa
#define XGBE_SPEED_1000_RATE 0x3
#define XGBE_SPEED_1000_TXAMP 0xf
#define XGBE_SPEED_1000_WORD 0x1
#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0

struct xgbe_prv_data;

struct xgbe_packet_data {
@@ -334,8 +380,6 @@ struct xgbe_ring_data {
*/
unsigned int state_saved;
struct {
unsigned int incomplete;
unsigned int context_next;
struct sk_buff *skb;
unsigned int len;
unsigned int error;
@@ -414,6 +458,13 @@ struct xgbe_channel {
struct xgbe_ring *rx_ring;
} ____cacheline_aligned;

enum xgbe_state {
XGBE_DOWN,
XGBE_LINK,
XGBE_LINK_INIT,
XGBE_LINK_ERR,
};

enum xgbe_int {
XGMAC_INT_DMA_CH_SR_TI,
XGMAC_INT_DMA_CH_SR_TPS,
@@ -445,6 +496,57 @@ enum xgbe_mtl_fifo_size {
XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
};

enum xgbe_speed {
XGBE_SPEED_1000 = 0,
XGBE_SPEED_2500,
XGBE_SPEED_10000,
XGBE_SPEEDS,
};

enum xgbe_an {
XGBE_AN_READY = 0,
XGBE_AN_PAGE_RECEIVED,
XGBE_AN_INCOMPAT_LINK,
XGBE_AN_COMPLETE,
XGBE_AN_NO_LINK,
XGBE_AN_ERROR,
};

enum xgbe_rx {
XGBE_RX_BPA = 0,
XGBE_RX_XNP,
XGBE_RX_COMPLETE,
XGBE_RX_ERROR,
};

enum xgbe_mode {
XGBE_MODE_KR = 0,
XGBE_MODE_KX,
};

enum xgbe_speedset {
XGBE_SPEEDSET_1000_10000 = 0,
XGBE_SPEEDSET_2500_10000,
};

struct xgbe_phy {
u32 supported;
u32 advertising;
u32 lp_advertising;

int address;

int autoneg;
int speed;
int duplex;

int link;

int pause_autoneg;
int tx_pause;
int rx_pause;
};

struct xgbe_mmc_stats {
/* Tx Stats */
u64 txoctetcount_gb;
@@ -492,6 +594,11 @@ struct xgbe_mmc_stats {
u64 rxwatchdogerror;
};

struct xgbe_ext_stats {
u64 tx_tso_packets;
u64 rx_split_header_packets;
};

struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);

@@ -591,6 +698,20 @@ struct xgbe_hw_if {
int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};

struct xgbe_phy_if {
/* For initial PHY setup */
void (*phy_init)(struct xgbe_prv_data *);

/* For PHY support when setting device up/down */
int (*phy_reset)(struct xgbe_prv_data *);
int (*phy_start)(struct xgbe_prv_data *);
void (*phy_stop)(struct xgbe_prv_data *);

/* For PHY support while device is up */
void (*phy_status)(struct xgbe_prv_data *);
int (*phy_config_aneg)(struct xgbe_prv_data *);
};

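struct xgbe_phy_if is the seam that replaces phylib: the nic code only calls through these pointers, and the table is filled by xgbe_init_function_ptrs_phy(), whose implementation lives in a file whose diff is suppressed in this view. A sketch of what that initializer presumably looks like; the xgbe_phy_* callback names are placeholders, not confirmed symbols:

void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
{
        /* placeholder callback names; the real ones are in the
         * suppressed diff
         */
        phy_if->phy_init = xgbe_phy_init;
        phy_if->phy_reset = xgbe_phy_reset;
        phy_if->phy_start = xgbe_phy_start;
        phy_if->phy_stop = xgbe_phy_stop;
        phy_if->phy_status = xgbe_phy_status;
        phy_if->phy_config_aneg = xgbe_phy_config_aneg;
}
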
struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
@@ -660,6 +781,9 @@ struct xgbe_prv_data {
/* XGMAC/XPCS related mmio registers */
void __iomem *xgmac_regs; /* XGMAC CSRs */
void __iomem *xpcs_regs; /* XPCS MMD registers */
void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
void __iomem *sir1_regs; /* SerDes integration registers (2/2) */

/* Overall device lock */
spinlock_t lock;
@@ -670,10 +794,14 @@ struct xgbe_prv_data {
/* RSS addressing mutex */
struct mutex rss_mutex;

/* Flags representing xgbe_state */
unsigned long dev_state;

int dev_irq;
unsigned int per_channel_irq;

struct xgbe_hw_if hw_if;
struct xgbe_phy_if phy_if;
struct xgbe_desc_if desc_if;

/* AXI DMA settings */
@@ -682,6 +810,11 @@ struct xgbe_prv_data {
unsigned int arcache;
unsigned int awcache;

/* Service routine support */
struct workqueue_struct *dev_workqueue;
struct work_struct service_work;
struct timer_list service_timer;

/* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel;
unsigned int channel_count;
@@ -729,27 +862,12 @@ struct xgbe_prv_data {
u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
u32 rss_options;

/* MDIO settings */
struct module *phy_module;
char *mii_bus_id;
struct mii_bus *mii;
int mdio_mmd;
struct phy_device *phydev;
int default_autoneg;
int default_speed;

/* Current PHY settings */
phy_interface_t phy_mode;
int phy_link;
int phy_speed;
unsigned int phy_tx_pause;
unsigned int phy_rx_pause;

/* Netdev related settings */
unsigned char mac_addr[ETH_ALEN];
netdev_features_t netdev_features;
struct napi_struct napi;
struct xgbe_mmc_stats mmc_stats;
struct xgbe_ext_stats ext_stats;

/* Filtering support */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -787,6 +905,54 @@ struct xgbe_prv_data {
/* Keeps track of power mode */
unsigned int power_down;

/* Network interface message level setting */
u32 msg_enable;

/* Current PHY settings */
phy_interface_t phy_mode;
int phy_link;
int phy_speed;

/* MDIO/PHY related settings */
struct xgbe_phy phy;
int mdio_mmd;
unsigned long link_check;

char an_name[IFNAMSIZ + 32];
struct workqueue_struct *an_workqueue;

int an_irq;
struct work_struct an_irq_work;

unsigned int speed_set;

/* SerDes UEFI configurable settings.
* Switching between modes/speeds requires new values for some
* SerDes settings. The values can be supplied as device
* properties in array format. The first array entry is for
* 1GbE, second for 2.5GbE and third for 10GbE
*/
u32 serdes_blwc[XGBE_SPEEDS];
u32 serdes_cdr_rate[XGBE_SPEEDS];
u32 serdes_pq_skew[XGBE_SPEEDS];
u32 serdes_tx_amp[XGBE_SPEEDS];
u32 serdes_dfe_tap_cfg[XGBE_SPEEDS];
u32 serdes_dfe_tap_ena[XGBE_SPEEDS];

/* Auto-negotiation state machine support */
struct mutex an_mutex;
enum xgbe_an an_result;
enum xgbe_an an_state;
enum xgbe_rx kr_state;
enum xgbe_rx kx_state;
struct work_struct an_work;
unsigned int an_supported;
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;

unsigned int lpm_ctrl; /* CTRL1 for resume */

#ifdef CONFIG_DEBUG_FS
struct dentry *xgbe_debugfs;

@@ -800,6 +966,7 @@ struct xgbe_prv_data {
/* Function prototypes*/

void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
struct net_device_ops *xgbe_get_netdev_ops(void);
struct ethtool_ops *xgbe_get_ethtool_ops(void);
@@ -807,14 +974,11 @@ struct ethtool_ops *xgbe_get_ethtool_ops(void);
const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
#endif

int xgbe_mdio_register(struct xgbe_prv_data *);
void xgbe_mdio_unregister(struct xgbe_prv_data *);
void xgbe_dump_phy_registers(struct xgbe_prv_data *);
void xgbe_ptp_register(struct xgbe_prv_data *);
void xgbe_ptp_unregister(struct xgbe_prv_data *);
void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
unsigned int);
void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
unsigned int, unsigned int, unsigned int);
void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
unsigned int);
void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
void xgbe_get_all_hw_features(struct xgbe_prv_data *);
@@ -831,18 +995,6 @@ static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
#endif /* CONFIG_DEBUG_FS */

/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
#if 0
#define XGMAC_ENABLE_TX_DESC_DUMP
#define XGMAC_ENABLE_RX_DESC_DUMP
#endif

/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
#if 0
#define XGMAC_ENABLE_TX_PKT_DUMP
#define XGMAC_ENABLE_RX_PKT_DUMP
#endif

/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
#if 0
#define YDEBUG
@@ -852,10 +1004,8 @@ static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
/* For debug prints */
#ifdef YDEBUG
#define DBGPR(x...) pr_alert(x)
#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
#else
#define DBGPR(x...) do { } while (0)
#define DBGPHY_REGS(x...) do { } while (0)
#endif

#ifdef YDEBUG_MDIO

@@ -24,13 +24,6 @@ config AMD_PHY
---help---
Currently supports the am79c874

config AMD_XGBE_PHY
tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
depends on (OF || ACPI) && HAS_IOMEM
depends on ARM64 || COMPILE_TEST
---help---
Currently supports the AMD 10GbE PHY

config MARVELL_PHY
tristate "Drivers for Marvell PHYs"
---help---

@@ -33,5 +33,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o

File diff suppressed because it is too large