ALT Linux Bugzilla – Attachment 2143 Details for Bug 12221: [4.0.1] forcedeth & MAC
Description: linux-2.6.18-forcedeth-0.60.patch
Filename:    linux-2.6.18-forcedeth-0.60.patch
MIME Type:   text/plain
Creator:     led
Created:     2007-08-12 20:17:32 MSD
Size:        134.56 KB
Flags:       patch, obsolete
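Before the diff itself, one note on the data-path change that most of the hunks below serve: the patch drops the old next_tx/nic_tx index counters in favor of wrapping get/put ring pointers, and nv_start_xmit() decides whether a packet fits by computing empty_slots as the ring size minus the modular distance between the put and get context pointers. A minimal standalone C sketch of just that arithmetic (function and variable names here are illustrative, not taken from the driver):

#include <stdio.h>

/* Free descriptors in a circular ring of ring_size entries, given the
 * submission (put) and completion (get) positions; this mirrors the
 * empty_slots computation in the patched nv_start_xmit(). */
static unsigned int empty_slots(int ring_size, int put, int get)
{
    return (unsigned int)(ring_size - ((ring_size + (put - get)) % ring_size));
}

int main(void)
{
    /* TX_RING_DEFAULT is 64 in this patch; with 10 descriptors in
     * flight (get = 32, put = 42), 54 slots remain free. */
    printf("%u\n", empty_slots(64, 42, 32));
    return 0;
}

Adding ring_size to the (possibly negative) pointer difference before taking the remainder keeps the modulus positive, so the same expression works whether or not put has wrapped past get. Transmission proceeds only while empty_slots > entries, which is consistent with the patch also dropping the per-packet spin_lock_irq()/spin_unlock_irq() pair from nv_start_xmit(), as visible in the hunks below.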
diff -urN kernel-source-2.6.18.orig/drivers/net/forcedeth.c kernel-source-2.6.18/drivers/net/forcedeth.c
--- kernel-source-2.6.18.orig/drivers/net/forcedeth.c 2006-09-20 06:42:06 +0300
+++ kernel-source-2.6.18/drivers/net/forcedeth.c 2007-08-12 02:39:41 +0300
@@ -109,6 +109,10 @@
  * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  * 0.55: 22 Mar 2006: Add flow control (pause frame).
  * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
+ * 0.57: 14 May 2006: Moved mac address writes to nv_probe and nv_remove.
+ * 0.58: 20 May 2006: Optimized rx and tx data paths.
+ * 0.59: 31 May 2006: Added support for sideband management unit.
+ * 0.60: 31 May 2006: Added support for recoverable error.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -120,7 +124,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION "0.56"
+#define FORCEDETH_VERSION "0.60"
 #define DRV_NAME "forcedeth"
 
 #include <linux/module.h>
@@ -168,11 +172,23 @@
 #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
 #define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */
 #define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */
+#define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */
+
+#define NVIDIA_ETHERNET_ID(deviceid,nv_driver_data) {\
+ .vendor = PCI_VENDOR_ID_NVIDIA, \
+ .device = deviceid, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = nv_driver_data, \
+ },
+
+#define Mv_LED_Control 16
+#define Mv_Page_Address 22
 
 enum {
 NvRegIrqStatus = 0x000,
 #define NVREG_IRQSTAT_MIIEVENT 0x040
-#define NVREG_IRQSTAT_MASK 0x1ff
+#define NVREG_IRQSTAT_MASK 0x81ff
 NvRegIrqMask = 0x004,
 #define NVREG_IRQ_RX_ERROR 0x0001
 #define NVREG_IRQ_RX 0x0002
@@ -183,15 +199,16 @@
 #define NVREG_IRQ_LINK 0x0040
 #define NVREG_IRQ_RX_FORCED 0x0080
 #define NVREG_IRQ_TX_FORCED 0x0100
+#define NVREG_IRQ_RECOVER_ERROR 0x8000
 #define NVREG_IRQMASK_THROUGHPUT 0x00df
 #define NVREG_IRQMASK_CPU 0x0040
 #define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
 #define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
-#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
+#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
 
 #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
 NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
- NVREG_IRQ_TX_FORCED))
+ NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))
 
 NvRegUnknownSetupReg6 = 0x008,
 #define NVREG_UNKSETUP6_VAL 3
@@ -216,6 +233,16 @@
 #define NVREG_MAC_RESET_ASSERT 0x0F3
 NvRegTransmitterControl = 0x084,
 #define NVREG_XMITCTL_START 0x01
+#define NVREG_XMITCTL_MGMT_ST 0x40000000
+#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
+#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
+#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
+#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
+#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
+#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
+#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
+#define NVREG_XMITCTL_HOST_LOADED 0x00004000
+#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
 NvRegTransmitterStatus = 0x088,
 #define NVREG_XMITSTAT_BUSY 0x01
 
@@ -231,6 +258,7 @@
 #define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
 NvRegReceiverControl = 0x094,
 #define NVREG_RCVCTL_START 0x01
+#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
 NvRegReceiverStatus = 0x98,
 #define NVREG_RCVSTAT_BUSY 0x01
 
@@ -241,7 +269,7 @@
 #define NVREG_RNDSEED_FORCE3 0x7400
 
 NvRegTxDeferral = 0xA0,
-#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
+#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
 #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
 #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
 NvRegRxDeferral = 0xA4,
@@ -262,7 +290,8 @@
 NvRegRingSizes = 0x108,
 #define NVREG_RINGSZ_TXSHIFT 0
 #define NVREG_RINGSZ_RXSHIFT 16
- NvRegUnknownTransmitterReg = 0x10c,
+ NvRegTransmitPoll = 0x10c,
+#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
 NvRegLinkSpeed = 0x110,
 #define NVREG_LINKSPEED_FORCE 0x10000
 #define NVREG_LINKSPEED_10 1000
@@ -283,8 +312,8 @@
 #define NVREG_TXRXCTL_RESET 0x0010
 #define NVREG_TXRXCTL_RXCHECK 0x0400
 #define NVREG_TXRXCTL_DESC_1 0
-#define NVREG_TXRXCTL_DESC_2 0x02100
-#define NVREG_TXRXCTL_DESC_3 0x02200
+#define NVREG_TXRXCTL_DESC_2 0x002100
+#define NVREG_TXRXCTL_DESC_3 0xc02200
 #define NVREG_TXRXCTL_VLANSTRIP 0x00040
 #define NVREG_TXRXCTL_VLANINS 0x00080
 NvRegTxRingPhysAddrHigh = 0x148,
@@ -297,8 +326,8 @@
 #define NVREG_MIISTAT_LINKCHANGE 0x0008
 #define NVREG_MIISTAT_MASK 0x000f
 #define NVREG_MIISTAT_MASK2 0x000f
- NvRegUnknownSetupReg4 = 0x184,
-#define NVREG_UNKSETUP4_VAL 8
+ NvRegMIIMask = 0x184,
+#define NVREG_MII_LINKCHANGE 0x0008
 
 NvRegAdapterControl = 0x188,
 #define NVREG_ADAPTCTL_START 0x02
@@ -328,6 +357,7 @@
 #define NVREG_WAKEUPFLAGS_ENABLE 0x1111
 
 NvRegPatternCRC = 0x204,
+#define NV_UNKNOWN_VAL 0x01
 NvRegPatternMask = 0x208,
 NvRegPowerCap = 0x268,
 #define NVREG_POWERCAP_D3SUPP (1<<30)
@@ -368,6 +398,7 @@
 NvRegTxPause = 0x2e0,
 NvRegRxPause = 0x2e4,
 NvRegRxDropFrame = 0x2e8,
+
 NvRegVlanControl = 0x300,
 #define NVREG_VLANCONTROL_ENABLE 0x2000
 NvRegMSIXMap0 = 0x3e0,
@@ -409,7 +440,7 @@
 #define NV_TX_CARRIERLOST (1<<27)
 #define NV_TX_LATECOLLISION (1<<28)
 #define NV_TX_UNDERFLOW (1<<29)
-#define NV_TX_ERROR (1<<30)
+#define NV_TX_ERROR (1<<30) /* logical OR of all errors */
 #define NV_TX_VALID (1<<31)
 
 #define NV_TX2_LASTPACKET (1<<29)
@@ -420,7 +451,7 @@
 #define NV_TX2_LATECOLLISION (1<<27)
 #define NV_TX2_UNDERFLOW (1<<28)
 /* error and valid are the same for both */
-#define NV_TX2_ERROR (1<<30)
+#define NV_TX2_ERROR (1<<30) /* logical OR of all errors */
 #define NV_TX2_VALID (1<<31)
 #define NV_TX2_TSO (1<<28)
 #define NV_TX2_TSO_SHIFT 14
@@ -441,7 +472,7 @@
 #define NV_RX_CRCERR (1<<27)
 #define NV_RX_OVERFLOW (1<<28)
 #define NV_RX_FRAMINGERR (1<<29)
-#define NV_RX_ERROR (1<<30)
+#define NV_RX_ERROR (1<<30) /* logical OR of all errors */
 #define NV_RX_AVAIL (1<<31)
 
 #define NV_RX2_CHECKSUMMASK (0x1C000000)
@@ -458,7 +489,7 @@
 #define NV_RX2_OVERFLOW (1<<23)
 #define NV_RX2_FRAMINGERR (1<<24)
 /* error and avail are the same for both */
-#define NV_RX2_ERROR (1<<30)
+#define NV_RX2_ERROR (1<<30) /* logical OR of all errors */
 #define NV_RX2_AVAIL (1<<31)
 
 #define NV_RX3_VLAN_TAG_PRESENT (1<<16)
@@ -492,12 +523,12 @@
 #define NV_WATCHDOG_TIMEO (5*HZ)
 
 #define RX_RING_DEFAULT 128
-#define TX_RING_DEFAULT 256
-#define RX_RING_MIN 128
-#define TX_RING_MIN 64
+#define TX_RING_DEFAULT 64
+#define RX_RING_MIN RX_RING_DEFAULT
+#define TX_RING_MIN TX_RING_DEFAULT
 #define RING_MAX_DESC_VER_1 1024
 #define RING_MAX_DESC_VER_2_3 16384
-/*
+/*
  * Difference between the get and put pointers for the tx ring.
  * This is used to throttle the amount of data outstanding in the
  * tx ring.
@@ -518,7 +549,7 @@
 #define LINK_TIMEOUT (3*HZ)
 #define STATS_INTERVAL (10*HZ)
 
-/*
+/*
  * desc_ver values:
  * The nic supports three different descriptor types:
  * - DESC_VER_1: Original
@@ -532,16 +563,37 @@
 /* PHY defines */
 #define PHY_OUI_MARVELL 0x5043
 #define PHY_OUI_CICADA 0x03f1
+#define PHY_OUI_VITESSE 0x01c1
 #define PHYID1_OUI_MASK 0x03ff
 #define PHYID1_OUI_SHFT 6
 #define PHYID2_OUI_MASK 0xfc00
 #define PHYID2_OUI_SHFT 10
-#define PHY_INIT1 0x0f000
-#define PHY_INIT2 0x0e00
-#define PHY_INIT3 0x01000
-#define PHY_INIT4 0x0200
-#define PHY_INIT5 0x0004
-#define PHY_INIT6 0x02000
+#define PHYID2_MODEL_MASK 0x03f0
+#define PHY_MODEL_MARVELL_E3016 0x220
+#define PHY_MARVELL_E3016_INITMASK 0x0300
+#define PHY_CICADA_INIT1 0x0f000
+#define PHY_CICADA_INIT2 0x0e00
+#define PHY_CICADA_INIT3 0x01000
+#define PHY_CICADA_INIT4 0x0200
+#define PHY_CICADA_INIT5 0x0004
+#define PHY_CICADA_INIT6 0x02000
+#define PHY_VITESSE_INIT_REG1 0x1f
+#define PHY_VITESSE_INIT_REG2 0x10
+#define PHY_VITESSE_INIT_REG3 0x11
+#define PHY_VITESSE_INIT_REG4 0x12
+#define PHY_VITESSE_INIT_MSK1 0xc
+#define PHY_VITESSE_INIT_MSK2 0x0180
+#define PHY_VITESSE_INIT1 0x52b5
+#define PHY_VITESSE_INIT2 0xaf8a
+#define PHY_VITESSE_INIT3 0x8
+#define PHY_VITESSE_INIT4 0x8f8a
+#define PHY_VITESSE_INIT5 0xaf86
+#define PHY_VITESSE_INIT6 0x8f86
+#define PHY_VITESSE_INIT7 0xaf82
+#define PHY_VITESSE_INIT8 0x0100
+#define PHY_VITESSE_INIT9 0x8f82
+#define PHY_VITESSE_INIT10 0x0
+
 #define PHY_GIGABIT 0x0100
 
 #define PHY_TIMEOUT 0x1
@@ -573,72 +625,93 @@
 #define NV_MSI_X_VECTOR_OTHER 0x2
 
 /* statistics */
+#define NV_STATS_COUNT_SW 10
+
+#define NVLAN_DISABLE_ALL_FEATURES do { \
+ msi = NV_MSI_INT_DISABLED; \
+ msix = NV_MSIX_INT_DISABLED; \
+ scatter_gather = NV_SCATTER_GATHER_DISABLED; \
+ tso_offload = NV_TSO_DISABLED; \
+ tx_checksum_offload = NV_TX_CHECKSUM_DISABLED; \
+ rx_checksum_offload = NV_RX_CHECKSUM_DISABLED; \
+ tx_flow_control = NV_TX_FLOW_CONTROL_DISABLED; \
+ rx_flow_control = NV_RX_FLOW_CONTROL_DISABLED; \
+ wol = NV_WOL_DISABLED; \
+ tagging_8021pq = NV_8021PQ_DISABLED; \
+} while (0)
+
 struct nv_ethtool_str {
 char name[ETH_GSTRING_LEN];
 };
 
 static const struct nv_ethtool_str nv_estats_str[] = {
+ { "tx_dropped" },
+ { "tx_fifo_errors" },
+ { "tx_carrier_errors" },
+ { "tx_packets" },
 { "tx_bytes" },
+ { "rx_crc_errors" },
+ { "rx_over_errors" },
+ { "rx_errors_total" },
+ { "rx_packets" },
+ { "rx_bytes" },
+
+ /* hardware counters */
 { "tx_zero_rexmt" },
 { "tx_one_rexmt" },
 { "tx_many_rexmt" },
 { "tx_late_collision" },
- { "tx_fifo_errors" },
- { "tx_carrier_errors" },
 { "tx_excess_deferral" },
 { "tx_retry_error" },
- { "tx_deferral" },
- { "tx_packets" },
- { "tx_pause" },
 { "rx_frame_error" },
 { "rx_extra_byte" },
 { "rx_late_collision" },
 { "rx_runt" },
 { "rx_frame_too_long" },
- { "rx_over_errors" },
- { "rx_crc_errors" },
 { "rx_frame_align_error" },
 { "rx_length_error" },
 { "rx_unicast" },
 { "rx_multicast" },
 { "rx_broadcast" },
- { "rx_bytes" },
+ { "tx_deferral" },
+ { "tx_pause" },
 { "rx_pause" },
- { "rx_drop_frame" },
- { "rx_packets" },
- { "rx_errors_total" }
+ { "rx_drop_frame" }
 };
 
 struct nv_ethtool_stats {
+ u64 tx_dropped;
+ u64 tx_fifo_errors;
+ u64 tx_carrier_errors;
+ u64 tx_packets;
 u64 tx_bytes;
+ u64 rx_crc_errors;
+ u64 rx_over_errors;
+ u64 rx_errors_total;
+ u64 rx_packets;
+ u64 rx_bytes;
+
+ /* hardware counters */
 u64 tx_zero_rexmt;
 u64 tx_one_rexmt;
 u64 tx_many_rexmt;
 u64 tx_late_collision;
- u64 tx_fifo_errors;
- u64 tx_carrier_errors;
 u64 tx_excess_deferral;
 u64 tx_retry_error;
- u64 tx_deferral;
- u64 tx_packets;
- u64 tx_pause;
 u64 rx_frame_error;
 u64 rx_extra_byte;
 u64 rx_late_collision;
 u64 rx_runt;
 u64 rx_frame_too_long;
- u64 rx_over_errors;
- u64 rx_crc_errors;
 u64 rx_frame_align_error;
 u64 rx_length_error;
 u64 rx_unicast;
 u64 rx_multicast;
 u64 rx_broadcast;
- u64 rx_bytes;
+ u64 tx_deferral;
+ u64 tx_pause;
 u64 rx_pause;
 u64 rx_drop_frame;
- u64 rx_packets;
- u64 rx_errors_total;
 };
 
 /* diagnostics */
@@ -667,20 +740,62 @@
 { 0,0 }
 };
 
+struct nv_skb_map {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned int dma_len;
+};
+
 /*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 * by the arch code for interrupts.
- * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
+ * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
 * needs dev->priv->lock :-(
- * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
 */
 
 /* in dev: base, irq */
 struct fe_priv {
+
+ /* fields used in fast path are grouped together
+ for better cache performance
+ */
 spinlock_t lock;
+ void __iomem *base;
+ struct pci_dev *pci_dev;
+ u32 txrxctl_bits;
+ int stop_tx;
+ int need_linktimer;
+ unsigned long link_timeout;
+ u32 irqmask;
+ u32 msi_flags;
+
+ unsigned int rx_buf_sz;
+ struct vlan_group *vlangrp;
+ int tx_ring_size;
+ int rx_csum;
+
+ /*
+ * rx specific fields in fast path
+ */
+ ring_type get_rx __attribute__((aligned(L1_CACHE_BYTES)));
+ ring_type put_rx, first_rx, last_rx;
+ struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
+ struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
+
+ /*
+ * tx specific fields in fast path
+ */
+ ring_type get_tx __attribute__((aligned(L1_CACHE_BYTES)));
+ ring_type put_tx, first_tx, last_tx;
+ struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
+ struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+
+ struct nv_skb_map *rx_skb;
+ struct nv_skb_map *tx_skb;
 
 /* General data:
 * Locking: spin_lock(&np->lock); */
@@ -694,64 +809,46 @@
 int phyaddr;
 int wolenabled;
 unsigned int phy_oui;
+ unsigned int phy_model;
 u16 gigabit;
 int intr_test;
+ int recover_error;
 
 /* General data: RO fields */
 dma_addr_t ring_addr;
- struct pci_dev *pci_dev;
 u32 orig_mac[2];
- u32 irqmask;
 u32 desc_ver;
- u32 txrxctl_bits;
 u32 vlanctl_bits;
 u32 driver_data;
 u32 register_size;
-
- void __iomem *base;
+ u32 mac_in_use;
 
 /* rx specific fields.
 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 */
 ring_type rx_ring;
- unsigned int cur_rx, refill_rx;
- struct sk_buff **rx_skbuff;
- dma_addr_t *rx_dma;
- unsigned int rx_buf_sz;
 unsigned int pkt_limit;
 struct timer_list oom_kick;
 struct timer_list nic_poll;
 struct timer_list stats_poll;
 u32 nic_poll_irq;
 int rx_ring_size;
-
- /* media detection workaround.
- * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
- */
- int need_linktimer;
- unsigned long link_timeout;
+ u32 rx_len_errors;
 /*
 * tx specific fields.
 */
 ring_type tx_ring;
- unsigned int next_tx, nic_tx;
- struct sk_buff **tx_skbuff;
- dma_addr_t *tx_dma;
- unsigned int *tx_dma_len;
 u32 tx_flags;
- int tx_ring_size;
 int tx_limit_start;
 int tx_limit_stop;
 
- /* vlan fields */
- struct vlan_group *vlangrp;
 
 /* msi/msi-x fields */
- u32 msi_flags;
 struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
 
 /* flow control */
 u32 pause_flags;
+ u32 led_stats[3];
 };
 
 /*
@@ -762,12 +859,12 @@
 
 /*
 * Optimization can be either throuput mode or cpu mode
- *
+ *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
 enum {
- NV_OPTIMIZATION_MODE_THROUGHPUT,
+ NV_OPTIMIZATION_MODE_THROUGHPUT,
 NV_OPTIMIZATION_MODE_CPU
 };
 static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
@@ -788,16 +885,112 @@
 NV_MSI_INT_DISABLED,
 NV_MSI_INT_ENABLED
 };
+
+#ifdef CONFIG_PCI_MSI
 static int msi = NV_MSI_INT_ENABLED;
+#else
+static int msi = NV_MSI_INT_DISABLED;
+#endif
 
 /*
 * MSIX interrupts
 */
 enum {
- NV_MSIX_INT_DISABLED,
+ NV_MSIX_INT_DISABLED,
 NV_MSIX_INT_ENABLED
 };
+
+#ifdef CONFIG_PCI_MSI
 static int msix = NV_MSIX_INT_ENABLED;
+#else
+static int msix = NV_MSIX_INT_DISABLED;
+#endif
+/*
+ * PHY Speed and Duplex
+ */
+enum {
+ NV_SPEED_DUPLEX_AUTO,
+ NV_SPEED_DUPLEX_10_HALF_DUPLEX,
+ NV_SPEED_DUPLEX_10_FULL_DUPLEX,
+ NV_SPEED_DUPLEX_100_HALF_DUPLEX,
+ NV_SPEED_DUPLEX_100_FULL_DUPLEX,
+ NV_SPEED_DUPLEX_1000_FULL_DUPLEX
+};
+static int speed_duplex = NV_SPEED_DUPLEX_AUTO;
+
+/*
+ * PHY autonegotiation
+ */
+static int autoneg = AUTONEG_ENABLE;
+
+/*
+ * Scatter gather
+ */
+enum {
+ NV_SCATTER_GATHER_DISABLED,
+ NV_SCATTER_GATHER_ENABLED
+};
+static int scatter_gather = NV_SCATTER_GATHER_ENABLED;
+
+/*
+ * TCP Segmentation Offload (TSO)
+ */
+enum {
+ NV_TSO_DISABLED,
+ NV_TSO_ENABLED
+};
+static int tso_offload = NV_TSO_ENABLED;
+
+/*
+ * MTU settings
+ */
+static int mtu = ETH_DATA_LEN;
+
+/*
+ * Tx checksum offload
+ */
+enum {
+ NV_TX_CHECKSUM_DISABLED,
+ NV_TX_CHECKSUM_ENABLED
+};
+static int tx_checksum_offload = NV_TX_CHECKSUM_ENABLED;
+
+/*
+ * Rx checksum offload
+ */
+enum {
+ NV_RX_CHECKSUM_DISABLED,
+ NV_RX_CHECKSUM_ENABLED
+};
+static int rx_checksum_offload = NV_RX_CHECKSUM_ENABLED;
+
+/*
+ * Tx ring size
+ */
+static int tx_ring_size = TX_RING_DEFAULT;
+
+/*
+ * Rx ring size
+ */
+static int rx_ring_size = RX_RING_DEFAULT;
+
+/*
+ * Tx flow control
+ */
+enum {
+ NV_TX_FLOW_CONTROL_DISABLED,
+ NV_TX_FLOW_CONTROL_ENABLED
+};
+static int tx_flow_control = NV_TX_FLOW_CONTROL_ENABLED;
+
+/*
+ * Rx flow control
+ */
+enum {
+ NV_RX_FLOW_CONTROL_DISABLED,
+ NV_RX_FLOW_CONTROL_ENABLED
+};
+static int rx_flow_control = NV_RX_FLOW_CONTROL_ENABLED;
 
 /*
 * DMA 64bit
@@ -808,14 +1001,64 @@
 };
 static int dma_64bit = NV_DMA_64BIT_ENABLED;
 
+/*
+ * Wake On Lan
+ */
+enum {
+ NV_WOL_DISABLED,
+ NV_WOL_ENABLED
+};
+static int wol = NV_WOL_DISABLED;
+
+/*
+ * Tagging 802.1pq
+ */
+enum {
+ NV_8021PQ_DISABLED,
+ NV_8021PQ_ENABLED
+};
+static int tagging_8021pq = NV_8021PQ_ENABLED;
+
+static void nv_msleep(unsigned int msecs)
+{
+ msleep(msecs);
+}
+
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
 return netdev_priv(dev);
 }
 
+static void __init quirk_nforce_network_class(struct pci_dev *pdev)
+{
+ /* Some implementations of the nVidia network controllers
+ * show up as bridges, when we need to see them as network
+ * devices.
+ */
+
+ /* If this is already known as a network ctlr, do nothing. */
+ if ((pdev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET)
+ return;
+
+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_OTHER) {
+ char c;
+
+ /* Clearing bit 6 of the register at 0xf8
+ * selects Ethernet device class
+ */
+ pci_read_config_byte(pdev, 0xf8, &c);
+ c &= 0xbf;
+ pci_write_config_byte(pdev, 0xf8, c);
+
+ /* sysfs needs pdev->class to be set correctly */
+ pdev->class &= 0x0000ff;
+ pdev->class |= (PCI_CLASS_NETWORK_ETHERNET << 8);
+ }
+}
+
 static inline u8 __iomem *get_hwbase(struct net_device *dev)
 {
- return ((struct fe_priv *)netdev_priv(dev))->base;
+ return ((struct fe_priv *)get_nvpriv(dev))->base;
 }
 
 static inline void pci_push(u8 __iomem *base)
@@ -893,16 +1136,10 @@
 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
 np->rx_ring.ex, np->ring_addr);
 }
- if (np->rx_skbuff)
- kfree(np->rx_skbuff);
- if (np->rx_dma)
- kfree(np->rx_dma);
- if (np->tx_skbuff)
- kfree(np->tx_skbuff);
- if (np->tx_dma)
- kfree(np->tx_dma);
- if (np->tx_dma_len)
- kfree(np->tx_dma_len);
+ if (np->rx_skb)
+ kfree(np->rx_skb);
+ if (np->tx_skb)
+ kfree(np->tx_skb);
 }
 
 static int using_multi_irqs(struct net_device *dev)
@@ -910,7 +1147,7 @@
 struct fe_priv *np = get_nvpriv(dev);
 
 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
 return 0;
 else
@@ -921,6 +1158,8 @@
 {
 struct fe_priv *np = get_nvpriv(dev);
 
+ dprintk(KERN_DEBUG "%s: nv_enable_irq: begin\n",dev->name);
+ /* modify network device class id */
 if (!using_multi_irqs(dev)) {
 if (np->msi_flags & NV_MSI_X_ENABLED)
 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
@@ -937,6 +1176,7 @@
 {
 struct fe_priv *np = get_nvpriv(dev);
 
+ dprintk(KERN_DEBUG "%s: nv_disable_irq: begin\n",dev->name);
 if (!using_multi_irqs(dev)) {
 if (np->msi_flags & NV_MSI_X_ENABLED)
 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
@@ -1020,29 +1260,83 @@
 return retval;
 }
 
-static int phy_reset(struct net_device *dev)
+static void nv_save_LED_stats(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 reg=0;
+ u32 value=0;
+ int i=0;
+
+ reg = Mv_Page_Address;
+ value = 3;
+ mii_rw(dev,np->phyaddr,reg,value);
+ udelay(5);
+
+ reg = Mv_LED_Control;
+ for(i=0;i<3;i++){
+ np->led_stats[i]=mii_rw(dev,np->phyaddr,reg+i,MII_READ);
+ dprintk(KERN_DEBUG "%s: save LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
+ }
+
+ reg = Mv_Page_Address;
+ value = 0;
+ mii_rw(dev,np->phyaddr,reg,value);
+ udelay(5);
+}
+
+static void nv_restore_LED_stats(struct net_device *dev)
+{
+
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 reg=0;
+ u32 value=0;
+ int i=0;
+
+ reg = Mv_Page_Address;
+ value = 3;
+ mii_rw(dev,np->phyaddr,reg,value);
+ udelay(5);
+
+ reg = Mv_LED_Control;
+ for(i=0;i<3;i++){
+ mii_rw(dev,np->phyaddr,reg+i,np->led_stats[i]);
+ udelay(1);
+ dprintk(KERN_DEBUG "%s: restore LED reg%d: value=0x%x\n",dev->name,reg+i,np->led_stats[i]);
+ }
+
+ reg = Mv_Page_Address;
+ value = 0;
+ mii_rw(dev,np->phyaddr,reg,value);
+ udelay(5);
+}
+
+static int phy_reset(struct net_device *dev, u32 bmcr_setup)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 u32 miicontrol;
 unsigned int tries = 0;
 
- miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- miicontrol |= BMCR_RESET;
+ dprintk(KERN_DEBUG "%s: phy_reset: begin\n",dev->name);
+ /**/
+ nv_save_LED_stats(dev);
+ miicontrol = BMCR_RESET | bmcr_setup;
 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
 return -1;
 }
 
 /* wait for 500ms */
- msleep(500);
+ nv_msleep(500);
 
 /* must wait till reset is deasserted */
 while (miicontrol & BMCR_RESET) {
- msleep(10);
+ nv_msleep(10);
 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 /* FIXME: 100 tries seem excessive */
 if (tries++ > 100)
 return -1;
 }
+ nv_restore_LED_stats(dev);
+
 return 0;
 }
 
@@ -1052,9 +1346,36 @@
 u8 __iomem *base = get_hwbase(dev);
 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
 
+ dprintk(KERN_DEBUG "%s: phy_init: begin\n",dev->name);
+ /* phy errata for E3016 phy */
+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
+ reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+ reg &= ~PHY_MARVELL_E3016_INITMASK;
+ if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
+ printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+
 /* set advertise register */
 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
+ reg &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ if (speed_duplex == NV_SPEED_DUPLEX_AUTO)
+ reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL);
+ if (speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
+ reg |= ADVERTISE_10HALF;
+ if (speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
+ reg |= ADVERTISE_10FULL;
+ if (speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
+ reg |= ADVERTISE_100HALF;
+ if (speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
+ reg |= ADVERTISE_100FULL;
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
+ reg |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ reg |= ADVERTISE_PAUSE_ASYM;
+ np->fixed_mode = reg;
+
 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
 return PHY_ERROR;
@@ -1069,11 +1390,15 @@
 np->gigabit = PHY_GIGABIT;
 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
 mii_control_1000 &= ~ADVERTISE_1000HALF;
- if (phyinterface & PHY_RGMII)
+ if (phyinterface & PHY_RGMII &&
+ (speed_duplex == NV_SPEED_DUPLEX_AUTO ||
+ (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && autoneg == AUTONEG_ENABLE)))
 mii_control_1000 |= ADVERTISE_1000FULL;
- else
+ else {
+ if (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && autoneg == AUTONEG_DISABLE)
+ printk(KERN_INFO "%s: 1000mpbs full only allowed with autoneg\n", pci_name(np->pci_dev));
 mii_control_1000 &= ~ADVERTISE_1000FULL;
-
+ }
 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
 return PHY_ERROR;
@@ -1082,8 +1407,25 @@
 else
 np->gigabit = 0;
 
- /* reset the phy */
- if (phy_reset(dev)) {
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (autoneg == AUTONEG_DISABLE){
+ np->pause_flags &= ~(NV_PAUSEFRAME_RX_ENABLE | NV_PAUSEFRAME_TX_ENABLE);
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ mii_control &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
+ if (reg & (ADVERTISE_10FULL|ADVERTISE_100FULL))
+ mii_control |= BMCR_FULLDPLX;
+ if (reg & (ADVERTISE_100HALF|ADVERTISE_100FULL))
+ mii_control |= BMCR_SPEED100;
+ } else {
+ mii_control |= BMCR_ANENABLE;
+ }
+
+ /* reset the phy and setup BMCR
+ * (certain phys need reset at same time new values are set) */
+ if (phy_reset(dev, mii_control)) {
 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
 return PHY_ERROR;
 }
@@ -1091,14 +1433,14 @@
 /* phy vendor specific configuration */
 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
- phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
- phy_reserved |= (PHY_INIT3 | PHY_INIT4);
+ phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
+ phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
 return PHY_ERROR;
 }
 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
- phy_reserved |= PHY_INIT5;
+ phy_reserved |= PHY_CICADA_INIT5;
 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
 return PHY_ERROR;
@@ -1106,20 +1448,92 @@
 }
 if (np->phy_oui == PHY_OUI_CICADA) {
 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
- phy_reserved |= PHY_INIT6;
+ phy_reserved |= PHY_CICADA_INIT6;
 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
 return PHY_ERROR;
 }
 }
+ if (np->phy_oui == PHY_OUI_VITESSE) {
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+ phy_reserved |= PHY_VITESSE_INIT3;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+ phy_reserved |= PHY_VITESSE_INIT3;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
+ phy_reserved |= PHY_VITESSE_INIT8;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
 /* some phys clear out pause advertisment on reset, set it back */
 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
 
 /* restart auto negotiation */
- mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
- mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
- if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
- return PHY_ERROR;
+ if (autoneg == AUTONEG_ENABLE) {
+ mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+ return PHY_ERROR;
+ }
 }
 
 return 0;
@@ -1127,18 +1541,23 @@
 
 static void nv_start_rx(struct net_device *dev)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 u8 __iomem *base = get_hwbase(dev);
+ u32 rx_ctrl = readl(base + NvRegReceiverControl);
 
 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
 /* Already running? Stop it. */
- if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
- writel(0, base + NvRegReceiverControl);
+ if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
+ rx_ctrl &= ~NVREG_RCVCTL_START;
+ writel(rx_ctrl, base + NvRegReceiverControl);
 pci_push(base);
 }
 writel(np->linkspeed, base + NvRegLinkSpeed);
 pci_push(base);
- writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
+ rx_ctrl |= NVREG_RCVCTL_START;
+ if (np->mac_in_use)
+ rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
+ writel(rx_ctrl, base + NvRegReceiverControl);
 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
 dev->name, np->duplex, np->linkspeed);
 pci_push(base);
@@ -1146,44 +1565,63 @@
 
 static void nv_stop_rx(struct net_device *dev)
 {
+ struct fe_priv *np = get_nvpriv(dev);
 u8 __iomem *base = get_hwbase(dev);
+ u32 rx_ctrl = readl(base + NvRegReceiverControl);
 
 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
- writel(0, base + NvRegReceiverControl);
+ if (!np->mac_in_use)
+ rx_ctrl &= ~NVREG_RCVCTL_START;
+ else
+ rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
+ writel(rx_ctrl, base + NvRegReceiverControl);
 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
 
 udelay(NV_RXSTOP_DELAY2);
+ if (!np->mac_in_use)
 writel(0, base + NvRegLinkSpeed);
 }
 
 static void nv_start_tx(struct net_device *dev)
 {
+ struct fe_priv *np = get_nvpriv(dev);
 u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl = readl(base + NvRegTransmitterControl);
 
 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
- writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
+ tx_ctrl |= NVREG_XMITCTL_START;
+ if (np->mac_in_use)
+ tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
 pci_push(base);
 }
 
 static void nv_stop_tx(struct net_device *dev)
 {
+ struct fe_priv *np = get_nvpriv(dev);
 u8 __iomem *base = get_hwbase(dev);
+ u32 tx_ctrl = readl(base + NvRegTransmitterControl);
 
 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
- writel(0, base + NvRegTransmitterControl);
+ if (!np->mac_in_use)
+ tx_ctrl &= ~NVREG_XMITCTL_START;
+ else
+ tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
+ writel(tx_ctrl, base + NvRegTransmitterControl);
 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
 
 udelay(NV_TXSTOP_DELAY2);
- writel(0, base + NvRegUnknownTransmitterReg);
+ if (!np->mac_in_use)
+ writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
 }
 
 static void nv_txrx_reset(struct net_device *dev)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 u8 __iomem *base = get_hwbase(dev);
 
 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
@@ -1196,7 +1634,7 @@
 
 static void nv_mac_reset(struct net_device *dev)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 u8 __iomem *base = get_hwbase(dev);
 
 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
@@ -1213,74 +1651,81 @@
 }
 
 /*
- * nv_get_stats: dev->get_stats function
- * Get latest stats value from the nic.
- * Called with read_lock(&dev_base_lock) held for read -
- * only synchronized against unregister_netdevice.
- */
-static struct net_device_stats *nv_get_stats(struct net_device *dev)
-{
- struct fe_priv *np = netdev_priv(dev);
-
- /* It seems that the nic always generates interrupts and doesn't
- * accumulate errors internally. Thus the current values in np->stats
- * are already up to date.
- */
- return &np->stats;
-}
-
-/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
-static int nv_alloc_rx(struct net_device *dev)
+static inline int nv_alloc_rx(struct net_device *dev)
 {
- struct fe_priv *np = netdev_priv(dev);
- unsigned int refill_rx = np->refill_rx;
- int nr;
-
- while (np->cur_rx != refill_rx) {
- struct sk_buff *skb;
-
- nr = refill_rx % np->rx_ring_size;
- if (np->rx_skbuff[nr] == NULL) {
-
- skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
- if (!skb)
- break;
+ struct fe_priv *np = get_nvpriv(dev);
+ struct ring_desc* less_rx;
+ struct sk_buff *skb;
 
+ less_rx = np->get_rx.orig;
+ if (less_rx-- == np->first_rx.orig)
+ less_rx = np->last_rx.orig;
+
+ while (np->put_rx.orig != less_rx) {
+ skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+ if (skb) {
 skb->dev = dev;
- np->rx_skbuff[nr] = skb;
- } else {
- skb = np->rx_skbuff[nr];
- }
- np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
- skb->end-skb->data, PCI_DMA_FROMDEVICE);
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+ np->put_rx_ctx->skb = skb;
+ np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
+ skb->end-skb->data, PCI_DMA_FROMDEVICE);
+ np->put_rx_ctx->dma_len = skb->end-skb->data;
+ np->put_rx.orig->PacketBuffer = cpu_to_le32(np->put_rx_ctx->dma);
 wmb();
- np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+ np->put_rx.orig->FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+ if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
+ np->put_rx.orig = np->first_rx.orig;
+ if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
+ np->put_rx_ctx = np->first_rx_ctx;
 } else {
- np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
- np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
- wmb();
- np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static inline int nv_alloc_rx_optimized(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ struct ring_desc_ex* less_rx;
+ struct sk_buff *skb;
+
+ less_rx = np->get_rx.ex;
+ if (less_rx-- == np->first_rx.ex)
+ less_rx = np->last_rx.ex;
+
+ while (np->put_rx.ex != less_rx) {
+ skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+ if (skb) {
+ skb->dev = dev;
+ np->put_rx_ctx->skb = skb;
+ np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
+ skb->end-skb->data, PCI_DMA_FROMDEVICE);
+ np->put_rx_ctx->dma_len = skb->end-skb->data;
+ np->put_rx.ex->PacketBufferHigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
+ np->put_rx.ex->PacketBufferLow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
+ wmb();
+ np->put_rx.ex->FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+ if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
+ np->put_rx.ex = np->first_rx.ex;
+ if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
+ np->put_rx_ctx = np->first_rx_ctx;
+ } else {
+ return 1;
 }
- dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
- dev->name, refill_rx);
- refill_rx++;
 }
- np->refill_rx = refill_rx;
- if (np->cur_rx - refill_rx == np->rx_ring_size)
- return 1;
 return 0;
+
 }
 
 static void nv_do_rx_refill(unsigned long data)
 {
 struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
+ int retcode;
 
 if (!using_multi_irqs(dev)) {
 if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -1290,7 +1735,12 @@
 } else {
 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 }
- if (nv_alloc_rx(dev)) {
+
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ retcode = nv_alloc_rx(dev);
+ else
+ retcode = nv_alloc_rx_optimized(dev);
+ if (retcode) {
 spin_lock_irq(&np->lock);
 if (!np->in_shutdown)
 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -1306,60 +1756,89 @@
 }
 }
 
-static void nv_init_rx(struct net_device *dev)
+static void nv_init_rx(struct net_device *dev)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 int i;
 
- np->cur_rx = np->rx_ring_size;
- np->refill_rx = 0;
- for (i = 0; i < np->rx_ring_size; i++)
+ np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
+ else
+ np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
+ np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
+ np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
+
+ for (i = 0; i < np->rx_ring_size; i++) {
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 np->rx_ring.orig[i].FlagLen = 0;
- else
+ np->rx_ring.orig[i].PacketBuffer = 0;
+ } else {
 np->rx_ring.ex[i].FlagLen = 0;
+ np->rx_ring.ex[i].TxVlan = 0;
+ np->rx_ring.ex[i].PacketBufferHigh = 0;
+ np->rx_ring.ex[i].PacketBufferLow = 0;
+ }
+ np->rx_skb[i].skb = NULL;
+ np->rx_skb[i].dma = 0;
+ }
 }
 
 static void nv_init_tx(struct net_device *dev)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 int i;
 
- np->next_tx = np->nic_tx = 0;
+ np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
+ else
+ np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
+ np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+ np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+
 for (i = 0; i < np->tx_ring_size; i++) {
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 np->tx_ring.orig[i].FlagLen = 0;
- else
+ np->tx_ring.orig[i].PacketBuffer = 0;
+ } else {
 np->tx_ring.ex[i].FlagLen = 0;
- np->tx_skbuff[i] = NULL;
- np->tx_dma[i] = 0;
+ np->tx_ring.ex[i].TxVlan = 0;
+ np->tx_ring.ex[i].PacketBufferHigh = 0;
+ np->tx_ring.ex[i].PacketBufferLow = 0;
+ }
+ np->tx_skb[i].skb = NULL;
+ np->tx_skb[i].dma = 0;
 }
 }
 
 static int nv_init_ring(struct net_device *dev)
 {
+ struct fe_priv *np = get_nvpriv(dev);
 nv_init_tx(dev);
 nv_init_rx(dev);
- return nv_alloc_rx(dev);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ return nv_alloc_rx(dev);
+ else
+ return nv_alloc_rx_optimized(dev);
 }
 
 static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 
 dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
 dev->name, skbnr);
 
- if (np->tx_dma[skbnr]) {
- pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
- np->tx_dma_len[skbnr],
+ if (np->tx_skb[skbnr].dma) {
+ pci_unmap_page(np->pci_dev, np->tx_skb[skbnr].dma,
+ np->tx_skb[skbnr].dma_len,
 PCI_DMA_TODEVICE);
- np->tx_dma[skbnr] = 0;
+ np->tx_skb[skbnr].dma = 0;
 }
-
- if (np->tx_skbuff[skbnr]) {
- dev_kfree_skb_any(np->tx_skbuff[skbnr]);
- np->tx_skbuff[skbnr] = NULL;
+ if (np->tx_skb[skbnr].skb) {
+ dev_kfree_skb_any(np->tx_skb[skbnr].skb);
+ np->tx_skb[skbnr].skb = NULL;
 return 1;
 } else {
 return 0;
@@ -1368,14 +1847,19 @@
 
 static void nv_drain_tx(struct net_device *dev)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 unsigned int i;
-
+
 for (i = 0; i < np->tx_ring_size; i++) {
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 np->tx_ring.orig[i].FlagLen = 0;
- else
+ np->tx_ring.orig[i].PacketBuffer = 0;
+ } else {
 np->tx_ring.ex[i].FlagLen = 0;
+ np->tx_ring.ex[i].TxVlan = 0;
+ np->tx_ring.ex[i].PacketBufferHigh = 0;
+ np->tx_ring.ex[i].PacketBufferLow = 0;
+ }
 if (nv_release_txskb(dev, i))
 np->stats.tx_dropped++;
 }
@@ -1383,20 +1867,25 @@
 
 static void nv_drain_rx(struct net_device *dev)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 int i;
 for (i = 0; i < np->rx_ring_size; i++) {
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 np->rx_ring.orig[i].FlagLen = 0;
- else
+ np->rx_ring.orig[i].PacketBuffer = 0;
+ } else {
 np->rx_ring.ex[i].FlagLen = 0;
+ np->rx_ring.ex[i].TxVlan = 0;
+ np->rx_ring.ex[i].PacketBufferHigh = 0;
+ np->rx_ring.ex[i].PacketBufferLow = 0;
+ }
 wmb();
- if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev, np->rx_dma[i],
- np->rx_skbuff[i]->end-np->rx_skbuff[i]->data,
+ if (np->rx_skb[i].skb) {
+ pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
+ np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
 PCI_DMA_FROMDEVICE);
- dev_kfree_skb(np->rx_skbuff[i]);
- np->rx_skbuff[i] = NULL;
+ dev_kfree_skb(np->rx_skb[i].skb);
+ np->rx_skb[i].skb = NULL;
 }
 }
 }
@@ -1409,57 +1898,55 @@
 
 /*
 * nv_start_xmit: dev->hard_start_xmit function
- * Called with netif_tx_lock held.
+ * Called with dev->xmit_lock held.
 */
 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 u32 tx_flags = 0;
 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
 unsigned int fragments = skb_shinfo(skb)->nr_frags;
- unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
- unsigned int start_nr = np->next_tx % np->tx_ring_size;
 unsigned int i;
 u32 offset = 0;
 u32 bcnt;
 u32 size = skb->len-skb->data_len;
 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
- u32 tx_flags_vlan = 0;
+ u32 empty_slots;
+ struct ring_desc* put_tx;
+ struct ring_desc* start_tx;
+ struct ring_desc* prev_tx;
+ struct nv_skb_map* prev_tx_ctx;
 
+ //dprintk(KERN_DEBUG "%s: nv_start_xmit \n", dev->name);
 /* add fragments to entries count */
 for (i = 0; i < fragments; i++) {
 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 }
 
- spin_lock_irq(&np->lock);
+ empty_slots = (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
+ if (likely(empty_slots > entries)) {
 
- if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
- spin_unlock_irq(&np->lock);
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
+ start_tx = put_tx = np->put_tx.orig;
 
 /* setup the header buffer */
 do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- nr = (nr + 1) % np->tx_ring_size;
-
- np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
+ np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
 PCI_DMA_TODEVICE);
- np->tx_dma_len[nr] = bcnt;
+ np->put_tx_ctx->dma_len = bcnt;
+ put_tx->PacketBuffer = cpu_to_le32(np->put_tx_ctx->dma);
+ put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
 
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
- } else {
- np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
- }
 tx_flags = np->tx_flags;
 offset += bcnt;
 size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.orig))
+ put_tx = np->first_tx.orig;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
 } while(size);
 
 /* setup the fragments */
@@ -1469,34 +1956,30 @@
 offset = 0;
 
 do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- nr = (nr + 1) % np->tx_ring_size;
 
- np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
- PCI_DMA_TODEVICE);
- np->tx_dma_len[nr] = bcnt;
+ np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
 
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
- } else {
- np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
- }
+ put_tx->PacketBuffer = cpu_to_le32(np->put_tx_ctx->dma);
+ put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
 offset += bcnt;
 size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.orig))
+ put_tx = np->first_tx.orig;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
 } while (size);
 }
 
 /* set last fragment flag */
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
- } else {
- np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
- }
+ prev_tx->FlagLen |= cpu_to_le32(tx_flags_extra);
 
- np->tx_skbuff[nr] = skb;
+ /* save skb in this slot's context area */
+ prev_tx_ctx->skb = skb;
 
 #ifdef NETIF_F_TSO
 if (skb_is_gso(skb))
@@ -1505,38 +1988,132 @@
 #endif
 tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
 
- /* vlan tag */
- if (np->vlangrp && vlan_tx_tag_present(skb)) {
- tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb);
- }
+ start_tx->FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->put_tx.orig = put_tx;
 
- /* set tx flags */
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ dev->trans_start = jiffies;
+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+ return NETDEV_TX_OK;
 } else {
- np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
- np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ netif_stop_queue(dev);
+ np->stop_tx = 1;
+ return NETDEV_TX_BUSY;
 }
+}
 
- dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
- dev->name, np->next_tx, entries, tx_flags_extra);
- {
- int j;
- for (j=0; j<64; j++) {
- if ((j%16) == 0)
- dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned char*)skb->data)[j]);
- }
- dprintk("\n");
+static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 tx_flags = 0;
+ u32 tx_flags_extra;
+ unsigned int fragments = skb_shinfo(skb)->nr_frags;
+ unsigned int i;
+ u32 offset = 0;
+ u32 bcnt;
+ u32 size = skb->len-skb->data_len;
+ u32 empty_slots;
+ struct ring_desc_ex* put_tx;
+ struct ring_desc_ex* start_tx;
+ struct ring_desc_ex* prev_tx;
+ struct nv_skb_map* prev_tx_ctx;
+
+ u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+
+ //dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized \n", dev->name);
+ /* add fragments to entries count */
+ for (i = 0; i < fragments; i++) {
+ entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 }
 
- np->next_tx += entries;
+ empty_slots = (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
+ if (likely(empty_slots > entries)) {
+
+ start_tx = put_tx = np->put_tx.ex;
+
+ /* setup the header buffer */
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+ put_tx->PacketBufferHigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
+ put_tx->PacketBufferLow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
+ put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+
+ tx_flags = NV_TX2_VALID;
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.ex))
+ put_tx = np->first_tx.ex;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while(size);
+ /* setup the fragments */
+ for (i = 0; i < fragments; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = frag->size;
+ offset = 0;
+
+ do {
+ prev_tx = put_tx;
+ prev_tx_ctx = np->put_tx_ctx;
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+
+ np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->put_tx_ctx->dma_len = bcnt;
+
+ put_tx->PacketBufferHigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
+ put_tx->PacketBufferLow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
+ put_tx->FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ offset += bcnt;
+ size -= bcnt;
+ if (unlikely(put_tx++ == np->last_tx.ex))
+ put_tx = np->first_tx.ex;
+ if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
+ np->put_tx_ctx = np->first_tx_ctx;
+ } while (size);
+ }
+
+ /* set last fragment flag */
+ prev_tx->FlagLen |= cpu_to_le32(NV_TX2_LASTPACKET);
+
+ /* save skb in this slot's context area */
+ prev_tx_ctx->skb = skb;
+
+#ifdef NETIF_F_TSO
+ if (skb_is_gso(skb))
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
+ else
+#endif
+ tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+
+ /* vlan tag */
+ if (likely(!np->vlangrp)) {
+ start_tx->TxVlan = 0;
+ } else {
+ if (vlan_tx_tag_present(skb))
+ start_tx->TxVlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
+ else
+ start_tx->TxVlan = 0;
+ }
+
+ /* set tx flags */
+ start_tx->FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+ np->put_tx.ex = put_tx;
 
 dev->trans_start = jiffies;
- spin_unlock_irq(&np->lock);
 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
- pci_push(get_hwbase(dev));
 return NETDEV_TX_OK;
+
+ } else {
+ netif_stop_queue(dev);
+ np->stop_tx = 1;
+ return NETDEV_TX_BUSY;
+ }
 }
 
 /*
@@ -1544,30 +2121,26 @@
 *
 * Caller must own np->lock.
 */
-static void nv_tx_done(struct net_device *dev)
+static inline void nv_tx_done(struct net_device *dev)
 {
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = get_nvpriv(dev);
 u32 Flags;
- unsigned int i;
- struct sk_buff *skb;
+ struct ring_desc* orig_get_tx = np->get_tx.orig;
+ struct ring_desc* put_tx = np->put_tx.orig;
 
- while (np->nic_tx != np->next_tx) {
- i = np->nic_tx % np->tx_ring_size;
+ //dprintk(KERN_DEBUG "%s: nv_tx_done \n", dev->name);
+ while ((np->get_tx.orig != put_tx) &&
+ !((Flags = le32_to_cpu(np->get_tx.orig->FlagLen)) & NV_TX_VALID)) {
+ dprintk(KERN_DEBUG "%s: nv_tx_done:NVLAN tx done\n", dev->name);
 
- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
- Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
- else
- Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
+ pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
+ np->get_tx_ctx->dma_len,
+ PCI_DMA_TODEVICE);
+ np->get_tx_ctx->dma = 0;
 
- dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
- dev->name, np->nic_tx, Flags);
- if (Flags & NV_TX_VALID)
- break;
 if (np->desc_ver == DESC_VER_1) {
 if (Flags & NV_TX_LASTPACKET) {
- skb = np->tx_skbuff[i];
- if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
- NV_TX_UNDERFLOW|NV_TX_ERROR)) {
+ if (Flags & NV_TX_ERROR) {
 if (Flags & NV_TX_UNDERFLOW)
 np->stats.tx_fifo_errors++;
 if (Flags & NV_TX_CARRIERLOST)
@@ -1575,14 +2148,15 @@
 np->stats.tx_errors++;
 } else {
 np->stats.tx_packets++;
- np->stats.tx_bytes += skb->len;
+ np->stats.tx_bytes += np->get_tx_ctx->skb->len;
 }
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
+
 }
 } else {
 if (Flags & NV_TX2_LASTPACKET) {
- skb = np->tx_skbuff[i];
- if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
- NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
+ if (Flags & NV_TX2_ERROR) {
 if (Flags & NV_TX2_UNDERFLOW)
 np->stats.tx_fifo_errors++;
 if (Flags & NV_TX2_CARRIERLOST)
@@ -1590,27 +2164,74 @@
 np->stats.tx_errors++;
 } else {
 np->stats.tx_packets++;
- np->stats.tx_bytes += skb->len;
- }
+ np->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ }
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
+ }
+ }
+
+ if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
+ np->get_tx.orig = np->first_tx.orig;
+ if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
+ np->get_tx_ctx = np->first_tx_ctx;
+ }
+ if (unlikely((np->stop_tx == 1) && (np->get_tx.orig != orig_get_tx))) {
+ np->stop_tx = 0;
+ netif_wake_queue(dev);
+ }
+}
+
+static inline void nv_tx_done_optimized(struct net_device *dev, int max_work)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u32 Flags;
+ struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
+ struct ring_desc_ex* put_tx = np->put_tx.ex;
+
+ //dprintk(KERN_DEBUG "%s: nv_tx_done_optimized \n", dev->name);
+ while ((np->get_tx.ex != put_tx) &&
+ !((Flags = le32_to_cpu(np->get_tx.ex->FlagLen)) & NV_TX_VALID) &&
+ (max_work-- > 0)) {
+ dprintk(KERN_DEBUG "%s: nv_tx_done_optimized:NVLAN tx done\n", dev->name);
+
+ pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
+ np->get_tx_ctx->dma_len,
+ PCI_DMA_TODEVICE);
+ np->get_tx_ctx->dma = 0;
+
+ if (Flags & NV_TX2_LASTPACKET) {
+ if (!(Flags & NV_TX2_ERROR)) {
+ np->stats.tx_packets++;
 }
+ dev_kfree_skb_any(np->get_tx_ctx->skb);
+ np->get_tx_ctx->skb = NULL;
 }
- nv_release_txskb(dev, i);
- np->nic_tx++;
+
+ if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
np->last_tx.ex)) >+ np->get_tx.ex = np->first_tx.ex; >+ if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) >+ np->get_tx_ctx = np->first_tx_ctx; > } >- if (np->next_tx - np->nic_tx < np->tx_limit_start) >+ if (unlikely((np->stop_tx == 1) && (np->get_tx.ex != orig_get_tx))) { >+ np->stop_tx = 0; > netif_wake_queue(dev); >+ } > } > > /* > * nv_tx_timeout: dev->tx_timeout function >- * Called with netif_tx_lock held. >+ * Called with dev->xmit_lock held. > */ > static void nv_tx_timeout(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > u32 status; > >+ if (!netif_running(dev)) >+ return; >+ > if (np->msi_flags & NV_MSI_X_ENABLED) > status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; > else >@@ -1621,9 +2242,15 @@ > { > int i; > >- printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n", >- dev->name, (unsigned long)np->ring_addr, >- np->next_tx, np->nic_tx); >+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { >+ printk(KERN_INFO "%s: Ring at %lx: get %lx put %lx\n", >+ dev->name, (unsigned long)np->tx_ring.orig, >+ (unsigned long)np->get_tx.orig, (unsigned long)np->put_tx.orig); >+ } else { >+ printk(KERN_INFO "%s: Ring at %lx: get %lx put %lx\n", >+ dev->name, (unsigned long)np->tx_ring.ex, >+ (unsigned long)np->get_tx.ex, (unsigned long)np->put_tx.ex); >+ } > printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); > for (i=0;i<=np->register_size;i+= 32) { > printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", >@@ -1637,7 +2264,7 @@ > for (i=0;i<np->tx_ring_size;i+= 4) { > if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { > printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", >- i, >+ i, > le32_to_cpu(np->tx_ring.orig[i].PacketBuffer), > le32_to_cpu(np->tx_ring.orig[i].FlagLen), > le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer), >@@ -1648,7 +2275,7 @@ > le32_to_cpu(np->tx_ring.orig[i+3].FlagLen)); > } else { > printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", >- i, >+ i, > le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh), > le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow), > le32_to_cpu(np->tx_ring.ex[i].FlagLen), >@@ -1665,19 +2292,27 @@ > } > } > >+ nv_disable_irq(dev); > spin_lock_irq(&np->lock); > > /* 1) stop tx engine */ > nv_stop_tx(dev); > > /* 2) check that the packets were not sent already: */ >+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) > nv_tx_done(dev); >+ else >+ nv_tx_done_optimized(dev, np->tx_ring_size); > > /* 3) if there are dead entries: clear everything */ >- if (np->next_tx != np->nic_tx) { >+ if (np->get_tx_ctx != np->put_tx_ctx) { > printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); > nv_drain_tx(dev); >- np->next_tx = np->nic_tx = 0; >+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) >+ np->get_tx.orig = np->put_tx.orig = np->first_tx.orig; >+ else >+ np->get_tx.ex = np->put_tx.ex = np->first_tx.ex; >+ np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx; > setup_hw_rings(dev, NV_SETUP_TX_RING); > netif_wake_queue(dev); > } >@@ -1685,6 +2320,7 @@ > /* 4) restart tx engine */ > nv_start_tx(dev); > spin_unlock_irq(&np->lock); >+ nv_enable_irq(dev); > } > > /* >@@ -1740,43 +2376,23 @@ > } > } > >-static void nv_rx_process(struct net_device *dev) >+static inline void nv_rx_process(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u32 Flags; >- 
u32 vlanflags = 0; >- >- for (;;) { >- struct sk_buff *skb; >- int len; >- int i; >- if (np->cur_rx - np->refill_rx >= np->rx_ring_size) >- break; /* we scanned the whole ring - do not continue */ >- >- i = np->cur_rx % np->rx_ring_size; >- if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { >- Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen); >- len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); >- } else { >- Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); >- len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); >- vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow); >- } >- >- dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", >- dev->name, np->cur_rx, Flags); >+ struct sk_buff *skb; >+ int len; > >- if (Flags & NV_RX_AVAIL) >- break; /* still owned by hardware, */ >+ //dprintk(KERN_DEBUG "%s: nv_rx_process \n", dev->name); >+ while((np->get_rx.orig != np->put_rx.orig) && >+ !((Flags = le32_to_cpu(np->get_rx.orig->FlagLen)) & NV_RX_AVAIL)) { >+ >+ pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, >+ np->get_rx_ctx->dma_len, >+ PCI_DMA_FROMDEVICE); > >- /* >- * the packet is for us - immediately tear down the pci mapping. >- * TODO: check if a prefetch of the first cacheline improves >- * the performance. >- */ >- pci_unmap_single(np->pci_dev, np->rx_dma[i], >- np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, >- PCI_DMA_FROMDEVICE); >+ skb = np->get_rx_ctx->skb; >+ np->get_rx_ctx->skb = NULL; > > { > int j; >@@ -1784,118 +2400,198 @@ > for (j=0; j<64; j++) { > if ((j%16) == 0) > dprintk("\n%03x:", j); >- dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]); >+ dprintk(" %02x", ((unsigned char*)skb->data)[j]); > } > dprintk("\n"); > } >- /* look at what we actually got: */ >+ > if (np->desc_ver == DESC_VER_1) { >- if (!(Flags & NV_RX_DESCRIPTORVALID)) >- goto next_pkt; > >- if (Flags & NV_RX_ERROR) { >- if (Flags & NV_RX_MISSEDFRAME) { >- np->stats.rx_missed_errors++; >- np->stats.rx_errors++; >- goto next_pkt; >- } >- if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { >- np->stats.rx_errors++; >- goto next_pkt; >- } >- if (Flags & NV_RX_CRCERR) { >- np->stats.rx_crc_errors++; >- np->stats.rx_errors++; >- goto next_pkt; >- } >- if (Flags & NV_RX_OVERFLOW) { >- np->stats.rx_over_errors++; >- np->stats.rx_errors++; >- goto next_pkt; >+ if (likely(Flags & NV_RX_DESCRIPTORVALID)) { >+ len = Flags & LEN_MASK_V1; >+ if (unlikely(Flags & NV_RX_ERROR)) { >+ if (Flags & NV_RX_ERROR4) { >+ len = nv_getlen(dev, skb->data, len); >+ if (len < 0) { >+ np->stats.rx_errors++; >+ dev_kfree_skb(skb); >+ goto next_pkt; >+ } >+ } >+ /* framing errors are soft errors */ >+ else if (Flags & NV_RX_FRAMINGERR) { >+ if (Flags & NV_RX_SUBSTRACT1) { >+ len--; >+ } >+ } >+ /* the rest are hard errors */ >+ else { >+ if (Flags & NV_RX_MISSEDFRAME) >+ np->stats.rx_missed_errors++; >+ if (Flags & NV_RX_CRCERR) >+ np->stats.rx_crc_errors++; >+ if (Flags & NV_RX_OVERFLOW) >+ np->stats.rx_over_errors++; >+ np->stats.rx_errors++; >+ dev_kfree_skb(skb); >+ goto next_pkt; >+ } > } >- if (Flags & NV_RX_ERROR4) { >- len = nv_getlen(dev, np->rx_skbuff[i]->data, len); >- if (len < 0) { >+ } else { >+ dev_kfree_skb(skb); >+ goto next_pkt; >+ } >+ } else { >+ if (likely(Flags & NV_RX2_DESCRIPTORVALID)) { >+ len = Flags & LEN_MASK_V2; >+ if (unlikely(Flags & NV_RX2_ERROR)) { >+ if (Flags & NV_RX2_ERROR4) { >+ len = nv_getlen(dev, skb->data, len); >+ if (len < 0) { >+ np->stats.rx_errors++; >+ dev_kfree_skb(skb); >+ goto next_pkt; >+ } >+ } >+ /* framing 
errors are soft errors */ >+ else if (Flags & NV_RX2_FRAMINGERR) { >+ if (Flags & NV_RX2_SUBSTRACT1) { >+ len--; >+ } >+ } >+ /* the rest are hard errors */ >+ else { >+ if (Flags & NV_RX2_CRCERR) >+ np->stats.rx_crc_errors++; >+ if (Flags & NV_RX2_OVERFLOW) >+ np->stats.rx_over_errors++; > np->stats.rx_errors++; >+ dev_kfree_skb(skb); > goto next_pkt; > } > } >- /* framing errors are soft errors. */ >- if (Flags & NV_RX_FRAMINGERR) { >- if (Flags & NV_RX_SUBSTRACT1) { >- len--; >+ if ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { >+ skb->ip_summed = CHECKSUM_UNNECESSARY; >+ } else { >+ if ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || >+ (Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { >+ skb->ip_summed = CHECKSUM_UNNECESSARY; > } > } >- } >- } else { >- if (!(Flags & NV_RX2_DESCRIPTORVALID)) >+ } else { >+ dev_kfree_skb(skb); > goto next_pkt; >+ } >+ } > >- if (Flags & NV_RX2_ERROR) { >- if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { >- np->stats.rx_errors++; >- goto next_pkt; >- } >- if (Flags & NV_RX2_CRCERR) { >- np->stats.rx_crc_errors++; >- np->stats.rx_errors++; >- goto next_pkt; >- } >- if (Flags & NV_RX2_OVERFLOW) { >- np->stats.rx_over_errors++; >- np->stats.rx_errors++; >- goto next_pkt; >- } >+ /* got a valid packet - forward it to the network core */ >+ dprintk(KERN_DEBUG "%s: nv_rx_process:NVLAN rx done\n", dev->name); >+ skb_put(skb, len); >+ skb->protocol = eth_type_trans(skb, dev); >+ netif_rx(skb); >+ dev->last_rx = jiffies; >+ np->stats.rx_packets++; >+ np->stats.rx_bytes += len; >+next_pkt: >+ if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) >+ np->get_rx.orig = np->first_rx.orig; >+ if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) >+ np->get_rx_ctx = np->first_rx_ctx; >+ } >+} >+ >+static inline int nv_rx_process_optimized(struct net_device *dev, int max_work) >+{ >+ struct fe_priv *np = get_nvpriv(dev); >+ u32 Flags; >+ u32 vlanflags = 0; >+ u32 rx_processed_cnt = 0; >+ struct sk_buff *skb; >+ int len; >+ >+// dprintk(KERN_DEBUG "%s: nv_rx_process_optimized \n", dev->name); >+ while((np->get_rx.ex != np->put_rx.ex) && >+ !((Flags = le32_to_cpu(np->get_rx.ex->FlagLen)) & NV_RX2_AVAIL) && >+ (rx_processed_cnt++ < max_work)) { >+ >+ pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, >+ np->get_rx_ctx->dma_len, >+ PCI_DMA_FROMDEVICE); >+ >+ skb = np->get_rx_ctx->skb; >+ np->get_rx_ctx->skb = NULL; >+ >+ /* look at what we actually got: */ >+ if (likely(Flags & NV_RX2_DESCRIPTORVALID)) { >+ len = Flags & LEN_MASK_V2; >+ if (unlikely(Flags & NV_RX2_ERROR)) { > if (Flags & NV_RX2_ERROR4) { >- len = nv_getlen(dev, np->rx_skbuff[i]->data, len); >+ len = nv_getlen(dev, skb->data, len); > if (len < 0) { >- np->stats.rx_errors++; >+ np->rx_len_errors++; >+ dev_kfree_skb(skb); > goto next_pkt; > } > } > /* framing errors are soft errors */ >- if (Flags & NV_RX2_FRAMINGERR) { >+ else if (Flags & NV_RX2_FRAMINGERR) { > if (Flags & NV_RX2_SUBSTRACT1) { > len--; > } > } >+ /* the rest are hard errors */ >+ else { >+ dev_kfree_skb(skb); >+ goto next_pkt; >+ } > } >- if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) { >- Flags &= NV_RX2_CHECKSUMMASK; >- if (Flags == NV_RX2_CHECKSUMOK1 || >- Flags == NV_RX2_CHECKSUMOK2 || >- Flags == NV_RX2_CHECKSUMOK3) { >- dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); >- np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; >+ >+ if (likely(np->rx_csum)) { >+ if (likely((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)) { >+ /*ip and tcp */ >+ skb->ip_summed = CHECKSUM_UNNECESSARY; > } 
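/* (Note: NV_RX2_CHECKSUMOK2 is annotated "ip and tcp" above and is
 * treated as the fast path; OK1 and OK3 appear to be the remaining
 * checksum-verified encodings, presumably IP-only and IP+UDP, so the
 * else-branch below accepts them as well.) */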
else { >- dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name); >+ if ((Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || >+ (Flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { >+ skb->ip_summed = CHECKSUM_UNNECESSARY; >+ } > } > } >- } >- /* got a valid packet - forward it to the network core */ >- skb = np->rx_skbuff[i]; >- np->rx_skbuff[i] = NULL; >+ dprintk(KERN_DEBUG "%s: nv_rx_process_optimized:NVLAN rx done\n", dev->name); > >- skb_put(skb, len); >- skb->protocol = eth_type_trans(skb, dev); >- dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", >- dev->name, np->cur_rx, len, skb->protocol); >- if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) { >- vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK); >+ /* got a valid packet - forward it to the network core */ >+ skb_put(skb, len); >+ skb->protocol = eth_type_trans(skb, dev); >+ prefetch(skb->data); >+ >+ if (likely(!np->vlangrp)) { >+ netif_rx(skb); >+ } else { >+ vlanflags = le32_to_cpu(np->get_rx.ex->PacketBufferLow); >+ if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) >+ vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK); >+ else >+ netif_rx(skb); >+ } >+ >+ dev->last_rx = jiffies; >+ np->stats.rx_packets++; >+ np->stats.rx_bytes += len; > } else { >- netif_rx(skb); >+ dev_kfree_skb(skb); > } >- dev->last_rx = jiffies; >- np->stats.rx_packets++; >- np->stats.rx_bytes += len; > next_pkt: >- np->cur_rx++; >+ if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) >+ np->get_rx.ex = np->first_rx.ex; >+ if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) >+ np->get_rx_ctx = np->first_rx_ctx; > } >+ return rx_processed_cnt; > } > > static void set_bufsize(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > > if (dev->mtu <= ETH_DATA_LEN) > np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; >@@ -1909,7 +2605,7 @@ > */ > static int nv_change_mtu(struct net_device *dev, int new_mtu) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > int old_mtu; > > if (new_mtu < 64 || new_mtu > np->pkt_limit) >@@ -1987,12 +2683,13 @@ > */ > static int nv_set_mac_address(struct net_device *dev, void *addr) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > struct sockaddr *macaddr = (struct sockaddr*)addr; > > if(!is_valid_ether_addr(macaddr->sa_data)) > return -EADDRNOTAVAIL; > >+ dprintk(KERN_DEBUG "%s: nv_set_mac_address \n", dev->name); > /* synchronized against open : rtnl_lock() held by caller */ > memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); > >@@ -2018,11 +2715,11 @@ > > /* > * nv_set_multicast: dev->set_multicast function >- * Called with netif_tx_lock held. >+ * Called with dev->xmit_lock held. 
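>+ * (Note: in mainline 2.6.18 this lock is normally taken through the
>+ * netif_tx_lock() wrapper; the bare dev->xmit_lock spelling here
>+ * suggests this vendor patch is also shared with builds for older
>+ * kernels.)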
> */ > static void nv_set_multicast(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > u32 addr[2]; > u32 mask[2]; >@@ -2032,7 +2729,7 @@ > memset(mask, 0, sizeof(mask)); > > if (dev->flags & IFF_PROMISC) { >- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); >+ dprintk(KERN_DEBUG "%s: Promiscuous mode enabled.\n", dev->name); > pff |= NVREG_PFF_PROMISC; > } else { > pff |= NVREG_PFF_MYADDR; >@@ -2082,7 +2779,7 @@ > > static void nv_update_pause(struct net_device *dev, u32 pause_flags) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > > np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); >@@ -2104,7 +2801,7 @@ > np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; > } else { > writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); >- writel(regmisc, base + NvRegMisc1); >+ writel(regmisc, base + NvRegMisc1); > } > } > } >@@ -2122,7 +2819,7 @@ > */ > static int nv_update_linkspeed(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > int adv = 0; > int lpa = 0; >@@ -2148,7 +2845,7 @@ > goto set_speed; > } > >- if (np->autoneg == 0) { >+ if (np->autoneg == AUTONEG_DISABLE) { > dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", > dev->name, np->fixed_mode); > if (np->fixed_mode & LPA_100FULL) { >@@ -2181,7 +2878,6 @@ > lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); > dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", > dev->name, adv, lpa); >- > retval = 1; > if (np->gigabit == PHY_GIGABIT) { > control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); >@@ -2268,7 +2964,6 @@ > txreg = NVREG_TX_WM_DESC2_3_DEFAULT; > } > writel(txreg, base + NvRegTxWatermark); >- > writel(NVREG_MISC1_FORCE | ( np->duplex ? 
0 : NVREG_MISC1_HD), > base + NvRegMisc1); > pci_push(base); >@@ -2306,7 +3001,7 @@ > if (lpa_pause == LPA_PAUSE_ASYM) > { > pause_flags |= NV_PAUSEFRAME_RX_ENABLE; >- } >+ } > break; > } > } else { >@@ -2352,7 +3047,7 @@ > static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) > { > struct net_device *dev = (struct net_device *) data; >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > u32 events; > int i; >@@ -2372,10 +3067,8 @@ > if (!(events & np->irqmask)) > break; > >- spin_lock(&np->lock); > nv_tx_done(dev); >- spin_unlock(&np->lock); >- >+ > nv_rx_process(dev); > if (nv_alloc_rx(dev)) { > spin_lock(&np->lock); >@@ -2383,7 +3076,7 @@ > mod_timer(&np->oom_kick, jiffies + OOM_REFILL); > spin_unlock(&np->lock); > } >- >+ > if (events & NVREG_IRQ_LINK) { > spin_lock(&np->lock); > nv_link_irq(dev); >@@ -2427,10 +3120,76 @@ > return IRQ_RETVAL(i); > } > >+#define TX_WORK_PER_LOOP 64 >+#define RX_WORK_PER_LOOP 64 >+static irqreturn_t nv_nic_irq_optimized(int foo, void *data, struct pt_regs *regs) >+{ >+ struct net_device *dev = (struct net_device *) data; >+ struct fe_priv *np = get_nvpriv(dev); >+ u8 __iomem *base = get_hwbase(dev); >+ u32 events; >+ int i = 1; >+ >+ do { >+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) { >+ events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; >+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); >+ } else { >+ events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; >+ writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); >+ } >+ if (events & np->irqmask) { >+ >+ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); >+ >+ if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { >+ if (unlikely(nv_alloc_rx_optimized(dev))) { >+ spin_lock(&np->lock); >+ if (!np->in_shutdown) >+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL); >+ spin_unlock(&np->lock); >+ } >+ } >+ if (unlikely(events & NVREG_IRQ_LINK)) { >+ spin_lock(&np->lock); >+ nv_link_irq(dev); >+ spin_unlock(&np->lock); >+ } >+ if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { >+ spin_lock(&np->lock); >+ nv_linkchange(dev); >+ spin_unlock(&np->lock); >+ np->link_timeout = jiffies + LINK_TIMEOUT; >+ } >+ if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) { >+ spin_lock(&np->lock); >+ /* disable interrupts on the nic */ >+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) >+ writel(0, base + NvRegIrqMask); >+ else >+ writel(np->irqmask, base + NvRegIrqMask); >+ pci_push(base); >+ >+ if (!np->in_shutdown) { >+ np->nic_poll_irq = np->irqmask; >+ np->recover_error = 1; >+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT); >+ } >+ spin_unlock(&np->lock); >+ break; >+ } >+ } else >+ break; >+ } >+ while (i++ <= max_interrupt_work); >+ >+ return IRQ_RETVAL(i); >+} >+ > static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) > { > struct net_device *dev = (struct net_device *) data; >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > u32 events; > int i; >@@ -2440,15 +3199,12 @@ > for (i=0; ; i++) { > events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; > writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); >- pci_push(base); > dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); > if (!(events & np->irqmask)) > break; > >- spin_lock_irq(&np->lock); >- nv_tx_done(dev); >- spin_unlock_irq(&np->lock); >- >+ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); >+ > if (events & (NVREG_IRQ_TX_ERR)) { > 
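/* (Note: TX_WORK_PER_LOOP and RX_WORK_PER_LOOP, both 64, bound how
 * many descriptors nv_tx_done_optimized() and nv_rx_process_optimized()
 * reap in a single handler pass; the surrounding loops remain capped
 * by max_interrupt_work, as in the original handlers.) */
> 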
dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", > dev->name, events); >@@ -2477,7 +3233,7 @@ > static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) > { > struct net_device *dev = (struct net_device *) data; >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > u32 events; > int i; >@@ -2487,19 +3243,19 @@ > for (i=0; ; i++) { > events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; > writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); >- pci_push(base); > dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); > if (!(events & np->irqmask)) > break; >- >- nv_rx_process(dev); >- if (nv_alloc_rx(dev)) { >+ >+ if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { >+ if (unlikely(nv_alloc_rx_optimized(dev))) { > spin_lock_irq(&np->lock); > if (!np->in_shutdown) > mod_timer(&np->oom_kick, jiffies + OOM_REFILL); > spin_unlock_irq(&np->lock); >+ } > } >- >+ > if (i > max_interrupt_work) { > spin_lock_irq(&np->lock); > /* disable interrupts on the nic */ >@@ -2524,7 +3280,7 @@ > static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) > { > struct net_device *dev = (struct net_device *) data; >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > u32 events; > int i; >@@ -2534,11 +3290,10 @@ > for (i=0; ; i++) { > events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; > writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); >- pci_push(base); > dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); > if (!(events & np->irqmask)) > break; >- >+ > if (events & NVREG_IRQ_LINK) { > spin_lock_irq(&np->lock); > nv_link_irq(dev); >@@ -2550,6 +3305,20 @@ > spin_unlock_irq(&np->lock); > np->link_timeout = jiffies + LINK_TIMEOUT; > } >+ if (events & NVREG_IRQ_RECOVER_ERROR) { >+ spin_lock_irq(&np->lock); >+ /* disable interrupts on the nic */ >+ writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); >+ pci_push(base); >+ >+ if (!np->in_shutdown) { >+ np->nic_poll_irq |= NVREG_IRQ_OTHER; >+ np->recover_error = 1; >+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT); >+ } >+ spin_unlock_irq(&np->lock); >+ break; >+ } > if (events & (NVREG_IRQ_UNKNOWN)) { > printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. 
Please report\n", > dev->name, events); >@@ -2578,7 +3347,7 @@ > static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs) > { > struct net_device *dev = (struct net_device *) data; >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > u32 events; > >@@ -2595,16 +3364,17 @@ > dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); > if (!(events & NVREG_IRQ_TIMER)) > return IRQ_RETVAL(0); >- >+ > spin_lock(&np->lock); > np->intr_test = 1; > spin_unlock(&np->lock); >- >+ > dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); > > return IRQ_RETVAL(1); > } > >+#ifdef CONFIG_PCI_MSI > static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) > { > u8 __iomem *base = get_hwbase(dev); >@@ -2630,12 +3400,14 @@ > } > writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); > } >+#endif > > static int nv_request_irq(struct net_device *dev, int intr_test) > { > struct fe_priv *np = get_nvpriv(dev); >- u8 __iomem *base = get_hwbase(dev); > int ret = 1; >+ >+ u8 __iomem *base = get_hwbase(dev); > int i; > > if (np->msi_flags & NV_MSI_X_CAPABLE) { >@@ -2646,21 +3418,21 @@ > np->msi_flags |= NV_MSI_X_ENABLED; > if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { > /* Request irq for rx handling */ >- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { >+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { > printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); > pci_disable_msix(np->pci_dev); > np->msi_flags &= ~NV_MSI_X_ENABLED; > goto out_err; > } > /* Request irq for tx handling */ >- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { >+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { > printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); > pci_disable_msix(np->pci_dev); > np->msi_flags &= ~NV_MSI_X_ENABLED; > goto out_free_rx; > } > /* Request irq for link and timer handling */ >- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { >+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { > printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); > pci_disable_msix(np->pci_dev); > np->msi_flags &= ~NV_MSI_X_ENABLED; >@@ -2669,15 +3441,19 @@ > /* map interrupts to their respective vector */ > writel(0, base + NvRegMSIXMap0); > writel(0, base + NvRegMSIXMap1); >+#ifdef CONFIG_PCI_MSI > set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); > set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); > set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); >+#endif > } else { > /* Request irq for all interrupts */ >- if ((!intr_test && >- request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || >+ if ((!intr_test && np->desc_ver == DESC_VER_3 && >+ request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) || >+ (!intr_test && np->desc_ver != DESC_VER_3 && >+ request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) || > (intr_test && >- 
request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) { >+ request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) { > printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); > pci_disable_msix(np->pci_dev); > np->msi_flags &= ~NV_MSI_X_ENABLED; >@@ -2693,8 +3469,11 @@ > if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { > if ((ret = pci_enable_msi(np->pci_dev)) == 0) { > np->msi_flags |= NV_MSI_ENABLED; >- if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || >- (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) { >+ if ((!intr_test && np->desc_ver == DESC_VER_3 && >+ request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) || >+ (!intr_test && np->desc_ver != DESC_VER_3 && >+ request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) || >+ (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) { > printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); > pci_disable_msi(np->pci_dev); > np->msi_flags &= ~NV_MSI_ENABLED; >@@ -2709,13 +3488,17 @@ > } > } > if (ret != 0) { >- if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || >- (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) >+ if ((!intr_test && np->desc_ver == DESC_VER_3 && >+ request_irq(np->pci_dev->irq, &nv_nic_irq_optimized, SA_SHIRQ, dev->name, dev) != 0) || >+ (!intr_test && np->desc_ver != DESC_VER_3 && >+ request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) || >+ (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) > goto out_err; >- >+ > } > > return 0; >+ > out_free_tx: > free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); > out_free_rx: >@@ -2728,7 +3511,7 @@ > { > struct fe_priv *np = get_nvpriv(dev); > int i; >- >+ > if (np->msi_flags & NV_MSI_X_ENABLED) { > for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { > free_irq(np->msi_x_entry[i].vector, dev); >@@ -2747,7 +3530,7 @@ > static void nv_do_nic_poll(unsigned long data) > { > struct net_device *dev = (struct net_device *) data; >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > u32 mask = 0; > >@@ -2759,49 +3542,87 @@ > > if (!using_multi_irqs(dev)) { > if (np->msi_flags & NV_MSI_X_ENABLED) >- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); >+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); > else >- disable_irq_lockdep(dev->irq); >+ disable_irq(dev->irq); > mask = np->irqmask; > } else { > if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { >- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); >+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); > mask |= NVREG_IRQ_RX_ALL; > } > if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { >- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); >+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); > mask |= NVREG_IRQ_TX_ALL; > } > if (np->nic_poll_irq & NVREG_IRQ_OTHER) { >- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); >+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); > mask |= NVREG_IRQ_OTHER; > } > } > np->nic_poll_irq = 0; > >- /* FIXME: Do we need synchronize_irq(dev->irq) here? 
*/ >+ if (np->recover_error) { >+ np->recover_error = 0; >+ printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); >+ if (netif_running(dev)) { >+ netif_tx_lock_bh(dev); >+ spin_lock(&np->lock); >+ /* stop engines */ >+ nv_stop_rx(dev); >+ nv_stop_tx(dev); >+ nv_txrx_reset(dev); >+ /* drain rx queue */ >+ nv_drain_rx(dev); >+ nv_drain_tx(dev); >+ /* reinit driver view of the rx queue */ >+ set_bufsize(dev); >+ if (nv_init_ring(dev)) { >+ if (!np->in_shutdown) >+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL); >+ } >+ /* reinit nic view of the rx queue */ >+ writel(np->rx_buf_sz, base + NvRegOffloadConfig); >+ setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); >+ writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), >+ base + NvRegRingSizes); >+ pci_push(base); >+ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); >+ pci_push(base); > >+ /* restart rx engine */ >+ nv_start_rx(dev); >+ nv_start_tx(dev); >+ spin_unlock(&np->lock); >+ netif_tx_unlock_bh(dev); >+ } >+ } >+ /* FIXME: Do we need synchronize_irq(dev->irq) here? */ >+ > writel(mask, base + NvRegIrqMask); > pci_push(base); > > if (!using_multi_irqs(dev)) { >- nv_nic_irq(0, dev, NULL); >+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) >+ nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); >+ else >+ nv_nic_irq_optimized((int) 0, (void *) data, (struct pt_regs *) NULL); > if (np->msi_flags & NV_MSI_X_ENABLED) >- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); >+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); > else >- enable_irq_lockdep(dev->irq); >+ enable_irq(dev->irq); > } else { > if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { >- nv_nic_irq_rx(0, dev, NULL); >- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); >+ nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); >+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); > } > if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { >- nv_nic_irq_tx(0, dev, NULL); >- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); >+ nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL); >+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); > } > if (np->nic_poll_irq & NVREG_IRQ_OTHER) { >- nv_nic_irq_other(0, dev, NULL); >- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); >+ nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL); >+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); > } > } > } >@@ -2816,56 +3637,104 @@ > static void nv_do_stats_poll(unsigned long data) > { > struct net_device *dev = (struct net_device *) data; >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > >- np->estats.tx_bytes += readl(base + NvRegTxCnt); >- np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); >- np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); >- np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); >- np->estats.tx_late_collision += readl(base + NvRegTxLateCol); >- np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); >- np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); >- np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); >- np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); >- np->estats.tx_deferral += readl(base + NvRegTxDef); >- np->estats.tx_packets += readl(base + NvRegTxFrame); >- np->estats.tx_pause += readl(base + NvRegTxPause); >- 
np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); >- np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); >- np->estats.rx_late_collision += readl(base + NvRegRxLateCol); >- np->estats.rx_runt += readl(base + NvRegRxRunt); >- np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); >- np->estats.rx_over_errors += readl(base + NvRegRxOverflow); >- np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); >- np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); >- np->estats.rx_length_error += readl(base + NvRegRxLenErr); >- np->estats.rx_unicast += readl(base + NvRegRxUnicast); >- np->estats.rx_multicast += readl(base + NvRegRxMulticast); >- np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); >- np->estats.rx_bytes += readl(base + NvRegRxCnt); >- np->estats.rx_pause += readl(base + NvRegRxPause); >- np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); >- np->estats.rx_packets = >- np->estats.rx_unicast + >- np->estats.rx_multicast + >- np->estats.rx_broadcast; >- np->estats.rx_errors_total = >- np->estats.rx_crc_errors + >- np->estats.rx_over_errors + >- np->estats.rx_frame_error + >- (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + >- np->estats.rx_late_collision + >- np->estats.rx_runt + >- np->estats.rx_frame_too_long; >+ spin_lock_irq(&np->lock); >+ >+ np->estats.tx_dropped = np->stats.tx_dropped; >+ if (np->driver_data & DEV_HAS_STATISTICS) { >+ np->estats.tx_packets += readl(base + NvRegTxFrame); >+ np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); >+ np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); >+ np->estats.tx_bytes += readl(base + NvRegTxCnt); >+ np->estats.rx_bytes += readl(base + NvRegRxCnt); >+ np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); >+ np->estats.rx_over_errors += readl(base + NvRegRxOverflow); >+ >+ np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); >+ np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); >+ np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); >+ np->estats.tx_late_collision += readl(base + NvRegTxLateCol); >+ np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); >+ np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); >+ np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); >+ np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); >+ np->estats.rx_late_collision += readl(base + NvRegRxLateCol); >+ np->estats.rx_runt += readl(base + NvRegRxRunt); >+ np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); >+ np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); >+ np->estats.rx_length_error += readl(base + NvRegRxLenErr); >+ np->estats.rx_unicast += readl(base + NvRegRxUnicast); >+ np->estats.rx_multicast += readl(base + NvRegRxMulticast); >+ np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); >+ np->estats.tx_deferral += readl(base + NvRegTxDef); >+ np->estats.tx_pause += readl(base + NvRegTxPause); >+ np->estats.rx_pause += readl(base + NvRegRxPause); >+ np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); >+ np->estats.rx_packets = >+ np->estats.rx_unicast + >+ np->estats.rx_multicast + >+ np->estats.rx_broadcast; >+ np->estats.rx_errors_total = >+ np->estats.rx_crc_errors + >+ np->estats.rx_over_errors + >+ np->estats.rx_frame_error + >+ (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + >+ np->estats.rx_late_collision + >+ np->estats.rx_runt + >+ np->estats.rx_frame_too_long + >+ np->rx_len_errors; >+ >+ /* copy to net_device stats */ >+ 
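/* (Note: on DEV_HAS_STATISTICS parts the hardware MIB counters read
 * above are authoritative, so they overwrite the software-maintained
 * net_device stats below; the else-branch mirrors the copy in the
 * opposite direction for parts without hardware counters.) */
>+ 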
np->stats.tx_packets = np->estats.tx_packets; >+ np->stats.tx_fifo_errors = np->estats.tx_fifo_errors; >+ np->stats.tx_carrier_errors = np->estats.tx_carrier_errors; >+ np->stats.tx_bytes = np->estats.tx_bytes; >+ np->stats.rx_bytes = np->estats.rx_bytes; >+ np->stats.rx_crc_errors = np->estats.rx_crc_errors; >+ np->stats.rx_over_errors = np->estats.rx_over_errors; >+ np->stats.rx_packets = np->estats.rx_packets; >+ np->stats.rx_errors = np->estats.rx_errors_total; >+ >+ } else { >+ np->estats.tx_packets = np->stats.tx_packets; >+ np->estats.tx_fifo_errors = np->stats.tx_fifo_errors; >+ np->estats.tx_carrier_errors = np->stats.tx_carrier_errors; >+ np->estats.tx_bytes = np->stats.tx_bytes; >+ np->estats.rx_bytes = np->stats.rx_bytes; >+ np->estats.rx_crc_errors = np->stats.rx_crc_errors; >+ np->estats.rx_over_errors = np->stats.rx_over_errors; >+ np->estats.rx_packets = np->stats.rx_packets; >+ np->estats.rx_errors_total = np->stats.rx_errors; >+ } > >- if (!np->in_shutdown) >+ if (!np->in_shutdown && netif_running(dev)) > mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); >+ spin_unlock_irq(&np->lock); >+} >+ >+/* >+ * nv_get_stats: dev->get_stats function >+ * Get latest stats value from the nic. >+ * Called with read_lock(&dev_base_lock) held for read - >+ * only synchronized against unregister_netdevice. >+ */ >+static struct net_device_stats *nv_get_stats(struct net_device *dev) >+{ >+ struct fe_priv *np = get_nvpriv(dev); >+ >+ /* It seems that the nic always generates interrupts and doesn't >+ * accumulate errors internally. Thus the current values in np->stats >+ * are already up to date. >+ */ >+ nv_do_stats_poll((unsigned long)dev); >+ return &np->stats; > } > > static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > strcpy(info->driver, "forcedeth"); > strcpy(info->version, FORCEDETH_VERSION); > strcpy(info->bus_info, pci_name(np->pci_dev)); >@@ -2873,7 +3742,7 @@ > > static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > wolinfo->supported = WAKE_MAGIC; > > spin_lock_irq(&np->lock); >@@ -2884,7 +3753,7 @@ > > static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > u32 flags = 0; > >@@ -2904,7 +3773,7 @@ > > static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > int adv; > > spin_lock_irq(&np->lock); >@@ -2978,8 +3847,9 @@ > > static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > >+ dprintk(KERN_DEBUG "%s: nv_set_settings \n", dev->name); > if (ecmd->port != PORT_MII) > return -EINVAL; > if (ecmd->transceiver != XCVR_EXTERNAL) >@@ -3057,9 +3927,18 @@ > if (netif_running(dev)) > printk(KERN_INFO "%s: link down.\n", dev->name); > bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); >+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) { >+ bmcr |= BMCR_ANENABLE; >+ /* reset the phy in order for settings to stick, >+ * and cause autoneg to start */ >+ if (phy_reset(dev, bmcr)) { >+ printk(KERN_INFO "%s: phy reset failed\n", dev->name); >+ return -EINVAL; >+ } >+ } else { > bmcr |= 
(BMCR_ANENABLE | BMCR_ANRESTART); > mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); >- >+ } > } else { > int adv, bmcr; > >@@ -3099,17 +3978,19 @@ > bmcr |= BMCR_FULLDPLX; > if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) > bmcr |= BMCR_SPEED100; >- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); > if (np->phy_oui == PHY_OUI_MARVELL) { >- /* reset the phy */ >- if (phy_reset(dev)) { >+ /* reset the phy in order for forced mode settings to stick */ >+ if (phy_reset(dev, bmcr)) { > printk(KERN_INFO "%s: phy reset failed\n", dev->name); > return -EINVAL; > } >- } else if (netif_running(dev)) { >- /* Wait a bit and then reconfigure the nic. */ >- udelay(10); >- nv_linkchange(dev); >+ } else { >+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); >+ if (netif_running(dev)) { >+ /* Wait a bit and then reconfigure the nic. */ >+ udelay(10); >+ nv_linkchange(dev); >+ } > } > } > >@@ -3126,13 +4007,13 @@ > > static int nv_get_regs_len(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > return np->register_size; > } > > static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > u32 *rbuf = buf; > int i; >@@ -3146,7 +4027,7 @@ > > static int nv_nway_reset(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > int ret; > > if (np->autoneg) { >@@ -3166,8 +4047,17 @@ > } > > bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); >- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); >- mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); >+ if (np->phy_model == PHY_MODEL_MARVELL_E3016) { >+ bmcr |= BMCR_ANENABLE; >+ /* reset the phy in order for settings to stick*/ >+ if (phy_reset(dev, bmcr)) { >+ printk(KERN_INFO "%s: phy reset failed\n", dev->name); >+ return -EINVAL; >+ } >+ } else { >+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); >+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); >+ } > > if (netif_running(dev)) { > nv_start_rx(dev); >@@ -3182,19 +4072,9 @@ > return ret; > } > >-static int nv_set_tso(struct net_device *dev, u32 value) >-{ >- struct fe_priv *np = netdev_priv(dev); >- >- if ((np->driver_data & DEV_HAS_CHECKSUM)) >- return ethtool_op_set_tso(dev, value); >- else >- return -EOPNOTSUPP; >-} >- > static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > > ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? 
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; > ring->rx_mini_max_pending = 0; >@@ -3209,20 +4089,20 @@ > > static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); >- u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len; >+ u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; > dma_addr_t ring_addr; > > if (ring->rx_pending < RX_RING_MIN || > ring->tx_pending < TX_RING_MIN || > ring->rx_mini_pending != 0 || > ring->rx_jumbo_pending != 0 || >- (np->desc_ver == DESC_VER_1 && >- (ring->rx_pending > RING_MAX_DESC_VER_1 || >+ (np->desc_ver == DESC_VER_1 && >+ (ring->rx_pending > RING_MAX_DESC_VER_1 || > ring->tx_pending > RING_MAX_DESC_VER_1)) || >- (np->desc_ver != DESC_VER_1 && >- (ring->rx_pending > RING_MAX_DESC_VER_2_3 || >+ (np->desc_ver != DESC_VER_1 && >+ (ring->rx_pending > RING_MAX_DESC_VER_2_3 || > ring->tx_pending > RING_MAX_DESC_VER_2_3))) { > return -EINVAL; > } >@@ -3237,12 +4117,10 @@ > sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), > &ring_addr); > } >- rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL); >- rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL); >- tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL); >- tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL); >- tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL); >- if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { >+ rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); >+ tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); >+ >+ if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { > /* fall back to old rings */ > if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { > if(rxtx_ring) >@@ -3255,14 +4133,8 @@ > } > if (rx_skbuff) > kfree(rx_skbuff); >- if (rx_dma) >- kfree(rx_dma); > if (tx_skbuff) > kfree(tx_skbuff); >- if (tx_dma) >- kfree(tx_dma); >- if (tx_dma_len) >- kfree(tx_dma_len); > goto exit; > } > >@@ -3280,12 +4152,12 @@ > /* delete queues */ > free_rings(dev); > } >- >+ > /* set new values */ > np->rx_ring_size = ring->rx_pending; > np->tx_ring_size = ring->tx_pending; >- np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE; >- np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1; >+ np->tx_limit_stop =np->tx_ring_size - TX_LIMIT_DIFFERENCE; >+ np->tx_limit_start =np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1; > if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { > np->rx_ring.orig = (struct ring_desc*)rxtx_ring; > np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; >@@ -3293,18 +4165,12 @@ > np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; > np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; > } >- np->rx_skbuff = (struct sk_buff**)rx_skbuff; >- np->rx_dma = (dma_addr_t*)rx_dma; >- np->tx_skbuff = (struct sk_buff**)tx_skbuff; >- np->tx_dma = (dma_addr_t*)tx_dma; >- np->tx_dma_len = (unsigned int*)tx_dma_len; >+ np->rx_skb = (struct nv_skb_map*)rx_skbuff; >+ np->tx_skb = (struct nv_skb_map*)tx_skbuff; > np->ring_addr = ring_addr; >- >- memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); >- memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); >- memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); >- memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); >- 
memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); >+ >+ memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); >+ memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); > > if (netif_running(dev)) { > /* reinit driver view of the queues */ >@@ -3313,7 +4179,7 @@ > if (!np->in_shutdown) > mod_timer(&np->oom_kick, jiffies + OOM_REFILL); > } >- >+ > /* reinit nic view of the queues */ > writel(np->rx_buf_sz, base + NvRegOffloadConfig); > setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); >@@ -3322,7 +4188,7 @@ > pci_push(base); > writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); > pci_push(base); >- >+ > /* restart engines */ > nv_start_rx(dev); > nv_start_tx(dev); >@@ -3337,7 +4203,7 @@ > > static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > > pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; > pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; >@@ -3346,12 +4212,12 @@ > > static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > int adv, bmcr; > > if ((!np->autoneg && np->duplex == 0) || > (np->autoneg && !pause->autoneg && np->duplex == 0)) { >- printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", >+ printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", > dev->name); > return -EINVAL; > } >@@ -3417,31 +4283,26 @@ > > static u32 nv_get_rx_csum(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >- return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0; >+ struct fe_priv *np = get_nvpriv(dev); >+ return (np->rx_csum) != 0; > } > > static int nv_set_rx_csum(struct net_device *dev, u32 data) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > int retcode = 0; > > if (np->driver_data & DEV_HAS_CHECKSUM) { > >- if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) || >- (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) { >- /* already set or unset */ >- return 0; >- } >- > if (data) { >+ np->rx_csum = 1; > np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; >- } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) { >- np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; > } else { >- printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n"); >- return -EINVAL; >+ np->rx_csum = 0; >+ /* vlan is dependent on rx checksum offload */ >+ if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) >+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; > } > > if (netif_running(dev)) { >@@ -3456,39 +4317,51 @@ > return retcode; > } > >-static int nv_set_tx_csum(struct net_device *dev, u32 data) >+#ifdef NETIF_F_TSO >+static int nv_set_tso(struct net_device *dev, u32 data) > { >- struct fe_priv *np = netdev_priv(dev); >- >- if (np->driver_data & DEV_HAS_CHECKSUM) >- return ethtool_op_set_tx_hw_csum(dev, data); >- else >- return -EOPNOTSUPP; >+ struct fe_priv *np = get_nvpriv(dev); >+ >+ if (np->driver_data & DEV_HAS_CHECKSUM){ >+ return ethtool_op_set_tso(dev, data); >+ }else >+ return -EINVAL; > } >+#endif > > static int nv_set_sg(struct net_device *dev, u32 data) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); >+ >+ if (np->driver_data & DEV_HAS_CHECKSUM){ >+ 
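/* (Note: scatter-gather, like the TSO and tx-checksum hooks around it,
 * is gated on DEV_HAS_CHECKSUM, presumably because NETIF_F_SG is of
 * little benefit unless the hardware can also checksum the assembled
 * frame.) */
>+ 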
return ethtool_op_set_sg(dev, data); >+ }else >+ return -EINVAL; >+} >+ >+static int nv_set_tx_csum(struct net_device *dev, u32 data) >+{ >+ struct fe_priv *np = get_nvpriv(dev); > > if (np->driver_data & DEV_HAS_CHECKSUM) >- return ethtool_op_set_sg(dev, data); >+ return ethtool_op_set_tx_hw_csum(dev, data); > else >- return -EOPNOTSUPP; >+ return -EINVAL; > } > > static int nv_get_stats_count(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > > if (np->driver_data & DEV_HAS_STATISTICS) > return (sizeof(struct nv_ethtool_stats)/sizeof(u64)); > else >- return 0; >+ return NV_STATS_COUNT_SW; > } > > static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > > /* update stats */ > nv_do_stats_poll((unsigned long)dev); >@@ -3498,7 +4371,7 @@ > > static int nv_self_test_count(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > > if (np->driver_data & DEV_HAS_TEST_EXTENDED) > return NV_TEST_COUNT_EXTENDED; >@@ -3508,7 +4381,7 @@ > > static int nv_link_test(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > int mii_status; > > mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); >@@ -3551,7 +4424,7 @@ > > static int nv_interrupt_test(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > int ret = 1; > int testcnt; >@@ -3580,7 +4453,7 @@ > nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); > > /* wait for at least one interrupt */ >- msleep(100); >+ nv_msleep(100); > > spin_lock_irq(&np->lock); > >@@ -3614,7 +4487,7 @@ > > static int nv_loopback_test(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > struct sk_buff *tx_skb, *rx_skb; > dma_addr_t test_dma_addr; >@@ -3673,13 +4546,13 @@ > writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); > pci_push(get_hwbase(dev)); > >- msleep(500); >+ nv_msleep(500); > > /* check for rx of the packet */ > if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { > Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen); > len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); >- >+ > } else { > Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen); > len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); >@@ -3696,17 +4569,17 @@ > } > } > >- if (ret) { >+ if (ret) { > if (len != pkt_len) { > ret = 0; >- dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", >+ dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", > dev->name, len, pkt_len); > } else { >- rx_skb = np->rx_skbuff[0]; >+ rx_skb = np->rx_skb[0].skb; > for (i = 0; i < pkt_len; i++) { > if (rx_skb->data[i] != (u8)(i & 0xff)) { > ret = 0; >- dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", >+ dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", > dev->name, i); > break; > } >@@ -3720,7 +4593,7 @@ > tx_skb->end-tx_skb->data, > PCI_DMA_TODEVICE); > dev_kfree_skb_any(tx_skb); >- >+ > /* stop engines */ > nv_stop_rx(dev); > nv_stop_tx(dev); >@@ -3740,7 +4613,7 @@ > > static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = 
get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > int result; > memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64)); >@@ -3839,8 +4712,6 @@ > .get_regs = nv_get_regs, > .nway_reset = nv_nway_reset, > .get_perm_addr = ethtool_op_get_perm_addr, >- .get_tso = ethtool_op_get_tso, >- .set_tso = nv_set_tso, > .get_ringparam = nv_get_ringparam, > .set_ringparam = nv_set_ringparam, > .get_pauseparam = nv_get_pauseparam, >@@ -3851,6 +4722,10 @@ > .set_tx_csum = nv_set_tx_csum, > .get_sg = ethtool_op_get_sg, > .set_sg = nv_set_sg, >+#ifdef NETIF_F_TSO >+ .get_tso = ethtool_op_get_tso, >+ .set_tso = nv_set_tso, >+#endif > .get_strings = nv_get_strings, > .get_stats_count = nv_get_stats_count, > .get_ethtool_stats = nv_get_ethtool_stats, >@@ -3870,10 +4745,13 @@ > if (grp) { > /* enable vlan on MAC */ > np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; >+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; > } else { > /* disable vlan on MAC */ > np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; > np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; >+ if (!np->rx_csum) >+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; > } > > writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); >@@ -3886,53 +4764,102 @@ > /* nothing to do */ > }; > >+/* The mgmt unit and driver use a semaphore to access the phy during init */ >+static int nv_mgmt_acquire_sema(struct net_device *dev) >+{ >+ u8 __iomem *base = get_hwbase(dev); >+ int i; >+ u32 tx_ctrl, mgmt_sema; >+ >+ for (i = 0; i < 10; i++) { >+ mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; >+ if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) { >+ dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is free\n"); >+ break; >+ } >+ nv_msleep(500); >+ } >+ >+ if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) { >+ dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: sema is not free\n"); >+ return 0; >+ } >+ >+ for (i = 0; i < 2; i++) { >+ tx_ctrl = readl(base + NvRegTransmitterControl); >+ tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; >+ writel(tx_ctrl, base + NvRegTransmitterControl); >+ >+ /* verify that semaphore was acquired */ >+ tx_ctrl = readl(base + NvRegTransmitterControl); >+ if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && >+ ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { >+ dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: acquired sema\n"); >+ return 1; >+ } else >+ udelay(50); >+ } >+ >+ dprintk(KERN_INFO "forcedeth: nv_mgmt_acquire_sema: exit\n"); >+ return 0; >+} >+ >+/* Indicate to mgmt unit whether driver is loaded or not */ >+static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded) >+{ >+ u8 __iomem *base = get_hwbase(dev); >+ u32 tx_ctrl; >+ >+ tx_ctrl = readl(base + NvRegTransmitterControl); >+ if (loaded) >+ tx_ctrl |= NVREG_XMITCTL_HOST_LOADED; >+ else >+ tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED; >+ writel(tx_ctrl, base + NvRegTransmitterControl); >+} >+ > static int nv_open(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base = get_hwbase(dev); > int ret = 1; > int oom, i; > > dprintk(KERN_DEBUG "nv_open: begin\n"); > >- /* 1) erase previous misconfiguration */ >+ /* erase previous misconfiguration */ > if (np->driver_data & DEV_HAS_POWER_CNTRL) > nv_mac_reset(dev); >- /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */ >+ /* stop adapter: ignored, 4.3 seems to be overkill */ > writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); > writel(0, 
base + NvRegMulticastAddrB); > writel(0, base + NvRegMulticastMaskA); > writel(0, base + NvRegMulticastMaskB); > writel(0, base + NvRegPacketFilterFlags); > >- writel(0, base + NvRegTransmitterControl); >- writel(0, base + NvRegReceiverControl); >+ nv_stop_tx(dev); >+ nv_stop_rx(dev); > > writel(0, base + NvRegAdapterControl); > > if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) > writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); > >- /* 2) initialize descriptor rings */ >+ /* initialize descriptor rings */ > set_bufsize(dev); > oom = nv_init_ring(dev); > >- writel(0, base + NvRegLinkSpeed); >- writel(0, base + NvRegUnknownTransmitterReg); > nv_txrx_reset(dev); > writel(0, base + NvRegUnknownSetupReg6); > > np->in_shutdown = 0; > >- /* 3) set mac address */ >- nv_copy_mac_to_hw(dev); >- >- /* 4) give hw rings */ >+ /* give hw rings */ > setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); > writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), > base + NvRegRingSizes); > >- /* 5) continue setup */ >+ /* continue setup */ > writel(np->linkspeed, base + NvRegLinkSpeed); > if (np->desc_ver == DESC_VER_1) > writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); >@@ -3946,11 +4873,11 @@ > NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, > KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); > >- writel(0, base + NvRegUnknownSetupReg4); >+ writel(0, base + NvRegMIIMask); > writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); > writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); > >- /* 6) continue setup */ >+ /* continue setup */ > writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); > writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); > writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); >@@ -3973,7 +4900,7 @@ > writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, > base + NvRegAdapterControl); > writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); >- writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4); >+ writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); > if (np->wolenabled) > writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); > >@@ -4023,15 +4950,14 @@ > if (ret) { > netif_carrier_on(dev); > } else { >- printk("%s: no link during initialization.\n", dev->name); >+ dprintk(KERN_DEBUG "%s: no link during initialization.\n", dev->name); > netif_carrier_off(dev); > } > if (oom) > mod_timer(&np->oom_kick, jiffies + OOM_REFILL); > > /* start statistics timer */ >- if (np->driver_data & DEV_HAS_STATISTICS) >- mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); >+ mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); > > spin_unlock_irq(&np->lock); > >@@ -4043,12 +4969,14 @@ > > static int nv_close(struct net_device *dev) > { >- struct fe_priv *np = netdev_priv(dev); >+ struct fe_priv *np = get_nvpriv(dev); > u8 __iomem *base; > >+ dprintk(KERN_DEBUG "nv_close: begin\n"); > spin_lock_irq(&np->lock); > np->in_shutdown = 1; > spin_unlock_irq(&np->lock); >+ > synchronize_irq(dev->irq); > > del_timer_sync(&np->oom_kick); >@@ -4076,12 +5004,6 @@ > if (np->wolenabled) > nv_start_rx(dev); > >- /* special op: write back the misordered MAC address - otherwise >- * the next nv_probe would see a wrong address. 
>- */ >- writel(np->orig_mac[0], base + NvRegMacAddrA); >- writel(np->orig_mac[1], base + NvRegMacAddrB); >- > /* FIXME: power down nic */ > > return 0; >@@ -4094,14 +5016,19 @@ > unsigned long addr; > u8 __iomem *base; > int err, i; >- u32 powerstate; >+ u32 powerstate, phystate_orig = 0, phystate, txreg; >+ int phyinitialized = 0; > >+ //NVLAN_DISABLE_ALL_FEATURES ; >+ /* modify network device class id */ >+ quirk_nforce_network_class(pci_dev); > dev = alloc_etherdev(sizeof(struct fe_priv)); > err = -ENOMEM; > if (!dev) > goto out; > >- np = netdev_priv(dev); >+ dprintk(KERN_DEBUG "%s:nv_probe: begin\n",dev->name); >+ np = get_nvpriv(dev); > np->pci_dev = pci_dev; > spin_lock_init(&np->lock); > SET_MODULE_OWNER(dev); >@@ -4188,21 +5115,39 @@ > np->pkt_limit = NV_PKTLIMIT_1; > if (id->driver_data & DEV_HAS_LARGEDESC) > np->pkt_limit = NV_PKTLIMIT_2; >+ if (mtu > np->pkt_limit) { >+ printk(KERN_INFO "forcedeth: MTU value of %d is too large. Setting to maximum value of %d\n", >+ mtu, np->pkt_limit); >+ dev->mtu = np->pkt_limit; >+ } else { >+ dev->mtu = mtu; >+ } > > if (id->driver_data & DEV_HAS_CHECKSUM) { >- np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; >- dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; >+ if (rx_checksum_offload) { >+ np->rx_csum = 1; >+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; >+ } >+ >+ if (tx_checksum_offload) >+ dev->features |= NETIF_F_HW_CSUM; >+ >+ if (scatter_gather) >+ dev->features |= NETIF_F_SG; > #ifdef NETIF_F_TSO >- dev->features |= NETIF_F_TSO; >+ if (tso_offload) >+ dev->features |= NETIF_F_TSO; > #endif > } > > np->vlanctl_bits = 0; >- if (id->driver_data & DEV_HAS_VLAN) { >- np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; >- dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; >- dev->vlan_rx_register = nv_vlan_rx_register; >- dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid; >+ if (id->driver_data & DEV_HAS_VLAN && tagging_8021pq) { >+ np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; >+ dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; >+ dev->vlan_rx_register = nv_vlan_rx_register; >+ dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid; >+ /* vlan needs rx checksum support, so force it */ >+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; > } > > np->msi_flags = 0; >@@ -4212,12 +5157,21 @@ > if ((id->driver_data & DEV_HAS_MSI_X) && msix) { > np->msi_flags |= NV_MSI_X_CAPABLE; > } >- >- np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; >+ >+ np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE; >+ if (rx_flow_control == NV_RX_FLOW_CONTROL_ENABLED) >+ np->pause_flags |= NV_PAUSEFRAME_RX_REQ; > if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { >- np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; >+ np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE; >+ if (tx_flow_control == NV_TX_FLOW_CONTROL_ENABLED) >+ np->pause_flags |= NV_PAUSEFRAME_TX_REQ; >+ } >+ if (autoneg == AUTONEG_ENABLE) { >+ np->pause_flags |= NV_PAUSEFRAME_AUTONEG; >+ } else if (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX) { >+ printk(KERN_INFO "forcedeth: speed_duplex of 1000 full cannot be enabled if autoneg is disabled\n"); >+ goto out_relreg; > } >- > > err = -ENOMEM; > np->base = ioremap(addr, np->register_size); >@@ -4227,10 +5181,33 @@ > > dev->irq = pci_dev->irq; > >- np->rx_ring_size = RX_RING_DEFAULT; >- np->tx_ring_size = TX_RING_DEFAULT; >- np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE; >- np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1; >+ if (np->desc_ver == DESC_VER_1) { >+ if (rx_ring_size > RING_MAX_DESC_VER_1) { >+ 
printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n", >+ rx_ring_size, RING_MAX_DESC_VER_1); >+ rx_ring_size = RING_MAX_DESC_VER_1; >+ } >+ if (tx_ring_size > RING_MAX_DESC_VER_1) { >+ printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n", >+ tx_ring_size, RING_MAX_DESC_VER_1); >+ tx_ring_size = RING_MAX_DESC_VER_1; >+ } >+ } else { >+ if (rx_ring_size > RING_MAX_DESC_VER_2_3) { >+ printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n", >+ rx_ring_size, RING_MAX_DESC_VER_2_3); >+ rx_ring_size = RING_MAX_DESC_VER_2_3; >+ } >+ if (tx_ring_size > RING_MAX_DESC_VER_2_3) { >+ printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n", >+ tx_ring_size, RING_MAX_DESC_VER_2_3); >+ tx_ring_size = RING_MAX_DESC_VER_2_3; >+ } >+ } >+ np->rx_ring_size = rx_ring_size; >+ np->tx_ring_size = tx_ring_size; >+ np->tx_limit_stop = tx_ring_size - TX_LIMIT_DIFFERENCE; >+ np->tx_limit_start = tx_ring_size - TX_LIMIT_DIFFERENCE - 1; > > if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { > np->rx_ring.orig = pci_alloc_consistent(pci_dev, >@@ -4247,29 +5224,28 @@ > goto out_unmap; > np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; > } >- np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL); >- np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL); >- np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL); >- np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL); >- np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL); >- if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len) >+ np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL); >+ np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL); >+ if (!np->rx_skb || !np->tx_skb) > goto out_freering; >- memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); >- memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); >- memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); >- memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); >- memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); >+ memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); >+ memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); > > dev->open = nv_open; > dev->stop = nv_close; >- dev->hard_start_xmit = nv_start_xmit; >+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) >+ dev->hard_start_xmit = nv_start_xmit; >+ else >+ dev->hard_start_xmit = nv_start_xmit_optimized; > dev->get_stats = nv_get_stats; > dev->change_mtu = nv_change_mtu; > dev->set_mac_address = nv_set_mac_address; > dev->set_multicast_list = nv_set_multicast; >+ > #ifdef CONFIG_NET_POLL_CONTROLLER > dev->poll_controller = nv_poll_controller; > #endif >+ > SET_ETHTOOL_OPS(dev, &ops); > dev->tx_timeout = nv_tx_timeout; > dev->watchdog_timeo = NV_WATCHDOG_TIMEO; >@@ -4281,12 +5257,29 @@ > np->orig_mac[0] = readl(base + NvRegMacAddrA); > np->orig_mac[1] = readl(base + NvRegMacAddrB); > >+ /* check the workaround bit for correct mac address order */ >+ txreg = readl(base + NvRegTransmitPoll); >+ if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { >+ /* mac address is already in correct order */ >+ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; >+ dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; >+ 
dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; >+ dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; >+ dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; >+ dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; >+ } else { > dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; > dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; > dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; > dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; > dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; > dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; >+ /* set permanent address to be correct as well */ >+ np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + >+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); >+ np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); >+ writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); >+ } > memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); > > if (!is_valid_ether_addr(dev->perm_addr)) { >@@ -4308,10 +5301,12 @@ > dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev), > dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], > dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); >+ /* set mac address */ >+ nv_copy_mac_to_hw(dev); > > /* disable WOL */ > writel(0, base + NvRegWakeUpFlags); >- np->wolenabled = 0; >+ np->wolenabled = wol; > > if (id->driver_data & DEV_HAS_POWER_CNTRL) { > u8 revision_id; >@@ -4353,6 +5348,59 @@ > np->need_linktimer = 0; > } > >+ /* clear phy state and temporarily halt phy interrupts */ >+ writel(0, base + NvRegMIIMask); >+ phystate = readl(base + NvRegAdapterControl); >+ if (phystate & NVREG_ADAPTCTL_RUNNING) { >+ phystate_orig = 1; >+ phystate &= ~NVREG_ADAPTCTL_RUNNING; >+ writel(phystate, base + NvRegAdapterControl); >+ } >+ writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); >+ >+ if (id->driver_data & DEV_HAS_MGMT_UNIT) { >+ writel(NV_UNKNOWN_VAL, base + NvRegPatternCRC); >+ pci_push(base); >+ nv_msleep(500); >+ /* management unit running on the mac? */ >+ np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; >+ if (np->mac_in_use) { >+ u32 mgmt_sync; >+ dprintk(KERN_DEBUG "%s: probe: mac in use\n",dev->name); >+ /* management unit set up the phy already? 
*/ >+ mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK; >+ if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) { >+ dprintk(KERN_DEBUG"%s: probe: sync not ready\n",dev->name); >+ if (!nv_mgmt_acquire_sema(dev)) { >+ dprintk(KERN_DEBUG"%s: probe: could not acquire sema\n",dev->name); >+ for (i = 0; i < 5000; i++) { >+ nv_msleep(1); >+ mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK; >+ if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) >+ continue; >+ if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) { >+ dprintk(KERN_DEBUG"%s: probe: phy inited by SMU 1\n",dev->name); >+ phyinitialized = 1; >+ } >+ dprintk(KERN_DEBUG"%s: probe: breaking out of loop\n",dev->name); >+ break; >+ } >+ } else { >+ /* we need to init the phy */ >+ dprintk(KERN_DEBUG"%s: probe: we need to init phy 1\n",dev->name); >+ } >+ } else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) { >+ dprintk(KERN_DEBUG"%s: probe: phy inited by SMU 2\n",dev->name); >+ /* phy is inited by SMU */ >+ phyinitialized = 1; >+ } else { >+ /* we need to init the phy */ >+ dprintk(KERN_DEBUG"%s: probe: we need to init phy 2\n",dev->name); >+ } >+ } else >+ dprintk(KERN_DEBUG"%s: probe: mac not in use\n",dev->name); >+ } >+ > /* find a suitable phy */ > for (i = 1; i <= 32; i++) { > int id1, id2; >@@ -4369,6 +5417,7 @@ > if (id2 < 0 || id2 == 0xffff) > continue; > >+ np->phy_model = id2 & PHYID2_MODEL_MASK; > id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; > id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; > dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", >@@ -4382,14 +5431,25 @@ > pci_name(pci_dev)); > goto out_error; > } >- >+ >+ if (!phyinitialized) { > /* reset it */ > phy_init(dev); >+ } else { >+ /* see if gigabit phy */ >+ u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); >+ if (mii_status & PHY_GIGABIT) { >+ np->gigabit = PHY_GIGABIT; >+ } >+ } >+ if (id->driver_data & DEV_HAS_MGMT_UNIT) { >+ nv_mgmt_driver_loaded(dev, 1); >+ } > > /* set default link speed settings */ > np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; > np->duplex = 0; >- np->autoneg = 1; >+ np->autoneg = autoneg; > > err = register_netdev(dev); > if (err) { >@@ -4403,6 +5463,10 @@ > return 0; > > out_error: >+ if (phystate_orig) >+ writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); >+ if (np->mac_in_use) >+ nv_mgmt_driver_loaded(dev, 0); > pci_set_drvdata(pci_dev, NULL); > out_freering: > free_rings(dev); >@@ -4421,8 +5485,17 @@ > static void __devexit nv_remove(struct pci_dev *pci_dev) > { > struct net_device *dev = pci_get_drvdata(pci_dev); >+ struct fe_priv *np = get_nvpriv(dev); >+ u8 __iomem *base = get_hwbase(dev); > > unregister_netdev(dev); >+ /* special op: write back the misordered MAC address - otherwise >+ * the next nv_probe would see a wrong address. 
>+ */ >+ writel(np->orig_mac[0], base + NvRegMacAddrA); >+ writel(np->orig_mac[1], base + NvRegMacAddrB); >+ if (np->mac_in_use) >+ nv_mgmt_driver_loaded(dev, 0); > > /* free all structures */ > free_rings(dev); >@@ -4488,43 +5561,43 @@ > }, > { /* MCP55 Ethernet Controller */ > PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), >- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, >+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, > }, > { /* MCP55 Ethernet Controller */ > PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), >- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, >+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, > }, > { /* MCP61 Ethernet Controller */ > PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), >- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, >+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, > }, > { /* MCP61 Ethernet Controller */ > PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), >- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, >+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, > }, > { /* MCP61 Ethernet Controller */ > PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), >- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, >+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, > }, > { /* MCP61 Ethernet Controller */ > PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), >- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, >+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, > }, > { /* MCP65 Ethernet Controller */ > PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), >- .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, >+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, > }, > { /* MCP65 Ethernet Controller */ > PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), >- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, >+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, > }, > { /* MCP65 Ethernet Controller */ > PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), >- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, >+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, > }, > { /* MCP65 Ethernet Controller */ > PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), >- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED, >+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, > }, > {0,}, > }; >@@ -4540,6 +5613,7 @@ > static int __init init_nic(void) > { > printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); >+ dprintk(KERN_DEBUG "DEBUG VERSION\n"); > return pci_module_init(&driver); > } > >@@ -4558,9 +5632,35 @@ > MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0."); > module_param(msix, int, 0); > MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0."); >+ >+module_param(speed_duplex, int, 0); >+MODULE_PARM_DESC(speed_duplex, "PHY speed and duplex settings. Auto = 0, 10mbps half = 1, 10mbps full = 2, 100mbps half = 3, 100mbps full = 4, 1000mbps full = 5."); >+module_param(autoneg, int, 0); >+MODULE_PARM_DESC(autoneg, "PHY autonegotiate is enabled by setting to 1 and disabled by setting to 0."); >+module_param(scatter_gather, int, 0); >+MODULE_PARM_DESC(scatter_gather, "Scatter gather is enabled by setting to 1 and disabled by setting to 0."); >+module_param(tso_offload, int, 0); >+MODULE_PARM_DESC(tso_offload, "TCP Segmentation offload is enabled by setting to 1 and disabled by setting to 0."); >+module_param(mtu, int, 0); >+MODULE_PARM_DESC(mtu, "MTU value. 
Maximum value of 1500 or 9100 depending on hardware."); >+module_param(tx_checksum_offload, int, 0); >+MODULE_PARM_DESC(tx_checksum_offload, "Tx checksum offload is enabled by setting to 1 and disabled by setting to 0."); >+module_param(rx_checksum_offload, int, 0); >+MODULE_PARM_DESC(rx_checksum_offload, "Rx checksum offload is enabled by setting to 1 and disabled by setting to 0."); >+module_param(tx_ring_size, int, 0); >+MODULE_PARM_DESC(tx_ring_size, "Tx ring size. Maximum value of 1024 or 16384 depending on hardware."); >+module_param(rx_ring_size, int, 0); >+MODULE_PARM_DESC(rx_ring_size, "Rx ring size. Maximum value of 1024 or 16384 depending on hardware."); >+module_param(tx_flow_control, int, 0); >+MODULE_PARM_DESC(tx_flow_control, "Tx flow control is enabled by setting to 1 and disabled by setting to 0."); >+module_param(rx_flow_control, int, 0); >+MODULE_PARM_DESC(rx_flow_control, "Rx flow control is enabled by setting to 1 and disabled by setting to 0."); > module_param(dma_64bit, int, 0); > MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); >- >+module_param(wol, int, 0); >+MODULE_PARM_DESC(wol, "Wake-On-LAN is enabled by setting to 1 and disabled by setting to 0."); >+module_param(tagging_8021pq, int, 0); >+MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0."); > MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); > MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); > MODULE_LICENSE("GPL");
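
Note on the nv_probe() changes above: they are the crux of the MAC-address problem this patch addresses. The station address lives in the NvRegMacAddrA/NvRegMacAddrB registers, and the NVREG_TRANSMITPOLL_MAC_ADDR_REV bit selects whether those registers already hold the bytes in transmission order or in the legacy reversed layout; nv_remove() then writes np->orig_mac back so, per the patch's own comment, the next probe does not see a wrong address. The following standalone user-space sketch mirrors the same byte-order logic for illustration only; it is not part of the patch, the helper name nv_decode_mac is made up here, and the sample register values are hypothetical.

    #include <stdio.h>
    #include <stdint.h>

    /* Decode a 6-byte station address from the two 32-bit MAC registers.
     * addr_rev mirrors NVREG_TRANSMITPOLL_MAC_ADDR_REV: when set, the bytes
     * are already stored in transmission order; when clear, they are stored
     * reversed across the two registers (the pre-0.60 layout).
     */
    static void nv_decode_mac(uint32_t mac_a, uint32_t mac_b,
                              int addr_rev, uint8_t addr[6])
    {
        if (addr_rev) {
            addr[0] = (mac_a >>  0) & 0xff;
            addr[1] = (mac_a >>  8) & 0xff;
            addr[2] = (mac_a >> 16) & 0xff;
            addr[3] = (mac_a >> 24) & 0xff;
            addr[4] = (mac_b >>  0) & 0xff;
            addr[5] = (mac_b >>  8) & 0xff;
        } else {
            addr[0] = (mac_b >>  8) & 0xff;
            addr[1] = (mac_b >>  0) & 0xff;
            addr[2] = (mac_a >> 24) & 0xff;
            addr[3] = (mac_a >> 16) & 0xff;
            addr[4] = (mac_a >>  8) & 0xff;
            addr[5] = (mac_a >>  0) & 0xff;
        }
    }

    int main(void)
    {
        /* hypothetical register contents, for demonstration only */
        uint32_t mac_a = 0x12b17700, mac_b = 0x00000d5a;
        uint8_t mac[6];
        int rev;

        for (rev = 0; rev <= 1; rev++) {
            nv_decode_mac(mac_a, mac_b, rev, mac);
            printf("addr_rev=%d -> %02x:%02x:%02x:%02x:%02x:%02x\n", rev,
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        }
        return 0;
    }

Running the sketch prints two byte-reversed readings of the same registers, which is exactly the symptom reported in this bug when the decode order and the MAC_ADDR_REV bit disagree: the driver version that last programmed NvRegTransmitPoll determines which reading is correct, hence the write-back in nv_remove().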