patch-2.4.19 linux-2.4.19/drivers/net/dl2k.c
Next file: linux-2.4.19/drivers/net/dl2k.h
Previous file: linux-2.4.19/drivers/net/dgrs.c
Back to the patch index
Back to the overall index
- Lines: 1177
- Date: Fri Aug 2 17:39:44 2002
- Orig file: linux-2.4.18/drivers/net/dl2k.c
- Orig date: Mon Feb 25 11:37:59 2002
diff -urN linux-2.4.18/drivers/net/dl2k.c linux-2.4.19/drivers/net/dl2k.c
@@ -1,6 +1,6 @@
/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
- Copyright (c) 2001 by D-Link Corporation
+ Copyright (c) 2001,2002 by D-Link Corporation
Written by Edward Peng.<edward_peng@dlink.com.tw>
Created 03-May-2001, base on Linux' sundance.c.
@@ -27,24 +27,31 @@
Added tx_coalesce paramter.
1.07 2002/01/03 Fixed miscount of RX frame error.
1.08 2002/01/17 Fixed the multicast bug.
+ 1.09 2002/03/07 Move rx-poll-now to re-fill loop.
+ Added rio_timer() to watch rx buffers.
+ 1.10 2002/04/16 Fixed miscount of carrier error.
+ 1.11 2002/05/23 Added ISR schedule scheme.
+ Fixed miscount of rx frame error for DGE-550SX.
+ Fixed VLAN bug.
*/
#include "dl2k.h"
static char version[] __devinitdata =
- KERN_INFO "D-Link DL2000-based linux driver v1.08 2002/01/17\n";
+ KERN_INFO "D-Link DL2000-based linux driver v1.11 2002/05/23\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
-static int tx_flow[MAX_UNITS];
-static int rx_flow[MAX_UNITS];
+static int tx_flow=-1;
+static int rx_flow=-1;
static int copy_thresh;
-static int rx_coalesce = DEFAULT_RXC;
-static int rx_timeout = DEFAULT_RXT;
-static int tx_coalesce = DEFAULT_TXC;
+static int rx_coalesce=10; /* Rx frame count each interrupt */
+static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */
+static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */
+
MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
@@ -53,16 +60,16 @@
MODULE_PARM (media, "1-" __MODULE_STRING (MAX_UNITS) "s");
MODULE_PARM (vlan, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (jumbo, "1-" __MODULE_STRING (MAX_UNITS) "i");
-MODULE_PARM (tx_flow, "1-" __MODULE_STRING (MAX_UNITS) "i");
-MODULE_PARM (rx_flow, "1-" __MODULE_STRING (MAX_UNITS) "i");
+MODULE_PARM (tx_flow, "i");
+MODULE_PARM (rx_flow, "i");
MODULE_PARM (copy_thresh, "i");
MODULE_PARM (rx_coalesce, "i"); /* Rx frame count each interrupt */
MODULE_PARM (rx_timeout, "i"); /* Rx DMA wait time in 64ns increments */
-MODULE_PARM (tx_coalesce, "i"); /* HW xmit count each TxComplete [1-8] */
+MODULE_PARM (tx_coalesce, "i"); /* HW xmit count each TxDMAComplete */
/* Enable the default interrupts */
-#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxComplete| \
+#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
UpdateStats | LinkEvent)
#define EnableInt() \
writew(DEFAULT_INTR, ioaddr + IntEnable)
@@ -71,22 +78,24 @@
static int multicast_filter_limit = 0x40;
static int rio_open (struct net_device *dev);
-static void tx_timeout (struct net_device *dev);
+static void rio_timer (unsigned long data);
+static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
static int start_xmit (struct sk_buff *skb, struct net_device *dev);
static void rio_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
+static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
+static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (long ioaddr, int eep_addr);
-static unsigned get_crc (unsigned char *p, int len);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
@@ -95,9 +104,6 @@
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
u16 data);
-#ifdef RIO_DEBUG
-static int rio_ioctl_ext (struct net_device *dev, struct ioctl_data *iodata);
-#endif
static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -106,7 +112,7 @@
struct netdev_private *np;
static int card_idx;
int chip_idx = ent->driver_data;
- int err, irq = pdev->irq;
+ int err, irq;
long ioaddr;
static int version_printed;
void *ring_space;
@@ -119,6 +125,7 @@
if (err)
return err;
+ irq = pdev->irq;
err = pci_request_regions (pdev, "dl2k");
if (err)
goto err_out_disable;
@@ -146,7 +153,8 @@
np = dev->priv;
np->chip_id = chip_idx;
np->pdev = pdev;
- spin_lock_init (&np->lock);
+ spin_lock_init (&np->tx_lock);
+ spin_lock_init (&np->rx_lock);
/* Parse manual configuration */
np->an_enable = 1;
@@ -195,17 +203,18 @@
}
np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
vlan[card_idx] : 0;
- if (rx_coalesce != 0 && rx_timeout != 0) {
+ if (rx_coalesce > 0 && rx_timeout > 0) {
np->rx_coalesce = rx_coalesce;
np->rx_timeout = rx_timeout;
np->coalesce = 1;
}
- np->tx_flow = (tx_flow[card_idx]) ? 1 : 0;
- np->rx_flow = (rx_flow[card_idx]) ? 1 : 0;
+ np->tx_flow = (tx_flow == 0) ? 0 : 1;
+ np->rx_flow = (rx_flow == 0) ? 0 : 1;
+
if (tx_coalesce < 1)
tx_coalesce = 1;
- if (tx_coalesce > 8)
- tx_coalesce = 8;
+ if (tx_coalesce > TX_RING_SIZE-1)
+ tx_coalesce = TX_RING_SIZE - 1;
}
dev->open = &rio_open;
dev->hard_start_xmit = &start_xmit;
@@ -213,7 +222,7 @@
dev->get_stats = &get_stats;
dev->set_multicast_list = &set_multicast;
dev->do_ioctl = &rio_ioctl;
- dev->tx_timeout = &tx_timeout;
+ dev->tx_timeout = &rio_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
dev->change_mtu = &change_mtu;
#if 0
@@ -243,6 +252,7 @@
/* Fiber device? */
np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
+ np->link_status = 0;
/* Set media and reset PHY */
if (np->phy_media) {
/* default 1000mbps_fd for fiber deivices */
@@ -261,6 +271,7 @@
np->an_enable = 1;
mii_set_media (dev);
}
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
/* Reset all logic functions */
writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
@@ -276,6 +287,15 @@
dev->name, np->name,
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
+ if (tx_coalesce > 1)
+ printk(KERN_INFO "tx_coalesce:\t%d packets\n",
+ tx_coalesce);
+ if (np->coalesce)
+ printk(KERN_INFO "rx_coalesce:\t%d packets\n"
+ KERN_INFO "rx_timeout: \t%d ns\n",
+ np->rx_coalesce, np->rx_timeout*640);
+ if (np->vlan)
+ printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
return 0;
err_out_unmap_rx:
@@ -341,7 +361,7 @@
}
/* Check CRC */
- crc = ~get_crc (sromdata, 256 - 4);
+ crc = ~ether_crc_le(256 - 4, sromdata);
if (psrom->crc != crc) {
printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name);
return -1;
@@ -422,10 +442,14 @@
ioaddr + RxDMAIntCtrl);
}
/* Set RIO to poll every N*320nsec. */
- writeb (0xff, ioaddr + RxDMAPollPeriod);
+ writeb (0x20, ioaddr + RxDMAPollPeriod);
writeb (0xff, ioaddr + TxDMAPollPeriod);
- netif_start_queue (dev);
- writel (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl);
+ writeb (0x30, ioaddr + RxDMABurstThresh);
+ writeb (0x30, ioaddr + RxDMAUrgentThresh);
+
+ /* clear statistics */
+ clear_stats (dev);
+
/* VLAN supported */
if (np->vlan) {
/* priority field in RxDMAIntCtrl */
@@ -444,42 +468,76 @@
/* Enable default interrupts */
EnableInt ();
- /* clear statistics */
- get_stats (dev);
+ init_timer (&np->timer);
+ np->timer.expires = jiffies + 1*HZ;
+ np->timer.data = (unsigned long) dev;
+ np->timer.function = &rio_timer;
+ add_timer (&np->timer);
+
+ /* Start Tx/Rx */
+ writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
+ ioaddr + MACCtrl);
+
+ netif_start_queue (dev);
return 0;
}
-static void
-tx_timeout (struct net_device *dev)
+static void
+rio_timer (unsigned long data)
{
+ struct net_device *dev = (struct net_device *)data;
struct netdev_private *np = dev->priv;
+ unsigned int entry;
+ int next_tick = 1*HZ;
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->rx_lock, flags);
+ /* Recover rx ring exhausted error */
+ if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
+ printk(KERN_INFO "Try to recover rx ring exhausted...\n");
+ /* Re-allocate skbuffs to fill the descriptor ring */
+ for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
+ struct sk_buff *skb;
+ entry = np->old_rx % RX_RING_SIZE;
+ /* Dropped packets don't need to re-allocate */
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = dev_alloc_skb (np->rx_buf_sz);
+ if (skb == NULL) {
+ np->rx_ring[entry].fraginfo = 0;
+ printk (KERN_INFO
+ "%s: Still unable to re-allocate Rx skbuff.#%d\n",
+ dev->name, entry);
+ break;
+ }
+ np->rx_skbuff[entry] = skb;
+ skb->dev = dev;
+ /* 16 byte align the IP header */
+ skb_reserve (skb, 2);
+ np->rx_ring[entry].fraginfo =
+ cpu_to_le64 (pci_map_single
+ (np->pdev, skb->tail, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
+ }
+ np->rx_ring[entry].fraginfo |=
+ cpu_to_le64 (np->rx_buf_sz) << 48;
+ np->rx_ring[entry].status = 0;
+ } /* end for */
+ } /* end if */
+ spin_unlock_irqrestore (&np->rx_lock, flags);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void
+rio_tx_timeout (struct net_device *dev)
+{
long ioaddr = dev->base_addr;
printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
dev->name, readl (ioaddr + TxStatus));
- /* Free used tx skbuffs */
- for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
- int entry = np->old_tx % TX_RING_SIZE;
- struct sk_buff *skb;
-
- if (!(np->tx_ring[entry].status & TFDDone))
- break;
- skb = np->tx_skbuff[entry];
- pci_unmap_single (np->pdev,
- np->tx_ring[entry].fraginfo,
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq (skb);
- np->tx_skbuff[entry] = 0;
- }
+ rio_free_tx(dev, 0);
dev->if_port = 0;
dev->trans_start = jiffies;
- np->stats.tx_errors++;
- /* If the ring is no longer full, clear tx_full and
- call netif_wake_queue() */
- if (np->tx_full && np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1) {
- np->tx_full = 0;
- netif_wake_queue (dev);
- }
}
/* allocate and initialize Tx and Rx descriptors */
@@ -489,7 +547,6 @@
struct netdev_private *np = dev->priv;
int i;
- np->tx_full = 0;
np->cur_rx = np->cur_tx = 0;
np->old_rx = np->old_tx = 0;
np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
@@ -500,16 +557,14 @@
np->tx_ring[i].status = cpu_to_le64 (TFDDone);
np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
((i+1)%TX_RING_SIZE) *
- sizeof (struct
- netdev_desc));
+ sizeof (struct netdev_desc));
}
/* Initialize Rx descriptors */
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
((i + 1) % RX_RING_SIZE) *
- sizeof (struct
- netdev_desc));
+ sizeof (struct netdev_desc));
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
np->rx_skbuff[i] = 0;
@@ -530,8 +585,8 @@
skb_reserve (skb, 2); /* 16 byte align the IP header. */
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
- cpu_to_le64 (pci_map_single
- (np->pdev, skb->tail, np->rx_buf_sz,
+ cpu_to_le64 ( pci_map_single (
+ np->pdev, skb->tail, np->rx_buf_sz,
PCI_DMA_FROMDEVICE));
np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
}
@@ -550,18 +605,17 @@
struct netdev_desc *txdesc;
unsigned entry;
u32 ioaddr;
- int tx_shift;
- unsigned long flags;
+ u64 tfc_vlan_tag = 0;
+ if (np->link_status == 0) { /* Link Down */
+ dev_kfree_skb(skb);
+ return 0;
+ }
ioaddr = dev->base_addr;
entry = np->cur_tx % TX_RING_SIZE;
np->tx_skbuff[entry] = skb;
txdesc = &np->tx_ring[entry];
- /* Set TFDDone to avoid TxDMA gather this descriptor */
- txdesc->status = cpu_to_le64 (TFDDone);
- txdesc->status |=
- cpu_to_le64 (entry | WordAlignDisable | (1 << FragCountShift));
#if 0
if (skb->ip_summed == CHECKSUM_HW) {
txdesc->status |=
@@ -575,31 +629,33 @@
(cpu_to_le64 (np->vlan) << 32) |
(cpu_to_le64 (skb->priority) << 45);
}
-
- /* Send one packet each time at 10Mbps mode */
- /* Tx coalescing loop do not exceed 8 */
- if (entry % tx_coalesce == 0 || np->speed == 10)
- txdesc->status |= cpu_to_le64 (TxIndicate);
txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
skb->len,
PCI_DMA_TODEVICE));
txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;
- /* Clear TFDDone, then TxDMA start to send this descriptor */
- txdesc->status &= ~cpu_to_le64 (TFDDone);
-
- DEBUG_TFD_DUMP (np);
+ /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
+ * Work around: Always use 1 descriptor in 10Mbps mode */
+ if (entry % tx_coalesce == 0 || np->speed == 10)
+ txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
+ WordAlignDisable |
+ TxDMAIndicate |
+ (1 << FragCountShift));
+ else
+ txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
+ WordAlignDisable |
+ (1 << FragCountShift));
/* TxDMAPollNow */
writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
- np->cur_tx++;
- if (np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1 && np->speed != 10) {
+ /* Schedule ISR */
+ writel(10000, ioaddr + CountDown);
+ np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
+ if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
+ < TX_QUEUE_LEN - 1 && np->speed != 10) {
/* do nothing */
- } else {
- spin_lock_irqsave(&np->lock, flags);
- np->tx_full = 1;
+ } else if (!netif_queue_stopped(dev)) {
netif_stop_queue (dev);
- spin_unlock_irqrestore (&np->lock, flags);
}
/* The first TFDListPtr */
@@ -609,14 +665,6 @@
writel (0, dev->base_addr + TFDListPtr1);
}
- if (np->old_tx > TX_RING_SIZE) {
- spin_lock_irqsave (&np->lock, flags);
- tx_shift = TX_RING_SIZE;
- np->old_tx -= tx_shift;
- np->cur_tx -= tx_shift;
- spin_unlock_irqrestore (&np->lock, flags);
- }
-
/* NETDEV WATCHDOG timer */
dev->trans_start = jiffies;
return 0;
@@ -633,61 +681,79 @@
ioaddr = dev->base_addr;
np = dev->priv;
- spin_lock(&np->lock);
while (1) {
int_status = readw (ioaddr + IntStatus);
writew (int_status, ioaddr + IntStatus);
int_status &= DEFAULT_INTR;
- if (int_status == 0)
+ if (int_status == 0 || --cnt < 0)
break;
/* Processing received packets */
if (int_status & RxDMAComplete)
receive_packet (dev);
- /* TxComplete interrupt */
- if ((int_status & TxComplete) || np->tx_full) {
+ /* TxDMAComplete interrupt */
+ if ((int_status & (TxDMAComplete|IntRequested))) {
int tx_status;
tx_status = readl (ioaddr + TxStatus);
if (tx_status & 0x01)
tx_error (dev, tx_status);
/* Free used tx skbuffs */
- for (;np->cur_tx - np->old_tx > 0; np->old_tx++) {
- int entry = np->old_tx % TX_RING_SIZE;
- struct sk_buff *skb;
-
- if (!(np->tx_ring[entry].status & TFDDone))
- break;
- skb = np->tx_skbuff[entry];
- pci_unmap_single (np->pdev,
- np->tx_ring[entry].fraginfo,
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq (skb);
- np->tx_skbuff[entry] = 0;
- }
- }
- /* If the ring is no longer full, clear tx_full and
- call netif_wake_queue() */
- if (np->tx_full && np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1) {
- if (np->speed != 10 || int_status & TxComplete) {
- np->tx_full = 0;
- netif_wake_queue (dev);
- }
+ rio_free_tx (dev, 1);
}
/* Handle uncommon events */
if (int_status &
- (IntRequested | HostError | LinkEvent | UpdateStats))
+ (HostError | LinkEvent | UpdateStats))
rio_error (dev, int_status);
- /* If too much interrupts here, disable all interrupts except
- IntRequest. When CountDown down to 0, IntRequest will
- be caught by rio_error() to recovery the interrupts */
- if (--cnt < 0) {
- get_stats (dev);
- writel (1, ioaddr + CountDown);
- writew (IntRequested, ioaddr + IntEnable);
+ }
+ if (np->cur_tx != np->old_tx)
+ writel (100, ioaddr + CountDown);
+}
+
+static void
+rio_free_tx (struct net_device *dev, int irq)
+{
+ struct netdev_private *np = (struct netdev_private *) dev->priv;
+ int entry = np->old_tx % TX_RING_SIZE;
+ int tx_use = 0;
+ long flag = 0;
+
+ if (irq)
+ spin_lock_irqsave(&np->tx_lock, flag);
+ else
+ spin_lock(&np->tx_lock);
+ /* Free used tx skbuffs */
+ while (entry != np->cur_tx) {
+ struct sk_buff *skb;
+
+ if (!(np->tx_ring[entry].status & TFDDone))
break;
- }
+ skb = np->tx_skbuff[entry];
+ pci_unmap_single (np->pdev,
+ np->tx_ring[entry].fraginfo,
+ skb->len, PCI_DMA_TODEVICE);
+ if (irq)
+ dev_kfree_skb_irq (skb);
+ else
+ dev_kfree_skb (skb);
+
+ np->tx_skbuff[entry] = 0;
+ entry = (entry + 1) % TX_RING_SIZE;
+ tx_use++;
+ }
+ if (irq)
+ spin_unlock_irqrestore(&np->tx_lock, flag);
+ else
+ spin_unlock(&np->tx_lock);
+ np->old_tx = entry;
+
+ /* If the ring is no longer full, clear tx_full and
+ call netif_wake_queue() */
+
+ if (netif_queue_stopped(dev) &&
+ ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
+ < TX_QUEUE_LEN - 1 || np->speed == 10)) {
+ netif_wake_queue (dev);
}
- spin_unlock(&np->lock);
}
static void
@@ -700,11 +766,10 @@
np = dev->priv;
- frame_id = (tx_status & 0xffff0000) >> 16;
+ frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
np->stats.tx_errors++;
- np->stats.tx_dropped++;
/* Ttransmit Underrun */
if (tx_status & 0x10) {
np->stats.tx_fifo_errors++;
@@ -719,20 +784,7 @@
break;
mdelay (1);
}
- /* Free completed descriptors */
- for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
- int entry = np->old_tx % TX_RING_SIZE;
- struct sk_buff *skb;
- if (!(np->tx_ring[entry].status & TFDDone))
- break;
-
- skb = np->tx_skbuff[entry];
- pci_unmap_single (np->pdev, np->tx_ring[entry].fraginfo,
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq (skb);
- np->tx_skbuff[entry] = 0;
- }
-
+ rio_free_tx (dev, 1);
/* Reset TFDListPtr */
writel (np->tx_ring_dma +
np->old_tx * sizeof (struct netdev_desc),
@@ -755,14 +807,13 @@
/* Let TxStartThresh stay default value */
}
/* Maximum Collisions */
-#ifdef ETHER_STATS
- if (tx_status & 0x08)
+#ifdef ETHER_STATS
+ if (tx_status & 0x08)
np->stats.collisions16++;
#else
- if (tx_status & 0x08)
+ if (tx_status & 0x08)
np->stats.collisions++;
#endif
-
/* Restart the Tx */
writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
}
@@ -772,14 +823,8 @@
{
struct netdev_private *np = (struct netdev_private *) dev->priv;
int entry = np->cur_rx % RX_RING_SIZE;
- int cnt = np->old_rx + RX_RING_SIZE - np->cur_rx;
- int rx_shift;
- if (np->old_rx > RX_RING_SIZE) {
- rx_shift = RX_RING_SIZE;
- np->old_rx -= rx_shift;
- np->cur_rx -= rx_shift;
- }
- DEBUG_RFD_DUMP (np, 1);
+ int cnt = 30;
+
/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
while (1) {
struct netdev_desc *desc = &np->rx_ring[entry];
@@ -795,20 +840,19 @@
frame_status = le64_to_cpu (desc->status);
if (--cnt < 0)
break;
- DEBUG_PKT_DUMP (np, pkt_len);
pci_dma_sync_single (np->pdev, desc->fraginfo, np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
/* Update rx error statistics, drop packet. */
- if (frame_status & 0x003f0000) {
+ if (frame_status & RFS_Errors) {
np->stats.rx_errors++;
- if (frame_status & 0x00300000)
+ if (frame_status & (RxRuntFrame | RxLengthError))
np->stats.rx_length_errors++;
- if (frame_status & 0x00010000)
- np->stats.rx_fifo_errors++;
- if (frame_status & 0x00060000)
- np->stats.rx_frame_errors++;
- if (frame_status & 0x00080000)
+ if (frame_status & RxFCSError)
np->stats.rx_crc_errors++;
+ if (frame_status & RxAlignmentError && np->speed != 1000)
+ np->stats.rx_frame_errors++;
+ if (frame_status & RxFIFOOverrun)
+ np->stats.rx_fifo_errors++;
} else {
struct sk_buff *skb;
@@ -829,30 +873,33 @@
skb_put (skb, pkt_len);
}
skb->protocol = eth_type_trans (skb, dev);
-#if 0
+#if 0
/* Checksum done by hw, but csum value unavailable. */
- if (!(frame_status & (TCPError | UDPError | IPError))) {
+ if (np->pci_rev_id >= 0x0c &&
+ !(frame_status & (TCPError | UDPError | IPError))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
+ }
#endif
netif_rx (skb);
dev->last_rx = jiffies;
}
- entry = (++np->cur_rx) % RX_RING_SIZE;
-
+ entry = (entry + 1) % RX_RING_SIZE;
}
+ spin_lock(&np->rx_lock);
+ np->cur_rx = entry;
/* Re-allocate skbuffs to fill the descriptor ring */
- for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
+ entry = np->old_rx;
+ while (entry != np->cur_rx) {
struct sk_buff *skb;
- entry = np->old_rx % RX_RING_SIZE;
/* Dropped packets don't need to re-allocate */
if (np->rx_skbuff[entry] == NULL) {
skb = dev_alloc_skb (np->rx_buf_sz);
if (skb == NULL) {
np->rx_ring[entry].fraginfo = 0;
- printk (KERN_ERR
- "%s: Allocate Rx buffer error!",
- dev->name);
+ printk (KERN_INFO
+ "%s: receive_packet: "
+ "Unable to re-allocate Rx skbuff.#%d\n",
+ dev->name, entry);
break;
}
np->rx_skbuff[entry] = skb;
@@ -867,13 +914,10 @@
np->rx_ring[entry].fraginfo |=
cpu_to_le64 (np->rx_buf_sz) << 48;
np->rx_ring[entry].status = 0;
+ entry = (entry + 1) % RX_RING_SIZE;
}
-
- /* RxDMAPollNow */
- writel (readl (dev->base_addr + DMACtrl) | 0x00000010,
- dev->base_addr + DMACtrl);
-
- DEBUG_RFD_DUMP (np, 2);
+ np->old_rx = entry;
+ spin_unlock(&np->rx_lock);
return 0;
}
@@ -884,14 +928,6 @@
struct netdev_private *np = dev->priv;
u16 macctrl;
- /* Stop the down counter and recovery the interrupt */
- if (int_status & IntRequested) {
- writew (0, ioaddr + IntEnable);
- writel (0, ioaddr + CountDown);
- /* Enable default interrupts */
- EnableInt ();
- }
-
/* Link change event */
if (int_status & LinkEvent) {
if (mii_wait_link (dev, 10) == 0) {
@@ -901,14 +937,19 @@
else
mii_get_media (dev);
macctrl = 0;
+ macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
macctrl |= (np->full_duplex) ? DuplexSelect : 0;
macctrl |= (np->tx_flow) ?
TxFlowControlEnable : 0;
macctrl |= (np->rx_flow) ?
RxFlowControlEnable : 0;
writew(macctrl, ioaddr + MACCtrl);
+ np->link_status = 1;
+ netif_carrier_on(dev);
} else {
printk (KERN_INFO "%s: Link off\n", dev->name);
+ np->link_status = 0;
+ netif_carrier_off(dev);
}
}
@@ -932,41 +973,100 @@
{
long ioaddr = dev->base_addr;
struct netdev_private *np = dev->priv;
- u16 temp1;
- u16 temp2;
int i;
+ unsigned int stat_reg;
+
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
+
np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
- temp1 = readw (ioaddr + FrameLostRxError);
- np->stats.rx_errors += temp1;
- np->stats.rx_missed_errors += temp1;
- np->stats.tx_dropped += readw (ioaddr + FramesAbortXSColls);
- temp1 = readl (ioaddr + SingleColFrames) +
- readl (ioaddr + MultiColFrames) + readl (ioaddr + LateCollisions);
- temp2 = readw (ioaddr + CarrierSenseErrors);
- np->stats.tx_carrier_errors += temp2;
- np->stats.tx_errors += readw (ioaddr + FramesWEXDeferal) +
- readl (ioaddr + FramesWDeferredXmt) + temp2;
-
- /* detailed rx_error */
- np->stats.rx_length_errors += readw (ioaddr + FrameTooLongErrors);
- np->stats.rx_crc_errors += readw (ioaddr + FrameCheckSeqError);
+
+ np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
+ np->stats.collisions += readl (ioaddr + SingleColFrames)
+ + readl (ioaddr + MultiColFrames);
+
+ /* detailed tx errors */
+ stat_reg = readw (ioaddr + FramesAbortXSColls);
+ np->stats.tx_aborted_errors += stat_reg;
+ np->stats.tx_errors += stat_reg;
+
+ stat_reg = readw (ioaddr + CarrierSenseErrors);
+ np->stats.tx_carrier_errors += stat_reg;
+ np->stats.tx_errors += stat_reg;
/* Clear all other statistic register. */
- readw (ioaddr + InRangeLengthErrors);
- readw (ioaddr + MacControlFramesXmtd);
+ readl (ioaddr + McstOctetXmtOk);
readw (ioaddr + BcstFramesXmtdOk);
readl (ioaddr + McstFramesXmtdOk);
+ readw (ioaddr + BcstFramesRcvdOk);
+ readw (ioaddr + MacControlFramesRcvd);
+ readw (ioaddr + FrameTooLongErrors);
+ readw (ioaddr + InRangeLengthErrors);
+ readw (ioaddr + FramesCheckSeqErrors);
+ readw (ioaddr + FramesLostRxErrors);
+ readl (ioaddr + McstOctetXmtOk);
readl (ioaddr + BcstOctetXmtOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readl (ioaddr + FramesWDeferredXmt);
+ readl (ioaddr + LateCollisions);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readw (ioaddr + MacControlFramesXmtd);
+ readw (ioaddr + FramesWEXDeferal);
+
+
+ for (i = 0x100; i <= 0x150; i += 4)
+ readl (ioaddr + i);
+ readw (ioaddr + TxJumboFrames);
+ readw (ioaddr + RxJumboFrames);
+ readw (ioaddr + TCPCheckSumErrors);
+ readw (ioaddr + UDPCheckSumErrors);
+ readw (ioaddr + IPCheckSumErrors);
+ return &np->stats;
+}
+
+static int
+clear_stats (struct net_device *dev)
+{
+ long ioaddr = dev->base_addr;
+ int i;
+
+ /* All statistics registers need to be acknowledged,
+ else statistic overflow could cause problems */
+ readl (ioaddr + FramesRcvOk);
+ readl (ioaddr + FramesXmtOk);
+ readl (ioaddr + OctetRcvOk);
+ readl (ioaddr + OctetXmtOk);
+
+ readl (ioaddr + McstFramesRcvdOk);
+ readl (ioaddr + SingleColFrames);
+ readl (ioaddr + MultiColFrames);
+ readl (ioaddr + LateCollisions);
+ /* detailed rx errors */
+ readw (ioaddr + FrameTooLongErrors);
+ readw (ioaddr + InRangeLengthErrors);
+ readw (ioaddr + FramesCheckSeqErrors);
+ readw (ioaddr + FramesLostRxErrors);
+
+ /* detailed tx errors */
+ readw (ioaddr + FramesAbortXSColls);
+ readw (ioaddr + CarrierSenseErrors);
+
+ /* Clear all other statistic register. */
readl (ioaddr + McstOctetXmtOk);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readw (ioaddr + BcstFramesRcvdOk);
readw (ioaddr + MacControlFramesRcvd);
- readw (ioaddr + BcstFramesRcvOk);
- readl (ioaddr + McstFramesRcvOk);
- readl (ioaddr + BcstOctetRcvOk);
+ readl (ioaddr + McstOctetXmtOk);
+ readl (ioaddr + BcstOctetXmtOk);
+ readl (ioaddr + McstFramesXmtdOk);
+ readl (ioaddr + FramesWDeferredXmt);
+ readw (ioaddr + BcstFramesXmtdOk);
+ readw (ioaddr + MacControlFramesXmtd);
+ readw (ioaddr + FramesWEXDeferal);
for (i = 0x100; i <= 0x150; i += 4)
readl (ioaddr + i);
@@ -975,9 +1075,10 @@
readw (ioaddr + TCPCheckSumErrors);
readw (ioaddr + UDPCheckSumErrors);
readw (ioaddr + IPCheckSumErrors);
- return &np->stats;
+ return 0;
}
+
int
change_mtu (struct net_device *dev, int new_mtu)
{
@@ -993,23 +1094,6 @@
return 0;
}
-#define CRC_POLY 0xedb88320
-static unsigned
-get_crc (unsigned char *p, int len)
-{
- int bit;
- unsigned char byte;
- unsigned crc = 0xffffffff;
-
- while (--len >= 0) {
- byte = *p++;
- for (bit = 0; bit < 8; bit++, byte >>= 1) {
- crc = (crc >> 1) ^ (((crc ^ byte) & 1) ? CRC_POLY : 0);
- }
- }
- return crc;
-}
-
static void
set_multicast (struct net_device *dev)
{
@@ -1024,7 +1108,7 @@
hash_table[0] = hash_table[1] = 0;
/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
- hash_table[1] |= 0x02000000;
+ hash_table[1] |= cpu_to_le32(0x02000000);
if (dev->flags & IFF_PROMISC) {
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
@@ -1038,11 +1122,16 @@
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist=mclist->next) {
- crc = get_crc (mclist->dmi_addr, ETH_ALEN);
- for (index=0, bit=0; bit<6; bit++, crc<<=1) {
- if (crc & 0x80000000) index |= 1 << bit;
- }
+ i++, mclist=mclist->next) {
+
+ crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+
+ /* The inverted high significant 6 bits of CRC are
+ used as an index to hashtable */
+ for (index = 0, bit = 0; bit < 6; bit++)
+ if (test_bit(31 - bit, &crc))
+ set_bit(bit, &index);
+
hash_table[index / 32] |= (1 << (index % 32));
}
} else {
@@ -1064,58 +1153,44 @@
int phy_addr;
struct netdev_private *np = dev->priv;
struct mii_data *miidata = (struct mii_data *) &rq->ifr_data;
-#ifdef RIO_DEBUG
- struct ioctl_data *iodata = (struct ioctl_data *) (rq->ifr_data);
-#endif
- u16 *data = (u16 *) & rq->ifr_data;
+
struct netdev_desc *desc;
int i;
phy_addr = np->phy_addr;
switch (cmd) {
case SIOCDEVPRIVATE:
-#ifdef RIO_DEBUG
- if (rio_ioctl_ext (dev, iodata) != 0)
- return -EOPNOTSUPP;
break;
-#else
- return -EOPNOTSUPP;
-#endif
+
case SIOCDEVPRIVATE + 1:
miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
break;
case SIOCDEVPRIVATE + 2:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
break;
case SIOCDEVPRIVATE + 3:
- np->rx_debug = (data[0] <= 7) ? data[0] : 0;
- printk ("rx_debug = %d\n", np->rx_debug);
break;
case SIOCDEVPRIVATE + 4:
- np->tx_debug = (data[0] <= 7) ? data[0] : 0;
- printk ("tx_debug = %d\n", np->tx_debug);
break;
case SIOCDEVPRIVATE + 5:
- np->tx_full = 1;
netif_stop_queue (dev);
break;
case SIOCDEVPRIVATE + 6:
- np->tx_full = 0;
netif_wake_queue (dev);
break;
case SIOCDEVPRIVATE + 7:
printk
("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
- np->tx_full, np->cur_tx, np->old_tx, np->cur_rx,
+ netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
np->old_rx);
break;
case SIOCDEVPRIVATE + 8:
+ printk("TX ring:\n");
for (i = 0; i < TX_RING_SIZE; i++) {
desc = &np->tx_ring[i];
printk
- ("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
+ ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
+ i,
(u32) (np->tx_ring_dma + i * sizeof (*desc)),
(u32) desc->next_desc,
(u32) desc->status, (u32) (desc->fraginfo >> 32),
@@ -1124,106 +1199,13 @@
}
printk ("\n");
break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-#ifdef RIO_DEBUG
-int
-rio_ioctl_ext (struct net_device *dev, struct ioctl_data *iodata)
-{
- struct netdev_private *np = dev->priv;
- int phy_addr = np->phy_addr;
- u32 hi, lo;
- int i;
- BMCR_t bmcr;
- BMSR_t bmsr;
-
- if (iodata == NULL)
- goto invalid_cmd;
- if (strcmp (iodata->signature, "rio") != 0)
- goto invalid_cmd;
-
- switch (iodata->cmd) {
- case 0:
- for (i = 0; i < TX_RING_SIZE; i++) {
- hi = np->tx_ring[i].status >> 32;
- lo = np->tx_ring[i].status;
- printk ("TFC=%08x %08x \n", hi, lo);
-
- }
- break;
- case 1:
- for (i = 0; i < RX_RING_SIZE; i++) {
- hi = np->rx_ring[i].status >> 32;
- lo = np->rx_ring[i].status;
- printk ("RFS=%08x %08x \n", hi, lo);
- }
- break;
- case 2:
- break;
- case 3:
- if (iodata->data != NULL)
- np->tx_debug = iodata->data[0];
- break;
- case 4:
- /* Soft reset PHY */
- mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
- bmcr.image = 0;
- bmcr.bits.an_enable = 1;
- bmcr.bits.reset = 1;
- mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
- break;
- case 5:
- mii_write (dev, phy_addr, MII_BMCR, 0x1940);
- mdelay (10);
- mii_write (dev, phy_addr, MII_BMCR, 0x1940);
- mdelay (100); /* wait a certain time */
- break;
- case 6:
- /* 5) Set media and Power Up */
- bmcr.image = 0;
- bmcr.bits.power_down = 1;
- if (np->an_enable) {
- bmcr.bits.an_enable = 1;
- } else {
- if (np->speed == 100) {
- bmcr.bits.speed100 = 1;
- bmcr.bits.speed1000 = 0;
- printk ("Manual 100 Mbps, ");
- } else if (np->speed == 10) {
- bmcr.bits.speed100 = 0;
- bmcr.bits.speed1000 = 0;
- printk ("Manual 10 Mbps, ");
- }
- if (np->full_duplex) {
- bmcr.bits.duplex_mode = 1;
- printk ("Full duplex. \n");
- } else {
- bmcr.bits.duplex_mode = 0;
- printk ("Half duplex.\n");
- }
- }
- mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
- break;
- case 7:
- bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
- bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
- printk ("BMCR=%x BMSR=%x LinkUp=%d\n",
- bmcr.image, bmsr.image, bmsr.bits.link_status);
- break;
default:
return -EOPNOTSUPP;
}
return 0;
-
- invalid_cmd:
- return -1;
}
-#endif
+
#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
@@ -1393,10 +1375,11 @@
printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
}
if (negotiate.bits.pause) {
- np->tx_flow = 1;
- np->rx_flow = 1;
+ np->tx_flow &= 1;
+ np->rx_flow &= 1;
} else if (negotiate.bits.asymmetric) {
- np->rx_flow = 1;
+ np->tx_flow = 0;
+ np->rx_flow &= 1;
}
/* else tx_flow, rx_flow = user select */
} else {
@@ -1545,10 +1528,11 @@
np->full_duplex = 0;
}
if (negotiate.bits.pause) {
- np->tx_flow = 1;
- np->rx_flow = 1;
+ np->tx_flow &= 1;
+ np->rx_flow &= 1;
} else if (negotiate.bits.asymmetric) {
- np->rx_flow = 1;
+ np->tx_flow = 0;
+ np->rx_flow &= 1;
}
/* else tx_flow, rx_flow = user select */
} else {
@@ -1647,7 +1631,8 @@
writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
synchronize_irq ();
free_irq (dev->irq, dev);
-
+ del_timer_sync (&np->timer);
+
/* Free all the skbuffs in the queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].status = 0;
@@ -1697,10 +1682,10 @@
}
static struct pci_driver rio_driver = {
- name:"dl2k",
- id_table:rio_pci_tbl,
- probe:rio_probe1,
- remove:__devexit_p(rio_remove1),
+ name: "dl2k",
+ id_table: rio_pci_tbl,
+ probe: rio_probe1,
+ remove: __devexit_p(rio_remove1),
};
static int __init
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)