patch-2.4.2 linux/drivers/net/natsemi.c
- Lines: 137
- Date: Tue Feb 13 13:15:04 2001
- Orig file: v2.4.1/linux/drivers/net/natsemi.c
- Orig date: Mon Dec 11 13:38:29 2000
diff -u --recursive --new-file v2.4.1/linux/drivers/net/natsemi.c linux/drivers/net/natsemi.c
@@ -26,6 +26,11 @@
- Bug fixes and better intr performance (Tjeerd)
Version 1.0.2:
- Now reads correct MAC address from eeprom
+ Version 1.0.3:
+ - Eliminate redundant priv->tx_full flag
+ - Call netif_start_queue from dev->tx_timeout
+ - wmb() in start_tx() to flush data
+ - Update Tx locking
*/
@@ -35,7 +40,7 @@
static const char version2[] =
" http://www.scyld.com/network/natsemi.html\n";
static const char version3[] =
-" (unofficial 2.4.x kernel port, version 1.0.2, October 6, 2000 Jeff Garzik, Tjeerd Mulder)\n";
+" (unofficial 2.4.x kernel port, version 1.0.3, January 21, 2001 Jeff Garzik, Tjeerd Mulder)\n";
/* Updated to recommendations in pci-skeleton v2.03. */
/* Automatically extracted configuration info:
@@ -105,7 +110,7 @@
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -187,13 +192,14 @@
The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
-queue slot is empty, it clears the tbusy flag when finished otherwise it sets
-the 'lp->tx_full' flag.
+queue slot is empty, it clears the tbusy flag when finished. Under 2.4, the
+"tbusy flag" is now controlled by netif_{start,stop,wake}_queue() and tested
+by netif_queue_stopped().
The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
-empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
-clears both the tx_full and tbusy flags.
+empty by incrementing the dirty_tx mark. Iff Tx queueing is stopped and Tx
+entries were reaped, the Tx queue is started and scheduled.
IV. Notes
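The rewritten comment describes the 2.4 queue-control helpers that replace the old tbusy/tx_full bookkeeping. As a minimal sketch of the idea (not taken from natsemi.c; "my_priv", its fields and the TX_QUEUE_LEN value here are illustrative), the transmit and interrupt paths cooperate roughly like this:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

#define TX_QUEUE_LEN 10			/* illustrative queue limit */

struct my_priv {
	unsigned int cur_tx, dirty_tx;	/* producer/consumer ring indices */
	spinlock_t lock;
	struct net_device_stats stats;
};

/* Transmit side: stop the queue instead of setting a private tx_full flag. */
static void my_tx_maybe_stop(struct net_device *dev)
{
	struct my_priv *np = dev->priv;

	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
		netif_stop_queue(dev);
}

/* Interrupt side: once Tx entries have been reaped, restart and reschedule. */
static void my_tx_maybe_wake(struct net_device *dev)
{
	struct my_priv *np = dev->priv;

	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);
}
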
@@ -319,7 +325,6 @@
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
- unsigned int tx_full:1; /* The Tx queue is full. */
/* These values are keep track of the transceiver/media in use. */
unsigned int full_duplex:1; /* Full-duplex operation requested. */
unsigned int duplex_lock:1;
@@ -697,7 +702,7 @@
dev->trans_start = jiffies;
np->stats.tx_errors++;
- return;
+ netif_start_queue(dev);
}
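The timeout handler now restarts the queue rather than simply returning, so a Tx hang no longer leaves the device stuck with its queue stopped. A rough sketch of the shape such a handler takes after this change, reusing the my_priv sketch above (my_tx_timeout() is a hypothetical name, and the reset/ring-rebuild step is driver specific):

static void my_tx_timeout(struct net_device *dev)
{
	struct my_priv *np = dev->priv;

	/* ... reset the transmitter and rebuild the Tx ring (driver specific) ... */

	dev->trans_start = jiffies;
	np->stats.tx_errors++;

	/* The queue was stopped while the transmitter was wedged; restart it
	 * so the network core will hand us packets again. */
	netif_start_queue(dev);
}
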
@@ -707,7 +712,6 @@
struct netdev_private *np = (struct netdev_private *)dev->priv;
int i;
- np->tx_full = 0;
np->cur_rx = np->cur_tx = 0;
np->dirty_rx = np->dirty_tx = 0;
@@ -763,11 +767,13 @@
np->cur_tx++;
/* StrongARM: Explicitly cache flush np->tx_ring and skb->data,skb->len. */
+ wmb();
- if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
- np->tx_full = 1;
+ spin_lock_irq(&np->lock);
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
netif_stop_queue(dev);
- }
+ spin_unlock_irq(&np->lock);
+
/* Wake the potentially-idle transmit channel. */
writel(TxOn, dev->base_addr + ChipCmd);
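Two things change in start_tx(): a wmb() ensures the descriptor and buffer writes have reached memory before the chip (or the interrupt handler) can look at them, and the ring-full test now runs under np->lock so it cannot race with the interrupt handler reaping entries and waking the queue. A sketch of the resulting tail of the transmit routine, reusing the my_priv sketch above (ChipCmd and TxOn stand in for the driver's register definitions, and the usual <asm/io.h>/<asm/system.h> includes are assumed):

static int my_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *np = dev->priv;

	/* ... fill the next Tx descriptor from skb and hand it to the chip
	 * by setting its ownership bit ... */
	np->cur_tx++;

	/* Flush descriptor/buffer writes before the chip is prodded and
	 * before the interrupt handler can see the new cur_tx. */
	wmb();

	spin_lock_irq(&np->lock);
	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
		netif_stop_queue(dev);		/* replaces np->tx_full = 1 */
	spin_unlock_irq(&np->lock);

	/* Wake the potentially-idle transmit channel. */
	writel(TxOn, dev->base_addr + ChipCmd);

	dev->trans_start = jiffies;
	return 0;
}
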
@@ -798,9 +804,7 @@
#endif
ioaddr = dev->base_addr;
- np = (struct netdev_private *)dev->priv;
-
- spin_lock(&np->lock);
+ np = dev->priv;
do {
u32 intr_status = readl(ioaddr + IntrStatus);
@@ -818,6 +822,8 @@
if (intr_status & (IntrRxDone | IntrRxErr | IntrRxIdle | IntrRxOverrun))
netdev_rx(dev);
+ spin_lock(&np->lock);
+
for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
int entry = np->dirty_tx % TX_RING_SIZE;
if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
@@ -839,13 +845,14 @@
dev_kfree_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = 0;
}
- if (np->tx_full
+ if (netif_queue_stopped(dev)
&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
/* The ring is no longer full, wake queue. */
- np->tx_full = 0;
netif_wake_queue(dev);
}
+ spin_unlock(&np->lock);
+
/* Abnormal error summary/uncommon events handlers. */
if (intr_status & IntrAbnormalSummary)
netdev_error(dev, intr_status);
@@ -873,8 +880,6 @@
}
}
#endif
-
- spin_unlock(&np->lock);
}
/* This routine is logically part of the interrupt handler, but separated
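
The interrupt handler changes complete the locking rework: instead of holding np->lock for the whole interrupt, the handler now takes it only around the Tx-reap loop and the wake-queue test, the part that races with start_tx() above. A sketch of that section after the patch, again reusing the my_priv names (tx_ring, tx_skbuff, TX_RING_SIZE and DescOwn mirror the driver's own definitions):

static void my_intr_tx_reap(struct net_device *dev)
{
	struct my_priv *np = dev->priv;

	spin_lock(&np->lock);		/* plain spin_lock: we are in the IRQ handler */

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;

		/* Stop at the first descriptor the chip still owns. */
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;

		/* ... record Tx stats for this descriptor ... */
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = 0;
	}

	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);	/* ring has room again: wake and reschedule */

	spin_unlock(&np->lock);
}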