patch-2.3.43 linux/drivers/net/eepro100.c
- Lines: 731
- Date: Thu Feb 10 12:22:03 2000
- Orig file: v2.3.42/linux/drivers/net/eepro100.c
- Orig date: Thu Nov 11 20:11:40 1999
diff -u --recursive --new-file v2.3.42/linux/drivers/net/eepro100.c linux/drivers/net/eepro100.c
@@ -49,9 +49,7 @@
e.g. "options=16" for FD, "options=32" for 100mbps-only. */
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
-#ifdef MODULE
static int debug = -1; /* The debug level */
-#endif
/* A few values that may be tweaked. */
/* The ring sizes should be a power of two for efficiency. */
@@ -81,11 +79,11 @@
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
+#include <linux/timer.h>
#ifdef HAS_PCI_NETIF
#include "pci-netif.h"
#else
@@ -95,6 +93,7 @@
#endif
#endif
#include <linux/spinlock.h>
+#include <linux/init.h>
#include <asm/bitops.h>
#include <asm/io.h>
@@ -103,7 +102,6 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
-#if LINUX_VERSION_CODE > 0x20118 && defined(MODULE)
MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
MODULE_DESCRIPTION("Intel i82557/i82558 PCI EtherExpressPro driver");
MODULE_PARM(debug, "i");
@@ -117,12 +115,8 @@
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(multicast_filter_limit, "i");
-#endif
#define RUN_AT(x) (jiffies + (x))
-/* Condensed bus+endian portability operations. */
-#define virt_to_le32bus(addr) cpu_to_le32(virt_to_bus(addr))
-#define le32bus_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
#if (LINUX_VERSION_CODE < 0x20123)
#define test_and_set_bit(val, addr) set_bit(val, addr)
@@ -356,12 +350,15 @@
CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
};
/* Do atomically if possible. */
-#if defined(__i386__) || defined(__alpha__)
-#define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status)
+#if defined(__i386__) || defined(__alpha__) || defined(__ia64__)
+#define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status)
#elif defined(__powerpc__)
#define clear_suspend(cmd) clear_bit(6, &(cmd)->cmd_status)
#else
-#define clear_suspend(cmd) (cmd)->cmd_status &= cpu_to_le32(~CmdSuspend)
+#if 0
+# error You are probably in trouble: clear_suspend() MUST be atomic.
+#endif
+# define clear_suspend(cmd) (cmd)->cmd_status &= cpu_to_le32(~CmdSuspend)
#endif
enum SCBCmdBits {
@@ -439,11 +436,13 @@
/* Do not change the position (alignment) of the first few elements!
The later elements are grouped for cache locality. */
struct speedo_private {
- struct TxFD tx_ring[TX_RING_SIZE]; /* Commands (usually CmdTxPacket). */
+ struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
/* The addresses of a Tx/Rx-in-place packets/buffers. */
struct sk_buff* tx_skbuff[TX_RING_SIZE];
struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ dma_addr_t rx_ring_dma[RX_RING_SIZE];
+ dma_addr_t tx_ring_dma;
struct descriptor *last_cmd; /* Last command sent. */
unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
spinlock_t lock; /* Group with Tx control cache line. */
@@ -455,14 +454,15 @@
struct net_device *next_module;
void *priv_addr; /* Unaligned address for kfree */
struct enet_statistics stats;
- struct speedo_stats lstats;
+ struct speedo_stats *lstats;
int chip_id;
unsigned char pci_bus, pci_devfn, acpi_pwr;
+ struct pci_dev *pdev;
struct timer_list timer; /* Media selection timer. */
int mc_setup_frm_len; /* The length of an allocated.. */
struct descriptor *mc_setup_frm; /* ..multicast setup frame. */
int mc_setup_busy; /* Avoid double-use of setup frame. */
- int in_interrupt; /* Word-aligned dev->interrupt */
+ dma_addr_t mc_setup_dma;
char rx_mode; /* Current PROMISC/ALLMULTI setting. */
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int full_duplex:1; /* Full-duplex operation requested. */
@@ -474,7 +474,6 @@
unsigned short phy[2]; /* PHY media interfaces available. */
unsigned short advertising; /* Current PHY advertised caps. */
unsigned short partner; /* Link partner caps. */
- long last_reset;
};
/* The parameters for a CmdConfigure operation.
@@ -541,7 +540,6 @@
for (; pci_index < 8; pci_index++) {
unsigned char pci_bus, pci_device_fn, pci_latency;
- u32 pciaddr;
long ioaddr;
int irq;
@@ -556,14 +554,15 @@
{
struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
#ifdef USE_IO
- pciaddr = pdev->resource[1].start;
+ ioaddr = pdev->resource[1].start;
#else
- pciaddr = pdev->resource[0].start;
+ ioaddr = pdev->resource[0].start;
#endif
irq = pdev->irq;
}
#else
{
+ u32 pciaddr;
u8 pci_irq_line;
pcibios_read_config_byte(pci_bus, pci_device_fn,
PCI_INTERRUPT_LINE, &pci_irq_line);
@@ -571,23 +570,30 @@
#ifdef USE_IO
pcibios_read_config_dword(pci_bus, pci_device_fn,
PCI_BASE_ADDRESS_1, &pciaddr);
+ pciaddr &= ~3UL;
#else
pcibios_read_config_dword(pci_bus, pci_device_fn,
PCI_BASE_ADDRESS_0, &pciaddr);
#endif
+ ioaddr = pciaddr;
irq = pci_irq_line;
}
#endif
/* Remove I/O space marker in bit 0. */
- if (pciaddr & 1) {
- ioaddr = pciaddr & ~3UL;
- if (check_region(ioaddr, 32))
- continue;
- } else if ((ioaddr = (long)ioremap(pciaddr & ~0xfUL, 0x1000)) == 0) {
- printk(KERN_INFO "Failed to map PCI address %#x.\n",
- pciaddr);
+#ifdef USE_IO
+ if (check_region(ioaddr, 32))
continue;
+#else
+ {
+ unsigned long orig_ioaddr = ioaddr;
+
+ if ((ioaddr = (long)ioremap(ioaddr & ~0xfUL, 0x1000)) == 0) {
+ printk(KERN_INFO "Failed to map PCI address %#lx.\n",
+ orig_ioaddr);
+ continue;
+ }
}
+#endif
if (speedo_debug > 2)
printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
ioaddr, irq);
@@ -626,17 +632,34 @@
{
struct net_device *dev;
struct speedo_private *sp;
+ struct pci_dev *pdev;
+ unsigned char *tx_ring;
+ dma_addr_t tx_ring_dma;
const char *product;
int i, option;
u16 eeprom[0x100];
int acpi_idle_state = 0;
-#ifndef MODULE
+
static int did_version = 0; /* Already printed version info. */
if (speedo_debug > 0 && did_version++ == 0)
printk(version);
-#endif
+
+ pdev = pci_find_slot(pci_bus, pci_devfn);
+
+ tx_ring = pci_alloc_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
+ + sizeof(struct speedo_stats), &tx_ring_dma);
+ if (!tx_ring) {
+ printk(KERN_ERR "Could not allocate DMA memory.\n");
+ return NULL;
+ }
dev = init_etherdev(NULL, sizeof(struct speedo_private));
+ if (dev == NULL) {
+ pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
+ + sizeof(struct speedo_stats),
+ tx_ring, tx_ring_dma);
+ return NULL;
+ }
if (dev->mem_start > 0)
option = dev->mem_start;
@@ -708,7 +731,7 @@
{
const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
/* The self-test results must be paragraph aligned. */
- s32 str[6], *volatile self_test_results;
+ volatile s32 *self_test_results = (volatile s32 *)tx_ring;
int boguscnt = 16000; /* Timeout for set-test. */
if (eeprom[3] & 0x03)
printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
@@ -742,11 +765,10 @@
((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
}
- /* Perform a system self-test. */
- self_test_results = (s32*) ((((long) str) + 15) & ~0xf);
+ /* Perform a system self-test. Use the tx_ring consistent DMA mapping for it. */
self_test_results[0] = 0;
self_test_results[1] = -1;
- outl(virt_to_bus(self_test_results) | PortSelfTest, ioaddr + SCBPort);
+ outl(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
do {
udelay(10);
} while (self_test_results[1] == -1 && --boguscnt >= 0);
@@ -794,9 +816,13 @@
sp->pci_bus = pci_bus;
sp->pci_devfn = pci_devfn;
+ sp->pdev = pdev;
sp->chip_id = chip_idx;
sp->acpi_pwr = acpi_idle_state;
-
+ sp->tx_ring = (struct TxFD *)tx_ring;
+ sp->tx_ring_dma = tx_ring_dma;
+ sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
+
sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
if (card_idx >= 0) {
if (full_duplex[card_idx] >= 0)
@@ -814,6 +840,8 @@
/* The Speedo-specific entries in the device structure. */
dev->open = &speedo_open;
dev->hard_start_xmit = &speedo_start_xmit;
+ dev->tx_timeout = &speedo_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
dev->stop = &speedo_close;
dev->get_stats = &speedo_get_stats;
dev->set_multicast_list = &set_rx_mode;
@@ -909,8 +937,7 @@
sp->dirty_tx = 0;
sp->last_cmd = 0;
sp->tx_full = 0;
- sp->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
- sp->in_interrupt = 0;
+ spin_lock_init(&sp->lock);
/* .. we can safely take handler calls during init. */
if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev)) {
@@ -943,9 +970,8 @@
/* Fire up the hardware. */
speedo_resume(dev);
- dev->tbusy = 0;
- dev->interrupt = 0;
- dev->start = 1;
+ clear_bit(LINK_STATE_RXSEM, &dev->state);
+ netif_start_queue(dev);
/* Setup the chip and configure the multicast list. */
sp->mc_setup_frm = NULL;
@@ -999,12 +1025,12 @@
wait_for_cmd_done(ioaddr + SCBCmd);
/* Load the statistics block and rx ring addresses. */
- outl(virt_to_bus(&sp->lstats), ioaddr + SCBPointer);
+ outl(sp->tx_ring_dma + sizeof(struct TxFD) * TX_RING_SIZE, ioaddr + SCBPointer);
outb(CUStatsAddr, ioaddr + SCBCmd);
- sp->lstats.done_marker = 0;
+ sp->lstats->done_marker = 0;
wait_for_cmd_done(ioaddr + SCBCmd);
- outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+ outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
ioaddr + SCBPointer);
outb(RxStart, ioaddr + SCBCmd);
wait_for_cmd_done(ioaddr + SCBCmd);
@@ -1019,7 +1045,8 @@
/* Avoid a bug(?!) here by marking the command already completed. */
cur_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
cur_cmd->link =
- virt_to_le32bus(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
+ cpu_to_le32(sp->tx_ring_dma + (sp->cur_tx % TX_RING_SIZE)
+ * sizeof(struct TxFD));
memcpy(cur_cmd->params, dev->dev_addr, 6);
if (sp->last_cmd)
clear_suspend(sp->last_cmd);
@@ -1028,7 +1055,8 @@
/* Start the chip's Tx process and unmask interrupts. */
wait_for_cmd_done(ioaddr + SCBCmd);
- outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
+ outl(sp->tx_ring_dma
+ + (sp->dirty_tx % TX_RING_SIZE) * sizeof(struct TxFD),
ioaddr + SCBPointer);
outw(CUStart, ioaddr + SCBCmd);
}
@@ -1065,12 +1093,6 @@
printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
dev->name, inw(ioaddr + SCBStatus));
}
- /* This has a small false-trigger window. */
- if (test_bit(0, (void*)&dev->tbusy) &&
- (jiffies - dev->trans_start) > TX_TIMEOUT) {
- speedo_tx_timeout(dev);
- sp->last_reset = jiffies;
- }
if (sp->rx_mode < 0 ||
(sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
/* We haven't received a packet in a Long Time. We might have been
@@ -1135,18 +1157,16 @@
skb->dev = dev; /* Mark as being used by this device. */
rxf = (struct RxFD *)skb->tail;
sp->rx_ringp[i] = rxf;
+ sp->rx_ring_dma[i] =
+ pci_map_single(sp->pdev, rxf, PKT_BUF_SZ + sizeof(struct RxFD));
skb_reserve(skb, sizeof(struct RxFD));
if (last_rxf)
- last_rxf->link = virt_to_le32bus(rxf);
+ last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
last_rxf = rxf;
rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
rxf->link = 0; /* None yet. */
- /* This field unused by i82557, we use it as a consistency check. */
-#ifdef final_version
+ /* This field unused by i82557. */
rxf->rx_buf_addr = 0xffffffff;
-#else
- rxf->rx_buf_addr = virt_to_bus(skb->tail);
-#endif
rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
}
sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1175,7 +1195,8 @@
/* Only the command unit has stopped. */
printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
dev->name);
- outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
+ outl(sp->tx_ring_dma
+ + (sp->dirty_tx % TX_RING_SIZE) * sizeof(struct TxFD),
ioaddr + SCBPointer);
outw(CUStart, ioaddr + SCBCmd);
} else {
@@ -1209,22 +1230,6 @@
long ioaddr = dev->base_addr;
int entry;
- /* Block a timer-based transmit from overlapping. This could better be
- done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
- If this ever occurs the queue layer is doing something evil! */
- if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
- int tickssofar = jiffies - dev->trans_start;
- if (tickssofar < TX_TIMEOUT - 2)
- return 1;
- if (tickssofar < TX_TIMEOUT) {
- /* Reap sent packets from the full Tx queue. */
- outw(SCBTriggerIntr, ioaddr + SCBCmd);
- return 1;
- }
- speedo_tx_timeout(dev);
- return 1;
- }
-
/* Caution: the write order is important here, set the base address
with the "ownership" bits last. */
@@ -1240,12 +1245,18 @@
sp->tx_ring[entry].status =
cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
sp->tx_ring[entry].link =
- virt_to_le32bus(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
+ cpu_to_le32(sp->tx_ring_dma
+ + (sp->cur_tx % TX_RING_SIZE)
+ * sizeof(struct TxFD));
sp->tx_ring[entry].tx_desc_addr =
- virt_to_le32bus(&sp->tx_ring[entry].tx_buf_addr0);
+ cpu_to_le32(sp->tx_ring_dma
+ + ((long)&sp->tx_ring[entry].tx_buf_addr0
+ - (long)sp->tx_ring));
/* The data region is always in one buffer descriptor. */
sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
- sp->tx_ring[entry].tx_buf_addr0 = virt_to_le32bus(skb->data);
+ sp->tx_ring[entry].tx_buf_addr0 =
+ cpu_to_le32(pci_map_single(sp->pdev, skb->data,
+ skb->len));
sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
/* Todo: perhaps leave the interrupt bit set if the Tx queue is more
than half full. Argument against: we should be receiving packets
@@ -1255,12 +1266,12 @@
{
struct descriptor *last_cmd = sp->last_cmd;
sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
- clear_suspend(last_cmd);
+ last_cmd->cmd_status &= cpu_to_le32(~(CmdSuspend | CmdIntr));
}
- if (sp->cur_tx - sp->dirty_tx >= TX_QUEUE_LIMIT)
+ if (sp->cur_tx - sp->dirty_tx >= TX_QUEUE_LIMIT) {
sp->tx_full = 1;
- else
- clear_bit(0, (void*)&dev->tbusy);
+ netif_stop_queue(dev);
+ }
spin_unlock_irqrestore(&sp->lock, flags);
}
wait_for_cmd_done(ioaddr + SCBCmd);
@@ -1288,16 +1299,6 @@
ioaddr = dev->base_addr;
sp = (struct speedo_private *)dev->priv;
-#ifndef final_version
- /* A lock to prevent simultaneous entry on SMP machines. */
- if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
- printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
- dev->name);
- sp->in_interrupt = 0; /* Avoid halting machine. */
- return;
- }
- dev->interrupt = 1;
-#endif
do {
status = inw(ioaddr + SCBStatus);
@@ -1319,7 +1320,7 @@
outw(RxResumeNoResources, ioaddr + SCBCmd);
else if ((status & 0x003c) == 0x0008) { /* No resources (why?!) */
/* No idea of what went wrong. Restart the receiver. */
- outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+ outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
ioaddr + SCBPointer);
outw(RxStart, ioaddr + SCBCmd);
}
@@ -1350,10 +1351,18 @@
#if LINUX_VERSION_CODE > 0x20127
sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
#endif
- dev_free_skb(sp->tx_skbuff[entry]);
+ pci_unmap_single(sp->pdev,
+ le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
+ sp->tx_skbuff[entry]->len);
+ dev_kfree_skb_irq(sp->tx_skbuff[entry]);
sp->tx_skbuff[entry] = 0;
- } else if ((status & 0x70000) == CmdNOp)
+ } else if ((status & 0x70000) == CmdNOp) {
+ if (sp->mc_setup_busy)
+ pci_unmap_single(sp->pdev,
+ sp->mc_setup_dma,
+ sp->mc_setup_frm_len);
sp->mc_setup_busy = 0;
+ }
dirty_tx++;
}
@@ -1371,7 +1380,6 @@
&& sp->cur_tx - dirty_tx < TX_QUEUE_LIMIT - 1) {
/* The ring is no longer full, clear tbusy. */
sp->tx_full = 0;
- clear_bit(0, (void*)&dev->tbusy);
spin_unlock(&sp->lock);
netif_wake_queue(dev);
} else
@@ -1391,8 +1399,6 @@
printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
dev->name, inw(ioaddr + SCBStatus));
- dev->interrupt = 0;
- clear_bit(0, (void*)&sp->in_interrupt);
return;
}
@@ -1436,6 +1442,8 @@
skb->dev = dev;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
+ pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
+ PKT_BUF_SZ + sizeof(struct RxFD));
#if 1 || USE_IP_CSUM
/* Packet is in one chunk -- we can copy + cksum. */
eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
@@ -1455,15 +1463,9 @@
}
sp->rx_skbuff[entry] = NULL;
temp = skb_put(skb, pkt_len);
-#if !defined(final_version) && !defined(__powerpc__)
- if (bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr) != temp)
- printk(KERN_ERR "%s: Rx consistency error -- the skbuff "
- "addresses do not match in speedo_rx: %p vs. %p "
- "/ %p.\n", dev->name,
- bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
- skb->head, temp);
-#endif
sp->rx_ringp[entry] = NULL;
+ pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
+ PKT_BUF_SZ + sizeof(struct RxFD));
}
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -1489,16 +1491,19 @@
break; /* Better luck next time! */
}
rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+ sp->rx_ring_dma[entry] =
+ pci_map_single(sp->pdev, rxf, PKT_BUF_SZ
+ + sizeof(struct RxFD));
skb->dev = dev;
skb_reserve(skb, sizeof(struct RxFD));
- rxf->rx_buf_addr = virt_to_bus(skb->tail);
+ rxf->rx_buf_addr = 0xffffffff;
} else {
rxf = sp->rx_ringp[entry];
}
rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
rxf->link = 0; /* None yet. */
rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
- sp->last_rxf->link = virt_to_le32bus(rxf);
+ sp->last_rxf->link = cpu_to_le32(sp->rx_ring_dma[entry]);
sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
sp->last_rxf = rxf;
}
@@ -1514,8 +1519,7 @@
struct speedo_private *sp = (struct speedo_private *)dev->priv;
int i;
- dev->start = 0;
- dev->tbusy = 1;
+ netif_stop_queue(dev);
if (speedo_debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
@@ -1536,6 +1540,9 @@
sp->rx_skbuff[i] = 0;
/* Clear the Rx descriptors. */
if (skb) {
+ pci_unmap_single(sp->pdev,
+ sp->rx_ring_dma[i],
+ PKT_BUF_SZ + sizeof(struct RxFD));
#if LINUX_VERSION_CODE < 0x20100
skb->free = 1;
#endif
@@ -1546,9 +1553,14 @@
for (i = 0; i < TX_RING_SIZE; i++) {
struct sk_buff *skb = sp->tx_skbuff[i];
sp->tx_skbuff[i] = 0;
+
/* Clear the Tx descriptors. */
- if (skb)
+ if (skb) {
+ pci_unmap_single(sp->pdev,
+ le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
+ skb->len);
dev_free_skb(skb);
+ }
}
if (sp->mc_setup_frm) {
kfree(sp->mc_setup_frm);
@@ -1587,20 +1599,20 @@
long ioaddr = dev->base_addr;
/* Update only if the previous dump finished. */
- if (sp->lstats.done_marker == le32_to_cpu(0xA007)) {
- sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats.tx_coll16_errs);
- sp->stats.tx_window_errors += le32_to_cpu(sp->lstats.tx_late_colls);
- sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_underruns);
- sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_lost_carrier);
- /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats.tx_deferred);*/
- sp->stats.collisions += le32_to_cpu(sp->lstats.tx_total_colls);
- sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats.rx_crc_errs);
- sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats.rx_align_errs);
- sp->stats.rx_over_errors += le32_to_cpu(sp->lstats.rx_resource_errs);
- sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats.rx_overrun_errs);
- sp->stats.rx_length_errors += le32_to_cpu(sp->lstats.rx_runt_errs);
- sp->lstats.done_marker = 0x0000;
- if (dev->start) {
+ if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
+ sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
+ sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
+ sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
+ sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
+ /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
+ sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
+ sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
+ sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
+ sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
+ sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
+ sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
+ sp->lstats->done_marker = 0x0000;
+ if (test_bit(LINK_STATE_START, &dev->state)) {
wait_for_cmd_done(ioaddr + SCBCmd);
outw(CUDumpStats, ioaddr + SCBCmd);
}
@@ -1690,7 +1702,8 @@
sp->tx_skbuff[entry] = 0; /* Redundant. */
sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
sp->tx_ring[entry].link =
- virt_to_le32bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
+ cpu_to_le32(sp->tx_ring_dma + ((entry + 1) % TX_RING_SIZE)
+ * sizeof(struct TxFD));
config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
/* Construct a full CmdConfig frame. */
memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
@@ -1726,7 +1739,8 @@
sp->tx_skbuff[entry] = 0;
sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
sp->tx_ring[entry].link =
- virt_to_le32bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
+ cpu_to_le32(sp->tx_ring_dma + ((entry + 1) % TX_RING_SIZE)
+ * sizeof(struct TxFD));
sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
*setup_params++ = cpu_to_le16(dev->mc_count*6);
@@ -1750,12 +1764,17 @@
struct descriptor *mc_setup_frm = sp->mc_setup_frm;
int i;
+ /* If we are busy, someone might be quickly adding to the MC list.
+ Try again later when the list updates stop. */
+ if (sp->mc_setup_busy) {
+ sp->rx_mode = -1;
+ return;
+ }
if (sp->mc_setup_frm_len < 10 + dev->mc_count*6
|| sp->mc_setup_frm == NULL) {
/* Allocate a full setup frame, 10bytes + <max addrs>. */
if (sp->mc_setup_frm)
kfree(sp->mc_setup_frm);
- sp->mc_setup_busy = 0;
sp->mc_setup_frm_len = 10 + multicast_filter_limit*6;
sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC);
if (sp->mc_setup_frm == NULL) {
@@ -1765,12 +1784,6 @@
return;
}
}
- /* If we are busy, someone might be quickly adding to the MC list.
- Try again later when the list updates stop. */
- if (sp->mc_setup_busy) {
- sp->rx_mode = -1;
- return;
- }
mc_setup_frm = sp->mc_setup_frm;
/* Fill the setup frame. */
if (speedo_debug > 1)
@@ -1796,16 +1809,18 @@
entry = sp->cur_tx++ % TX_RING_SIZE;
last_cmd = sp->last_cmd;
sp->last_cmd = mc_setup_frm;
- sp->mc_setup_busy++;
+ sp->mc_setup_busy = 1;
/* Change the command to a NoOp, pointing to the CmdMulti command. */
sp->tx_skbuff[entry] = 0;
sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
- sp->tx_ring[entry].link = virt_to_le32bus(mc_setup_frm);
+ sp->mc_setup_dma = pci_map_single(sp->pdev, mc_setup_frm, sp->mc_setup_frm_len);
+ sp->tx_ring[entry].link = cpu_to_le32(sp->mc_setup_dma);
/* Set the link in the setup frame. */
mc_setup_frm->link =
- virt_to_le32bus(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));
+ cpu_to_le32(sp->tx_ring_dma + ((entry + 1) % TX_RING_SIZE)
+ * sizeof(struct TxFD));
wait_for_cmd_done(ioaddr + SCBCmd);
clear_suspend(last_cmd);
@@ -1819,10 +1834,9 @@
sp->rx_mode = new_rx_mode;
}
-
-#ifdef MODULE
-int init_module(void)
+
+static int __init eepro100_init_module(void)
{
int cards_found;
@@ -1847,7 +1861,7 @@
return 0;
}
-void cleanup_module(void)
+static void __exit eepro100_cleanup_module(void)
{
struct net_device *next_dev;
@@ -1871,22 +1885,9 @@
}
}
-#else /* not MODULE */
+module_init(eepro100_init_module);
+module_exit(eepro100_cleanup_module);
-int eepro100_probe(void)
-{
- int cards_found = 0;
-
- cards_found = eepro100_init();
-
- /* Only emit the version if the driver is being used. */
- if (speedo_debug > 0 && cards_found)
- printk(version);
-
- return cards_found ? 0 : -ENODEV;
-}
-#endif /* MODULE */
-
/*
* Local variables:
* compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS` `[ -f ./pci-netif.h ] && echo -DHAS_PCI_NETIF`"