patch-2.4.18 linux/drivers/net/sungem.c
- Lines: 750
- Date: Tue Feb 5 17:30:24 2002
- Orig file: linux.orig/drivers/net/sungem.c
- Orig date: Mon Feb 18 20:18:39 2002
diff -Naur -X /home/marcelo/lib/dontdiff linux.orig/drivers/net/sungem.c linux/drivers/net/sungem.c
@@ -1,4 +1,4 @@
-/* $Id: sungem.c,v 1.43 2001/12/05 08:40:54 davem Exp $
+/* $Id: sungem.c,v 1.44.2.5 2002/02/01 21:45:52 davem Exp $
* sungem.c: Sun GEM ethernet driver.
*
* Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com)
@@ -10,6 +10,10 @@
* - Get rid of all those nasty mdelay's and replace them
* with schedule_timeout.
* - Implement WOL
+ * - Currently, forced Gb mode is only supported on bcm54xx
+ * PHY for which I use the SPD2 bit of the control register.
+ * On m1011 PHY, I can't force as I don't have the specs, but
+ * I can at least detect gigabit with autoneg.
*/
#include <linux/module.h>
@@ -57,6 +61,10 @@
#include "sungem.h"
+#define DEFAULT_MSG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
#define DRV_NAME "sungem"
#define DRV_VERSION "0.96"
#define DRV_RELDATE "11/17/01"
@@ -70,16 +78,18 @@
MODULE_LICENSE("GPL");
MODULE_PARM(gem_debug, "i");
-MODULE_PARM_DESC(gem_debug, "(ignored)");
+MODULE_PARM_DESC(gem_debug, "bitmapped message enable number");
MODULE_PARM(link_mode, "i");
+MODULE_PARM_DESC(link_mode, "default link mode");
+int gem_debug = -1;
static int link_mode;
static u16 link_modes[] __devinitdata = {
BMCR_ANENABLE, /* 0 : autoneg */
0, /* 1 : 10bt half duplex */
BMCR_SPEED100, /* 2 : 100bt half duplex */
- BMCR_SPD2, /* verify this */ /* 3 : 1000bt half duplex */
+ BMCR_SPD2, /* bcm54xx only */ /* 3 : 1000bt half duplex */
BMCR_FULLDPLX, /* 4 : 10bt full duplex */
BMCR_SPEED100|BMCR_FULLDPLX, /* 5 : 100bt full duplex */
BMCR_SPD2|BMCR_FULLDPLX /* 6 : 1000bt full duplex */
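The gem_debug parameter is no longer ignored: it is now a NETIF_MSG_* bitmask, and when left at its default of -1 the driver falls back to DEFAULT_MSG (driver, probe and link messages). A minimal sketch of what the netif_msg_*() gates used throughout this patch reduce to (illustrative only; struct gem and its msg_enable field are the driver's own, the helper name is the conventional one):

	/* Sketch: a netif_msg_link(gp) test is just a bit check against
	 * the per-device mask, which defaults to DEFAULT_MSG above.
	 */
	static void example_link_message(struct gem *gp)
	{
		if (gp->msg_enable & NETIF_MSG_LINK)
			printk(KERN_INFO "%s: link state changed\n",
			       gp->dev->name);
	}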
@@ -88,12 +98,6 @@
#define GEM_MODULE_NAME "gem"
#define PFX GEM_MODULE_NAME ": "
-#ifdef GEM_DEBUG
-int gem_debug = GEM_DEBUG;
-#else
-int gem_debug = 1;
-#endif
-
static struct pci_device_id gem_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
@@ -109,6 +113,8 @@
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{0, }
};
@@ -174,6 +180,8 @@
static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}
static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
@@ -181,6 +189,10 @@
u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
u32 pcs_miistat;
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
+ gp->dev->name, pcs_istat);
+
if (!(pcs_istat & PCS_ISTAT_LSC)) {
printk(KERN_ERR "%s: PCS irq but no link status change???\n",
dev->name);
@@ -230,6 +242,10 @@
{
u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
+ gp->dev->name, txmac_stat);
+
/* Defer timer expiration is quite normal,
* don't even log the event.
*/
@@ -274,12 +290,26 @@
static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
+ int ret = 0;
+
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
+ gp->dev->name, rxmac_stat);
if (rxmac_stat & MAC_RXSTAT_OFLW) {
- printk(KERN_ERR "%s: RX MAC fifo overflow.\n",
- dev->name);
+ u32 smac = readl(gp->regs + MAC_SMACHINE);
+
+ printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
+ dev->name, smac);
gp->net_stats.rx_over_errors++;
gp->net_stats.rx_fifo_errors++;
+
+ if (((smac >> 24) & 0x7) == 0x7) {
+ /* Due to a bug, the chip is hung in this case
+ * and a full reset is necessary.
+ */
+ ret = 1;
+ }
}
if (rxmac_stat & MAC_RXSTAT_ACE)
@@ -294,13 +324,17 @@
/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
* events.
*/
- return 0;
+ return ret;
}
static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
u32 mac_cstat = readl(gp->regs + MAC_CSTAT);
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
+ gp->dev->name, mac_cstat);
+
/* This interrupt is just for pause frame and pause
* tracking. It is useful for diagnostics and debug
* but probably by default we will mask these events.
@@ -401,11 +435,17 @@
{
if (gem_status & GREG_STAT_RXNOBUF) {
/* Frame arrived, no free RX buffers available. */
+ if (netif_msg_rx_err(gp))
+ printk(KERN_DEBUG "%s: no buffer for rx frame\n",
+ gp->dev->name);
gp->net_stats.rx_dropped++;
}
if (gem_status & GREG_STAT_RXTAGERR) {
/* corrupt RX tag framing */
+ if (netif_msg_rx_err(gp))
+ printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
+ gp->dev->name);
gp->net_stats.rx_errors++;
goto do_reset;
@@ -444,7 +484,7 @@
return 0;
do_reset:
- gp->reset_task_pending = 1;
+ gp->reset_task_pending = 2;
schedule_task(&gp->reset_task);
return 1;
@@ -454,6 +494,10 @@
{
int entry, limit;
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
+ gp->dev->name, gem_status);
+
entry = gp->tx_old;
limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
while (entry != limit) {
@@ -463,6 +507,9 @@
u32 dma_len;
int frag;
+ if (netif_msg_tx_done(gp))
+ printk(KERN_DEBUG "%s: tx done, slot %d\n",
+ gp->dev->name, entry);
skb = gp->tx_skbs[entry];
if (skb_shinfo(skb)->nr_frags) {
int last = entry + skb_shinfo(skb)->nr_frags;
@@ -534,6 +581,10 @@
{
int entry, drops;
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
+ gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
+
entry = gp->rx_new;
drops = 0;
for (;;) {
@@ -785,6 +836,9 @@
if (TX_BUFFS_AVAIL(gp) <= 0)
netif_stop_queue(dev);
+ if (netif_msg_tx_queued(gp))
+ printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
+ dev->name, entry, skb->len);
writel(gp->tx_new, gp->regs + TXDMA_KICK);
spin_unlock_irq(&gp->lock);
@@ -958,6 +1012,14 @@
*fd = 1;
if (val & (LPA_100FULL | LPA_100HALF))
*spd = 100;
+
+ if (gp->phy_mod == phymod_m1011) {
+ val = phy_read(gp, 0x0a);
+ if (val & 0xc00)
+ *spd = 1000;
+ if (val & 0x800)
+ *fd = 1;
+ }
}
}
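In the m1011 branch above, register 0x0a is the standard 1000BASE-T status register: bit 11 (0x0800) means the link partner can do 1000BASE-T full duplex and bit 10 (0x0400) half duplex, which is why 0xc00 detects a gigabit partner and 0x800 selects full duplex. With the symbolic names later kernels provide in <linux/mii.h> (treat the names as an assumption for this tree, which is why the patch uses raw constants), the same check would read:

	#define MII_STAT1000	0x0a	/* 1000BASE-T status register */
	#define LPA_1000FULL	0x0800	/* partner capable of 1000BT full duplex */
	#define LPA_1000HALF	0x0400	/* partner capable of 1000BT half duplex */

	val = phy_read(gp, MII_STAT1000);
	if (val & (LPA_1000FULL | LPA_1000HALF))
		*spd = 1000;
	if (val & LPA_1000FULL)
		*fd = 1;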
@@ -992,8 +1054,9 @@
speed = 1000;
}
- printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
- gp->dev->name, speed, (full_duplex ? "full" : "half"));
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
+ gp->dev->name, speed, (full_duplex ? "full" : "half"));
val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
if (full_duplex) {
@@ -1058,15 +1121,21 @@
static int gem_mdio_link_not_up(struct gem *gp)
{
+ u16 val;
+
if (gp->lstate == link_force_ret) {
- printk(KERN_INFO "%s: Autoneg failed again, keeping"
- " forced mode\n", gp->dev->name);
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: Autoneg failed again, keeping"
+ " forced mode\n", gp->dev->name);
phy_write(gp, MII_BMCR, gp->link_fcntl);
gp->timer_ticks = 5;
gp->lstate = link_force_ok;
} else if (gp->lstate == link_aneg) {
- u16 val = phy_read(gp, MII_BMCR);
+ val = phy_read(gp, MII_BMCR);
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: switching to forced 100bt\n",
+ gp->dev->name);
/* Try forced modes. */
val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
val &= ~(BMCR_FULLDPLX);
@@ -1079,14 +1148,16 @@
* If already at 10Mbps, warn user about the
* situation every 10 ticks.
*/
- u16 val = phy_read(gp, MII_BMCR);
+ val = phy_read(gp, MII_BMCR);
if (val & BMCR_SPEED100) {
val &= ~BMCR_SPEED100;
phy_write(gp, MII_BMCR, val);
gp->timer_ticks = 5;
- } else {
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: switching to forced 10bt\n",
+ gp->dev->name);
+ } else
return 1;
- }
}
return 0;
}
@@ -1115,7 +1186,8 @@
/* Reset the chip & rings */
gem_stop(gp);
gem_init_rings(gp, 0);
- gem_init_hw(gp, 0);
+ gem_init_hw(gp,
+ (gp->reset_task_pending == 2));
netif_wake_queue(gp->dev);
}
@@ -1139,22 +1211,16 @@
if (gp->phy_type == phy_mii_mdio0 ||
gp->phy_type == phy_mii_mdio1) {
u16 val = phy_read(gp, MII_BMSR);
+ u16 cntl = phy_read(gp, MII_BMCR);
int up;
/* When using autoneg, we really wait for ANEGCOMPLETE or we may
* get a "transcient" incorrect link state
*/
-#if 0
- {
- u16 cntl = phy_read(gp, MII_BMCR);
- if (cntl & BMCR_ANENABLE)
- up = (val & (BMSR_ANEGCOMPLETE | BMSR_LSTATUS)) == (BMSR_ANEGCOMPLETE | BMSR_LSTATUS);
- else
- up = (val & BMSR_LSTATUS) != 0;
- }
-#else
- up = (val & BMSR_LSTATUS) != 0;
-#endif
+ if (cntl & BMCR_ANENABLE)
+ up = (val & (BMSR_ANEGCOMPLETE | BMSR_LSTATUS)) == (BMSR_ANEGCOMPLETE | BMSR_LSTATUS);
+ else
+ up = (val & BMSR_LSTATUS) != 0;
if (up) {
/* Ok, here we got a link. If we had it due to a forced
* fallback, and we were configured for autoneg, we do
@@ -1165,8 +1231,9 @@
gp->lstate = link_force_ret;
gp->link_fcntl = phy_read(gp, MII_BMCR);
gp->timer_ticks = 5;
- printk(KERN_INFO "%s: Got link after fallback, retrying autoneg"
- " once...\n", gp->dev->name);
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: Got link after fallback, retrying"
+ " autoneg once...\n", gp->dev->name);
phy_write(gp, MII_BMCR,
gp->link_fcntl | BMCR_ANENABLE | BMCR_ANRESTART);
} else if (gp->lstate != link_up) {
@@ -1182,7 +1249,9 @@
*/
if (gp->lstate == link_up) {
gp->lstate = link_down;
- printk(KERN_INFO "%s: Link down\n", gp->dev->name);
+ if (netif_msg_link(gp))
+ printk(KERN_INFO "%s: Link down\n",
+ gp->dev->name);
gp->reset_task_pending = 1;
schedule_task(&gp->reset_task);
restart = 1;
@@ -1527,6 +1596,11 @@
gem_init_bcm5411_phy(gp);
gp->gigabit_capable = 1;
break;
+ case 0x1410c60:
+ printk("M1011 (Marvel ?)\n");
+ gp->phy_mod = phymod_m1011;
+ gp->gigabit_capable = 1;
+ break;
case 0x18074c0:
printk("Lucent\n");
@@ -1539,7 +1613,7 @@
break;
default:
- printk("Unknown\n");
+ printk("Unknown (Using generic mode)\n");
gp->phy_mod = phymod_generic;
break;
};
@@ -1610,6 +1684,12 @@
writel(val, gp->regs + PCS_SCTRL);
gp->gigabit_capable = 1;
}
+
+ /* BMCR_SPD2 is a broadcom 54xx specific thing afaik */
+ if (gp->phy_mod != phymod_bcm5400 && gp->phy_mod != phymod_bcm5401 &&
+ gp->phy_mod != phymod_bcm5411)
+ gp->link_cntl &= ~BMCR_SPD2;
+
}
static void gem_init_dma(struct gem *gp)
@@ -1651,6 +1731,58 @@
#define CRC_POLYNOMIAL_LE 0xedb88320UL /* Ethernet CRC, little endian */
+static u32
+gem_setup_multicast(struct gem *gp)
+{
+ u32 rxcfg = 0;
+ int i;
+
+ if ((gp->dev->flags & IFF_ALLMULTI) ||
+ (gp->dev->mc_count > 256)) {
+ for (i=0; i<16; i++)
+ writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
+ rxcfg |= MAC_RXCFG_HFE;
+ } else if (gp->dev->flags & IFF_PROMISC) {
+ rxcfg |= MAC_RXCFG_PROM;
+ } else {
+ u16 hash_table[16];
+ u32 crc, poly = CRC_POLYNOMIAL_LE;
+ struct dev_mc_list *dmi = gp->dev->mc_list;
+ int i, j, bit, byte;
+
+ for (i = 0; i < 16; i++)
+ hash_table[i] = 0;
+
+ for (i = 0; i < gp->dev->mc_count; i++) {
+ char *addrs = dmi->dmi_addr;
+
+ dmi = dmi->next;
+
+ if (!(*addrs & 1))
+ continue;
+
+ crc = 0xffffffffU;
+ for (byte = 0; byte < 6; byte++) {
+ for (bit = *addrs++, j = 0; j < 8; j++, bit >>= 1) {
+ int test;
+
+ test = ((bit ^ crc) & 0x01);
+ crc >>= 1;
+ if (test)
+ crc = crc ^ poly;
+ }
+ }
+ crc >>= 24;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ }
+ for (i=0; i<16; i++)
+ writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
+ rxcfg |= MAC_RXCFG_HFE;
+ }
+
+ return rxcfg;
+}
+
static void gem_init_mac(struct gem *gp)
{
unsigned char *e = &gp->dev->dev_addr[0];
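The new gem_setup_multicast() consolidates the hash-filter setup that was previously duplicated in gem_init_mac() and gem_set_multicast() (both copies are removed below). The filter is 256 bits wide, spread over sixteen 16-bit MAC_HASH registers: each multicast address is run through a little-endian CRC-32 and the top eight bits of the result pick the filter bit, with the bit order inside a register reversed (the 15 - (crc & 0xf) term, which also differs from the old code). A stand-alone sketch of the per-address computation, written for an ordinary userspace build just to make the arithmetic visible:

	#include <stdint.h>

	/* Return the filter-bit index (0..255) used for a multicast address:
	 * CRC-32, little-endian polynomial 0xedb88320, over the six address
	 * bytes, keeping only the top eight bits of the result.  The register
	 * number is idx >> 4 and the bit within it is 15 - (idx & 0xf).
	 */
	static unsigned int gem_hash_index(const uint8_t addr[6])
	{
		uint32_t crc = 0xffffffffU;
		int byte, bit;

		for (byte = 0; byte < 6; byte++) {
			uint8_t cur = addr[byte];
			for (bit = 0; bit < 8; bit++, cur >>= 1) {
				if ((cur ^ crc) & 1)
					crc = (crc >> 1) ^ 0xedb88320UL;
				else
					crc >>= 1;
			}
		}
		return crc >> 24;
	}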
@@ -1691,75 +1823,7 @@
writel(0, gp->regs + MAC_AF21MSK);
writel(0, gp->regs + MAC_AF0MSK);
- rxcfg = 0;
- if ((gp->dev->flags & IFF_ALLMULTI) ||
- (gp->dev->mc_count > 256)) {
- writel(0xffff, gp->regs + MAC_HASH0);
- writel(0xffff, gp->regs + MAC_HASH1);
- writel(0xffff, gp->regs + MAC_HASH2);
- writel(0xffff, gp->regs + MAC_HASH3);
- writel(0xffff, gp->regs + MAC_HASH4);
- writel(0xffff, gp->regs + MAC_HASH5);
- writel(0xffff, gp->regs + MAC_HASH6);
- writel(0xffff, gp->regs + MAC_HASH7);
- writel(0xffff, gp->regs + MAC_HASH8);
- writel(0xffff, gp->regs + MAC_HASH9);
- writel(0xffff, gp->regs + MAC_HASH10);
- writel(0xffff, gp->regs + MAC_HASH11);
- writel(0xffff, gp->regs + MAC_HASH12);
- writel(0xffff, gp->regs + MAC_HASH13);
- writel(0xffff, gp->regs + MAC_HASH14);
- writel(0xffff, gp->regs + MAC_HASH15);
- } else if (gp->dev->flags & IFF_PROMISC) {
- rxcfg |= MAC_RXCFG_PROM;
- } else {
- u16 hash_table[16];
- u32 crc, poly = CRC_POLYNOMIAL_LE;
- struct dev_mc_list *dmi = gp->dev->mc_list;
- int i, j, bit, byte;
-
- for (i = 0; i < 16; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < gp->dev->mc_count; i++) {
- char *addrs = dmi->dmi_addr;
-
- dmi = dmi->next;
-
- if (!(*addrs & 1))
- continue;
-
- crc = 0xffffffffU;
- for (byte = 0; byte < 6; byte++) {
- for (bit = *addrs++, j = 0; j < 8; j++, bit >>= 1) {
- int test;
-
- test = ((bit ^ crc) & 0x01);
- crc >>= 1;
- if (test)
- crc = crc ^ poly;
- }
- }
- crc >>= 24;
- hash_table[crc >> 4] |= 1 << (crc & 0xf);
- }
- writel(hash_table[0], gp->regs + MAC_HASH0);
- writel(hash_table[1], gp->regs + MAC_HASH1);
- writel(hash_table[2], gp->regs + MAC_HASH2);
- writel(hash_table[3], gp->regs + MAC_HASH3);
- writel(hash_table[4], gp->regs + MAC_HASH4);
- writel(hash_table[5], gp->regs + MAC_HASH5);
- writel(hash_table[6], gp->regs + MAC_HASH6);
- writel(hash_table[7], gp->regs + MAC_HASH7);
- writel(hash_table[8], gp->regs + MAC_HASH8);
- writel(hash_table[9], gp->regs + MAC_HASH9);
- writel(hash_table[10], gp->regs + MAC_HASH10);
- writel(hash_table[11], gp->regs + MAC_HASH11);
- writel(hash_table[12], gp->regs + MAC_HASH12);
- writel(hash_table[13], gp->regs + MAC_HASH13);
- writel(hash_table[14], gp->regs + MAC_HASH14);
- writel(hash_table[15], gp->regs + MAC_HASH15);
- }
+ rxcfg = gem_setup_multicast(gp);
writel(0, gp->regs + MAC_NCOLL);
writel(0, gp->regs + MAC_FASUCC);
@@ -1827,9 +1891,8 @@
u32 mif_cfg;
/* On Apple's sungem, we can't rely on registers as the chip
- * was been powered down by the firmware. We do the PHY lookup
- * when the interface is opened and we configure the driver
- * with known values.
+ * was been powered down by the firmware. The PHY is looked
+ * up later on.
*/
if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
gp->phy_type = phy_mii_mdio0;
@@ -1954,7 +2017,8 @@
pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
- udelay(100);
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout((21 * HZ) / 1000);
pci_read_config_word(gp->pdev, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
@@ -2024,7 +2088,8 @@
val & ~MII_BCM5201_AUXMODE2_LOWPOWER);
#endif
phy_write(gp, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE);
- }
+ } else if (gp->phy_mod == phymod_m1011)
+ phy_write(gp, MII_BMCR, BMCR_PDOWN);
/* According to Apple, we must set the MDIO pins to this begnign
* state or we may 1) eat more current, 2) damage some PHYs
@@ -2052,16 +2117,14 @@
schedule();
/* Actually stop the chip */
- if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
+ if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
gem_stop_phy(gp);
- else
- gem_stop(gp);
-
#ifdef CONFIG_ALL_PPC
- /* Power down the chip */
- if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
+ /* Power down the chip */
gem_apple_powerdown(gp);
#endif /* CONFIG_ALL_PPC */
+ } else
+ gem_stop(gp);
}
static void gem_pm_task(void *data)
@@ -2123,6 +2186,7 @@
*/
if (request_irq(gp->pdev->irq, gem_interrupt,
SA_SHIRQ, dev->name, (void *)dev)) {
+ printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
#ifdef CONFIG_ALL_PPC
if (!hw_was_up && gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
gem_apple_powerdown(gp);
@@ -2285,91 +2349,28 @@
static void gem_set_multicast(struct net_device *dev)
{
struct gem *gp = dev->priv;
-
+ u32 rxcfg, rxcfg_new;
+ int limit = 10000;
+
if (!gp->hw_running)
return;
netif_stop_queue(dev);
- if ((gp->dev->flags & IFF_ALLMULTI) ||
- (gp->dev->mc_count > 256)) {
- writel(0xffff, gp->regs + MAC_HASH0);
- writel(0xffff, gp->regs + MAC_HASH1);
- writel(0xffff, gp->regs + MAC_HASH2);
- writel(0xffff, gp->regs + MAC_HASH3);
- writel(0xffff, gp->regs + MAC_HASH4);
- writel(0xffff, gp->regs + MAC_HASH5);
- writel(0xffff, gp->regs + MAC_HASH6);
- writel(0xffff, gp->regs + MAC_HASH7);
- writel(0xffff, gp->regs + MAC_HASH8);
- writel(0xffff, gp->regs + MAC_HASH9);
- writel(0xffff, gp->regs + MAC_HASH10);
- writel(0xffff, gp->regs + MAC_HASH11);
- writel(0xffff, gp->regs + MAC_HASH12);
- writel(0xffff, gp->regs + MAC_HASH13);
- writel(0xffff, gp->regs + MAC_HASH14);
- writel(0xffff, gp->regs + MAC_HASH15);
- } else if (gp->dev->flags & IFF_PROMISC) {
- u32 rxcfg = readl(gp->regs + MAC_RXCFG);
- int limit = 10000;
-
- writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
- while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
- if (!limit--)
- break;
- udelay(10);
- }
-
- rxcfg |= MAC_RXCFG_PROM;
- writel(rxcfg, gp->regs + MAC_RXCFG);
- } else {
- u16 hash_table[16];
- u32 crc, poly = CRC_POLYNOMIAL_LE;
- struct dev_mc_list *dmi = gp->dev->mc_list;
- int i, j, bit, byte;
-
- for (i = 0; i < 16; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < dev->mc_count; i++) {
- char *addrs = dmi->dmi_addr;
-
- dmi = dmi->next;
-
- if (!(*addrs & 1))
- continue;
+ rxcfg = readl(gp->regs + MAC_RXCFG);
+ rxcfg_new = gem_setup_multicast(gp);
+
+ writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+ while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
+ if (!limit--)
+ break;
+ udelay(10);
+ }
- crc = 0xffffffffU;
- for (byte = 0; byte < 6; byte++) {
- for (bit = *addrs++, j = 0; j < 8; j++, bit >>= 1) {
- int test;
+ rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
+ rxcfg |= rxcfg_new;
- test = ((bit ^ crc) & 0x01);
- crc >>= 1;
- if (test)
- crc = crc ^ poly;
- }
- }
- crc >>= 24;
- hash_table[crc >> 4] |= 1 << (crc & 0xf);
- }
- writel(hash_table[0], gp->regs + MAC_HASH0);
- writel(hash_table[1], gp->regs + MAC_HASH1);
- writel(hash_table[2], gp->regs + MAC_HASH2);
- writel(hash_table[3], gp->regs + MAC_HASH3);
- writel(hash_table[4], gp->regs + MAC_HASH4);
- writel(hash_table[5], gp->regs + MAC_HASH5);
- writel(hash_table[6], gp->regs + MAC_HASH6);
- writel(hash_table[7], gp->regs + MAC_HASH7);
- writel(hash_table[8], gp->regs + MAC_HASH8);
- writel(hash_table[9], gp->regs + MAC_HASH9);
- writel(hash_table[10], gp->regs + MAC_HASH10);
- writel(hash_table[11], gp->regs + MAC_HASH11);
- writel(hash_table[12], gp->regs + MAC_HASH12);
- writel(hash_table[13], gp->regs + MAC_HASH13);
- writel(hash_table[14], gp->regs + MAC_HASH14);
- writel(hash_table[15], gp->regs + MAC_HASH15);
- }
+ writel(rxcfg, gp->regs + MAC_RXCFG);
/* Hrm... we may walk on the reset task here... */
netif_wake_queue(dev);
@@ -2491,7 +2492,7 @@
case ETHTOOL_GMSGLVL: {
struct ethtool_value edata = { cmd: ETHTOOL_GMSGLVL };
- edata.data = gem_debug;
+ edata.data = gp->msg_enable;
if (copy_to_user(ep_user, &edata, sizeof(edata)))
return -EFAULT;
return 0;
@@ -2503,7 +2504,7 @@
if (copy_from_user(&edata, ep_user, sizeof(edata)))
return -EFAULT;
- gem_debug = edata.data;
+ gp->msg_enable = edata.data;
return 0;
}
@@ -2699,6 +2700,8 @@
dev->base_addr = (long) pdev;
gp->dev = dev;
+ gp->msg_enable = (gem_debug < 0 ? DEFAULT_MSG : gem_debug);
+
spin_lock_init(&gp->lock);
init_MUTEX(&gp->pm_sem);
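With the default above, a negative gem_debug leaves the message level at DEFAULT_MSG, while any non-negative value is used verbatim as the NETIF_MSG_* bitmask; for example (assuming the usual sungem.o module build), "insmod sungem.o gem_debug=0x7" selects driver, probe and link messages, the same set as DEFAULT_MSG. The level can also be read and changed per interface at run time through the ETHTOOL_GMSGLVL/ETHTOOL_SMSGLVL ioctls handled earlier in this patch.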
@@ -2733,25 +2736,15 @@
* not have properly shut down the PHY.
*/
#ifdef CONFIG_ALL_PPC
- if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
+ if (pdev->vendor == PCI_VENDOR_ID_APPLE)
gem_apple_powerup(gp);
- if (gem_check_invariants(gp))
- goto err_out_iounmap;
- gem_stop(gp);
- gp->hw_running = 1;
- gem_init_phy(gp);
- gem_begin_auto_negotiation(gp, NULL);
- }
#endif
- /* Non Apple hardware, we just reset the chip and check
- * for invariants
- */
- if (pdev->vendor != PCI_VENDOR_ID_APPLE) {
- gem_stop(gp);
- if (gem_check_invariants(gp))
- goto err_out_iounmap;
- gp->hw_running = 1;
- }
+ gem_stop(gp);
+ if (gem_check_invariants(gp))
+ goto err_out_iounmap;
+ gp->hw_running = 1;
+ gem_init_phy(gp);
+ gem_begin_auto_negotiation(gp, NULL);
/* It is guarenteed that the returned buffer will be at least
* PAGE_SIZE aligned.