1 /* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
3 Written/copyright 1999-2001 by Donald Becker.
4 Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
5 Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
6 Portions copyright 2004 Harald Welte <laforge@gnumonks.org>
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL. License for under other terms may be
14 available. Contact the original author for details.
16 The original author may be reached as becker@scyld.com, or at
17 Scyld Computing Corporation
18 410 Severn Ave., Suite 210
21 Support information and updates available at
22 http://www.scyld.com/network/netsemi.html
23 [link no longer provides useful info -jgarzik]
27 * big endian support with CFG:BEM instead of cpu_to_le32
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/string.h>
33 #include <linux/timer.h>
34 #include <linux/errno.h>
35 #include <linux/ioport.h>
36 #include <linux/slab.h>
37 #include <linux/interrupt.h>
38 #include <linux/pci.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/skbuff.h>
42 #include <linux/init.h>
43 #include <linux/spinlock.h>
44 #include <linux/ethtool.h>
45 #include <linux/delay.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/mii.h>
48 #include <linux/crc32.h>
49 #include <linux/bitops.h>
50 #include <linux/prefetch.h>
51 #include <asm/processor.h> /* Processor type for cache alignment. */
54 #include <asm/uaccess.h>
56 #define DRV_NAME "natsemi"
57 #define DRV_VERSION "2.1"
58 #define DRV_RELDATE "Sept 11, 2006"
62 /* Updated to recommendations in pci-skeleton v2.03. */
64 /* The user-configurable values.
65 These may be modified when a driver module is loaded.*/
67 #define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \
72 static int debug = -1;
76 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
77 This chip uses a 512 element hash table based on the Ethernet CRC. */
78 static const int multicast_filter_limit = 100;
80 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
81 Setting to > 1518 effectively disables this feature. */
82 static int rx_copybreak;
84 /* Used to pass the media type, etc.
85 Both 'options[]' and 'full_duplex[]' should exist for driver
87 The media type is usually passed in 'options[]'.
89 #define MAX_UNITS 8 /* More are supported, limit only on options */
90 static int options[MAX_UNITS];
91 static int full_duplex[MAX_UNITS];
93 /* Operational parameters that are set at compile time. */
95 /* Keep the ring sizes a power of two for compile efficiency.
96 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
97 Making the Tx ring too large decreases the effectiveness of channel
98 bonding and packet priority.
99 There are no ill effects from too-large receive rings. */
100 #define TX_RING_SIZE 16
101 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
102 #define RX_RING_SIZE 32
104 /* Operational parameters that usually are not changed. */
105 /* Time in jiffies before concluding the transmitter is hung. */
106 #define TX_TIMEOUT (2*HZ)
108 #define NATSEMI_HW_TIMEOUT 400
109 #define NATSEMI_TIMER_FREQ 3*HZ
110 #define NATSEMI_PG0_NREGS 64
111 #define NATSEMI_RFDR_NREGS 8
112 #define NATSEMI_PG1_NREGS 4
113 #define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
115 #define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */
116 #define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
119 * The nic writes 32-bit values, even if the upper bytes of
120 * a 32-bit value are beyond the end of the buffer.
122 #define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */
123 #define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */
124 #define NATSEMI_LONGPKT 1518 /* limit for normal packets */
125 #define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */
127 /* These identify the driver base version and may not be removed. */
128 static const char version[] __devinitdata =
129 KERN_INFO DRV_NAME " dp8381x driver, version "
130 DRV_VERSION ", " DRV_RELDATE "\n"
131 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
132 KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
133 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
135 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
136 MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
137 MODULE_LICENSE("GPL");
139 module_param(mtu, int, 0);
140 module_param(debug, int, 0);
141 module_param(rx_copybreak, int, 0);
142 module_param_array(options, int, NULL, 0);
143 module_param_array(full_duplex, int, NULL, 0);
144 MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
145 MODULE_PARM_DESC(debug, "DP8381x default debug level");
146 MODULE_PARM_DESC(rx_copybreak,
147 "DP8381x copy breakpoint for copy-only-tiny-frames");
148 MODULE_PARM_DESC(options,
149 "DP8381x: Bits 0-3: media type, bit 17: full duplex");
150 MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
155 I. Board Compatibility
157 This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
158 It also works with other chips in the DP83810 series.
160 II. Board-specific settings
162 This driver requires the PCI interrupt line to be valid.
163 It honors the EEPROM-set values.
165 III. Driver operation
169 This driver uses two statically allocated fixed-size descriptor lists
170 formed into rings by a branch from the final descriptor to the beginning of
171 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
172 The NatSemi design uses a 'next descriptor' pointer that the driver forms
175 IIIb/c. Transmit/Receive Structure
177 This driver uses a zero-copy receive and transmit scheme.
178 The driver allocates full frame size skbuffs for the Rx ring buffers at
179 open() time and passes the skb->data field to the chip as receive data
180 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
181 a fresh skbuff is allocated and the frame is copied to the new skbuff.
182 When the incoming frame is larger, the skbuff is passed directly up the
183 protocol stack. Buffers consumed this way are replaced by newly allocated
184 skbuffs in a later phase of receives.
186 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
187 using a full-sized skbuff for small frames vs. the copying costs of larger
188 frames. New boards are typically used in generously configured machines
189 and the underfilled buffers have negligible impact compared to the benefit of
190 a single allocation size, so the default value of zero results in never
191 copying packets. When copying is done, the cost is usually mitigated by using
192 a combined copy/checksum routine. Copying also preloads the cache, which is
193 most useful with small frames.
195 A subtle aspect of the operation is that unaligned buffers are not permitted
196 by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
197 longword aligned for further processing. On copies frames are put into the
198 skbuff at an offset of "+2", 16-byte aligning the IP header.
200 IIId. Synchronization
202 Most operations are synchronized on the np->lock irq spinlock, except the
203 performance critical codepaths:
205 The rx process only runs in the interrupt handler. Access from outside
206 the interrupt handler is only permitted after disable_irq().
208 The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
209 is set, then access is permitted under spin_lock_irq(&np->lock).
211 Thus configuration functions that want to access everything must call
212 disable_irq(dev->irq);
213 netif_tx_lock_bh(dev);
214 spin_lock_irq(&np->lock);
218 NatSemi PCI network controllers are very uncommon.
222 http://www.scyld.com/expert/100mbps.html
223 http://www.scyld.com/expert/NWay.html
224 Datasheet is available from:
225 http://www.national.com/pf/DP/DP83815.html
235 * Support for fibre connections on Am79C874:
236 * This phy needs a special setup when connected to a fibre cable.
237 * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
239 #define PHYID_AM79C874 0x0022561b
242 MII_MCTRL = 0x15, /* mode control register */
243 MII_FX_SEL = 0x0001, /* 100BASE-FX (fiber) */
244 MII_EN_SCRM = 0x0004, /* enable scrambler (tp) */
248 /* array of board data directly indexed by pci_tbl[x].driver_data */
249 static const struct {
252 unsigned int eeprom_size;
253 } natsemi_pci_info[] __devinitdata = {
254 { "NatSemi DP8381[56]", 0, 24 },
257 static const struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
258 { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
259 { } /* terminate list */
261 MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
263 /* Offsets to the device registers.
264 Unlike software-only systems, device drivers interact with complex hardware.
265 It's not useful to define symbolic names for every register bit in the
268 enum register_offsets {
276 IntrHoldoff = 0x1C, /* DP83816 only */
303 /* These are from the spec, around page 78... on a separate table.
304 * The meaning of these registers depend on the value of PGSEL. */
311 /* the values for the 'magic' registers above (PGSEL=1) */
312 #define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */
313 #define TSTDAT_VAL 0x0
314 #define DSPCFG_VAL 0x5040
315 #define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */
316 #define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */
317 #define DSPCFG_COEF 0x1000 /* see coefficient (in TSTDAT) bit in DSPCFG */
318 #define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */
320 /* misc PCI space registers */
321 enum pci_register_offsets {
335 enum ChipConfig_bits {
339 CfgAnegEnable = 0x2000,
341 CfgAnegFull = 0x8000,
342 CfgAnegDone = 0x8000000,
343 CfgFullDuplex = 0x20000000,
344 CfgSpeed100 = 0x40000000,
345 CfgLink = 0x80000000,
351 EE_ChipSelect = 0x08,
358 enum PCIBusCfg_bits {
362 /* Bits in the interrupt status/mask registers. */
363 enum IntrStatus_bits {
367 IntrRxEarly = 0x0008,
369 IntrRxOverrun = 0x0020,
374 IntrTxUnderrun = 0x0400,
379 IntrHighBits = 0x8000,
380 RxStatusFIFOOver = 0x10000,
381 IntrPCIErr = 0xf00000,
382 RxResetDone = 0x1000000,
383 TxResetDone = 0x2000000,
384 IntrAbnormalSummary = 0xCD20,
388 * Default Interrupts:
389 * Rx OK, Rx Packet Error, Rx Overrun,
390 * Tx OK, Tx Packet Error, Tx Underrun,
391 * MIB Service, Phy Interrupt, High Bits,
392 * Rx Status FIFO overrun,
393 * Received Target Abort, Received Master Abort,
394 * Signalled System Error, Received Parity Error
396 #define DEFAULT_INTR 0x00f1cd65
401 TxMxdmaMask = 0x700000,
403 TxMxdma_4 = 0x100000,
404 TxMxdma_8 = 0x200000,
405 TxMxdma_16 = 0x300000,
406 TxMxdma_32 = 0x400000,
407 TxMxdma_64 = 0x500000,
408 TxMxdma_128 = 0x600000,
409 TxMxdma_256 = 0x700000,
410 TxCollRetry = 0x800000,
411 TxAutoPad = 0x10000000,
412 TxMacLoop = 0x20000000,
413 TxHeartIgn = 0x40000000,
414 TxCarrierIgn = 0x80000000
419 * - 256 byte DMA burst length
420 * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
421 * - 64 bytes initial drain threshold (i.e. begin actual transmission
422 * when 64 byte are in the fifo)
423 * - on tx underruns, increase drain threshold by 64.
424 * - at most use a drain threshold of 1472 bytes: The sum of the fill
425 * threshold and the drain threshold must be less than 2016 bytes.
428 #define TX_FLTH_VAL ((512/32) << 8)
429 #define TX_DRTH_VAL_START (64/32)
430 #define TX_DRTH_VAL_INC 2
431 #define TX_DRTH_VAL_LIMIT (1472/32)
435 RxMxdmaMask = 0x700000,
437 RxMxdma_4 = 0x100000,
438 RxMxdma_8 = 0x200000,
439 RxMxdma_16 = 0x300000,
440 RxMxdma_32 = 0x400000,
441 RxMxdma_64 = 0x500000,
442 RxMxdma_128 = 0x600000,
443 RxMxdma_256 = 0x700000,
444 RxAcceptLong = 0x8000000,
445 RxAcceptTx = 0x10000000,
446 RxAcceptRunt = 0x40000000,
447 RxAcceptErr = 0x80000000
449 #define RX_DRTH_VAL (128/8)
467 WakeMagicSecure = 0x400,
468 SecureHack = 0x100000,
470 WokeUnicast = 0x800000,
471 WokeMulticast = 0x1000000,
472 WokeBroadcast = 0x2000000,
474 WokePMatch0 = 0x8000000,
475 WokePMatch1 = 0x10000000,
476 WokePMatch2 = 0x20000000,
477 WokePMatch3 = 0x40000000,
478 WokeMagic = 0x80000000,
479 WakeOptsSummary = 0x7ff
482 enum RxFilterAddr_bits {
483 RFCRAddressMask = 0x3ff,
484 AcceptMulticast = 0x00200000,
485 AcceptMyPhys = 0x08000000,
486 AcceptAllPhys = 0x10000000,
487 AcceptAllMulticast = 0x20000000,
488 AcceptBroadcast = 0x40000000,
489 RxFilterEnable = 0x80000000
492 enum StatsCtrl_bits {
499 enum MIntrCtrl_bits {
507 #define PHY_ADDR_NONE 32
508 #define PHY_ADDR_INTERNAL 1
510 /* values we might find in the silicon revision register */
511 #define SRR_DP83815_C 0x0302
512 #define SRR_DP83815_D 0x0403
513 #define SRR_DP83816_A4 0x0504
514 #define SRR_DP83816_A5 0x0505
516 /* The Rx and Tx buffer descriptors. */
517 /* Note that using only 32 bit fields simplifies conversion to big-endian
526 /* Bits in network_desc.status */
527 enum desc_status_bits {
528 DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
529 DescNoCRC=0x10000000, DescPktOK=0x08000000,
532 DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
533 DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
534 DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
535 DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,
537 DescRxAbort=0x04000000, DescRxOver=0x02000000,
538 DescRxDest=0x01800000, DescRxLong=0x00400000,
539 DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
540 DescRxCRC=0x00080000, DescRxAlign=0x00040000,
541 DescRxLoop=0x00020000, DesRxColl=0x00010000,
544 struct netdev_private {
545 /* Descriptor rings first for alignment */
547 struct netdev_desc *rx_ring;
548 struct netdev_desc *tx_ring;
549 /* The addresses of receive-in-place skbuffs */
550 struct sk_buff *rx_skbuff[RX_RING_SIZE];
551 dma_addr_t rx_dma[RX_RING_SIZE];
552 /* address of a sent-in-place packet/buffer, for later free() */
553 struct sk_buff *tx_skbuff[TX_RING_SIZE];
554 dma_addr_t tx_dma[TX_RING_SIZE];
555 struct net_device_stats stats;
556 /* Media monitoring timer */
557 struct timer_list timer;
558 /* Frequently used values: keep some adjacent for cache effect */
559 struct pci_dev *pci_dev;
560 struct netdev_desc *rx_head_desc;
561 /* Producer/consumer ring indices */
562 unsigned int cur_rx, dirty_rx;
563 unsigned int cur_tx, dirty_tx;
564 /* Based on MTU+slack. */
565 unsigned int rx_buf_sz;
567 /* Interrupt status */
569 /* Do not touch the nic registers */
571 /* Don't pay attention to the reported link state. */
573 /* external phy that is used: only valid if dev->if_port != PORT_TP */
575 int phy_addr_external;
576 unsigned int full_duplex;
580 /* FIFO and PCI burst thresholds */
581 u32 tx_config, rx_config;
582 /* original contents of ClkRun register */
584 /* silicon revision */
586 /* expected DSPCFG value */
588 /* parms saved in ethtool format */
589 u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */
590 u8 duplex; /* Duplex, half or full */
591 u8 autoneg; /* Autonegotiation enabled */
592 /* MII transceiver section */
601 static void move_int_phy(struct net_device *dev, int addr);
602 static int eeprom_read(void __iomem *ioaddr, int location);
603 static int mdio_read(struct net_device *dev, int reg);
604 static void mdio_write(struct net_device *dev, int reg, u16 data);
605 static void init_phy_fixup(struct net_device *dev);
606 static int miiport_read(struct net_device *dev, int phy_id, int reg);
607 static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
608 static int find_mii(struct net_device *dev);
609 static void natsemi_reset(struct net_device *dev);
610 static void natsemi_reload_eeprom(struct net_device *dev);
611 static void natsemi_stop_rxtx(struct net_device *dev);
612 static int netdev_open(struct net_device *dev);
613 static void do_cable_magic(struct net_device *dev);
614 static void undo_cable_magic(struct net_device *dev);
615 static void check_link(struct net_device *dev);
616 static void netdev_timer(unsigned long data);
617 static void dump_ring(struct net_device *dev);
618 static void tx_timeout(struct net_device *dev);
619 static int alloc_ring(struct net_device *dev);
620 static void refill_rx(struct net_device *dev);
621 static void init_ring(struct net_device *dev);
622 static void drain_tx(struct net_device *dev);
623 static void drain_ring(struct net_device *dev);
624 static void free_ring(struct net_device *dev);
625 static void reinit_ring(struct net_device *dev);
626 static void init_registers(struct net_device *dev);
627 static int start_tx(struct sk_buff *skb, struct net_device *dev);
628 static irqreturn_t intr_handler(int irq, void *dev_instance);
629 static void netdev_error(struct net_device *dev, int intr_status);
630 static int natsemi_poll(struct net_device *dev, int *budget);
631 static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
632 static void netdev_tx_done(struct net_device *dev);
633 static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
634 #ifdef CONFIG_NET_POLL_CONTROLLER
635 static void natsemi_poll_controller(struct net_device *dev);
637 static void __set_rx_mode(struct net_device *dev);
638 static void set_rx_mode(struct net_device *dev);
639 static void __get_stats(struct net_device *dev);
640 static struct net_device_stats *get_stats(struct net_device *dev);
641 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
642 static int netdev_set_wol(struct net_device *dev, u32 newval);
643 static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
644 static int netdev_set_sopass(struct net_device *dev, u8 *newval);
645 static int netdev_get_sopass(struct net_device *dev, u8 *data);
646 static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
647 static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
648 static void enable_wol_mode(struct net_device *dev, int enable_intr);
649 static int netdev_close(struct net_device *dev);
650 static int netdev_get_regs(struct net_device *dev, u8 *buf);
651 static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
652 static const struct ethtool_ops ethtool_ops;
654 static inline void __iomem *ns_ioaddr(struct net_device *dev)
656 return (void __iomem *) dev->base_addr;
659 static inline void natsemi_irq_enable(struct net_device *dev)
661 writel(1, ns_ioaddr(dev) + IntrEnable);
662 readl(ns_ioaddr(dev) + IntrEnable);
665 static inline void natsemi_irq_disable(struct net_device *dev)
667 writel(0, ns_ioaddr(dev) + IntrEnable);
668 readl(ns_ioaddr(dev) + IntrEnable);
671 static void move_int_phy(struct net_device *dev, int addr)
673 struct netdev_private *np = netdev_priv(dev);
674 void __iomem *ioaddr = ns_ioaddr(dev);
678 * The internal phy is visible on the external mii bus. Therefore we must
679 * move it away before we can send commands to an external phy.
680 * There are two addresses we must avoid:
681 * - the address on the external phy that is used for transmission.
682 * - the address that we want to access. User space can access phys
683 * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independant from the
684 * phy that is used for transmission.
689 if (target == np->phy_addr_external)
691 writew(target, ioaddr + PhyCtrl);
692 readw(ioaddr + PhyCtrl);
696 static void __devinit natsemi_init_media (struct net_device *dev)
698 struct netdev_private *np = netdev_priv(dev);
702 netif_carrier_on(dev);
704 netif_carrier_off(dev);
706 /* get the initial settings from hardware */
707 tmp = mdio_read(dev, MII_BMCR);
708 np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
709 np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
710 np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
711 np->advertising= mdio_read(dev, MII_ADVERTISE);
713 if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL
714 && netif_msg_probe(np)) {
715 printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
717 pci_name(np->pci_dev),
718 (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
719 "enabled, advertise" : "disabled, force",
721 (ADVERTISE_100FULL|ADVERTISE_100HALF))?
724 (ADVERTISE_100FULL|ADVERTISE_10FULL))?
727 if (netif_msg_probe(np))
729 "natsemi %s: Transceiver status %#04x advertising %#04x.\n",
730 pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
735 static int __devinit natsemi_probe1 (struct pci_dev *pdev,
736 const struct pci_device_id *ent)
738 struct net_device *dev;
739 struct netdev_private *np;
740 int i, option, irq, chip_idx = ent->driver_data;
741 static int find_cnt = -1;
742 unsigned long iostart, iosize;
743 void __iomem *ioaddr;
744 const int pcibar = 1; /* PCI base address register */
748 /* when built into the kernel, we only print version if device is found */
750 static int printed_version;
751 if (!printed_version++)
755 i = pci_enable_device(pdev);
758 /* natsemi has a non-standard PM control register
759 * in PCI config space. Some boards apparently need
760 * to be brought to D0 in this manner.
762 pci_read_config_dword(pdev, PCIPM, &tmp);
763 if (tmp & PCI_PM_CTRL_STATE_MASK) {
764 /* D0 state, disable PME assertion */
765 u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
766 pci_write_config_dword(pdev, PCIPM, newtmp);
770 iostart = pci_resource_start(pdev, pcibar);
771 iosize = pci_resource_len(pdev, pcibar);
774 pci_set_master(pdev);
776 dev = alloc_etherdev(sizeof (struct netdev_private));
779 SET_MODULE_OWNER(dev);
780 SET_NETDEV_DEV(dev, &pdev->dev);
782 i = pci_request_regions(pdev, DRV_NAME);
784 goto err_pci_request_regions;
786 ioaddr = ioremap(iostart, iosize);
792 /* Work around the dropped serial bit. */
793 prev_eedata = eeprom_read(ioaddr, 6);
794 for (i = 0; i < 3; i++) {
795 int eedata = eeprom_read(ioaddr, i + 7);
796 dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
797 dev->dev_addr[i*2+1] = eedata >> 7;
798 prev_eedata = eedata;
801 dev->base_addr = (unsigned long __force) ioaddr;
804 np = netdev_priv(dev);
807 pci_set_drvdata(pdev, dev);
809 spin_lock_init(&np->lock);
810 np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
813 np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
817 * - If configured to ignore the PHY set up for external.
818 * - If the nic was configured to use an external phy and if find_mii
819 * finds a phy: use external port, first phy that replies.
820 * - Otherwise: internal port.
821 * Note that the phy address for the internal phy doesn't matter:
822 * The address would be used to access a phy over the mii bus, but
823 * the internal phy is accessed through mapped registers.
825 if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
826 dev->if_port = PORT_MII;
828 dev->if_port = PORT_TP;
829 /* Reset the chip to erase previous misconfiguration. */
830 natsemi_reload_eeprom(dev);
833 if (dev->if_port != PORT_TP) {
834 np->phy_addr_external = find_mii(dev);
835 /* If we're ignoring the PHY it doesn't matter if we can't
837 if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
838 dev->if_port = PORT_TP;
839 np->phy_addr_external = PHY_ADDR_INTERNAL;
842 np->phy_addr_external = PHY_ADDR_INTERNAL;
845 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
847 option = dev->mem_start;
849 /* The lower four bits are the media type. */
855 "natsemi %s: ignoring user supplied media type %d",
856 pci_name(np->pci_dev), option & 15);
858 if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
861 /* The chip-specific entries in the device structure. */
862 dev->open = &netdev_open;
863 dev->hard_start_xmit = &start_tx;
864 dev->stop = &netdev_close;
865 dev->get_stats = &get_stats;
866 dev->set_multicast_list = &set_rx_mode;
867 dev->change_mtu = &natsemi_change_mtu;
868 dev->do_ioctl = &netdev_ioctl;
869 dev->tx_timeout = &tx_timeout;
870 dev->watchdog_timeo = TX_TIMEOUT;
871 dev->poll = natsemi_poll;
874 #ifdef CONFIG_NET_POLL_CONTROLLER
875 dev->poll_controller = &natsemi_poll_controller;
877 SET_ETHTOOL_OPS(dev, ðtool_ops);
882 natsemi_init_media(dev);
884 /* save the silicon revision for later querying */
885 np->srr = readl(ioaddr + SiliconRev);
886 if (netif_msg_hw(np))
887 printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
888 pci_name(np->pci_dev), np->srr);
890 i = register_netdev(dev);
892 goto err_register_netdev;
894 if (netif_msg_drv(np)) {
895 printk(KERN_INFO "natsemi %s: %s at %#08lx (%s), ",
896 dev->name, natsemi_pci_info[chip_idx].name, iostart,
897 pci_name(np->pci_dev));
898 for (i = 0; i < ETH_ALEN-1; i++)
899 printk("%02x:", dev->dev_addr[i]);
900 printk("%02x, IRQ %d", dev->dev_addr[i], irq);
901 if (dev->if_port == PORT_TP)
902 printk(", port TP.\n");
903 else if (np->ignore_phy)
904 printk(", port MII, ignoring PHY\n");
906 printk(", port MII, phy ad %d.\n", np->phy_addr_external);
914 pci_release_regions(pdev);
915 pci_set_drvdata(pdev, NULL);
917 err_pci_request_regions:
923 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
924 The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
926 /* Delay between EEPROM clock transitions.
927 No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
928 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
929 made udelay() unreliable.
930 The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
933 #define eeprom_delay(ee_addr) readl(ee_addr)
935 #define EE_Write0 (EE_ChipSelect)
936 #define EE_Write1 (EE_ChipSelect | EE_DataIn)
938 /* The EEPROM commands include the always-set leading bit. */
940 EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
943 static int eeprom_read(void __iomem *addr, int location)
947 void __iomem *ee_addr = addr + EECtrl;
948 int read_cmd = location | EE_ReadCmd;
950 writel(EE_Write0, ee_addr);
952 /* Shift the read command bits out. */
953 for (i = 10; i >= 0; i--) {
954 short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
955 writel(dataval, ee_addr);
956 eeprom_delay(ee_addr);
957 writel(dataval | EE_ShiftClk, ee_addr);
958 eeprom_delay(ee_addr);
960 writel(EE_ChipSelect, ee_addr);
961 eeprom_delay(ee_addr);
963 for (i = 0; i < 16; i++) {
964 writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
965 eeprom_delay(ee_addr);
966 retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
967 writel(EE_ChipSelect, ee_addr);
968 eeprom_delay(ee_addr);
971 /* Terminate the EEPROM access. */
972 writel(EE_Write0, ee_addr);
977 /* MII transceiver control section.
978 * The 83815 series has an internal transceiver, and we present the
979 * internal management registers as if they were MII connected.
980 * External Phy registers are referenced through the MII interface.
983 /* clock transitions >= 20ns (25MHz)
984 * One readl should be good to PCI @ 100MHz
986 #define mii_delay(ioaddr) readl(ioaddr + EECtrl)
988 static int mii_getbit (struct net_device *dev)
991 void __iomem *ioaddr = ns_ioaddr(dev);
993 writel(MII_ShiftClk, ioaddr + EECtrl);
994 data = readl(ioaddr + EECtrl);
995 writel(0, ioaddr + EECtrl);
997 return (data & MII_Data)? 1 : 0;
1000 static void mii_send_bits (struct net_device *dev, u32 data, int len)
1003 void __iomem *ioaddr = ns_ioaddr(dev);
1005 for (i = (1 << (len-1)); i; i >>= 1)
1007 u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
1008 writel(mdio_val, ioaddr + EECtrl);
1010 writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
1013 writel(0, ioaddr + EECtrl);
1017 static int miiport_read(struct net_device *dev, int phy_id, int reg)
1024 mii_send_bits (dev, 0xffffffff, 32);
1025 /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1026 /* ST,OP = 0110'b for read operation */
1027 cmd = (0x06 << 10) | (phy_id << 5) | reg;
1028 mii_send_bits (dev, cmd, 14);
1030 if (mii_getbit (dev))
1033 for (i = 0; i < 16; i++) {
1035 retval |= mii_getbit (dev);
1042 static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
1047 mii_send_bits (dev, 0xffffffff, 32);
1048 /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1049 /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
1050 cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
1051 mii_send_bits (dev, cmd, 32);
1056 static int mdio_read(struct net_device *dev, int reg)
1058 struct netdev_private *np = netdev_priv(dev);
1059 void __iomem *ioaddr = ns_ioaddr(dev);
1061 /* The 83815 series has two ports:
1062 * - an internal transceiver
1063 * - an external mii bus
1065 if (dev->if_port == PORT_TP)
1066 return readw(ioaddr+BasicControl+(reg<<2));
1068 return miiport_read(dev, np->phy_addr_external, reg);
1071 static void mdio_write(struct net_device *dev, int reg, u16 data)
1073 struct netdev_private *np = netdev_priv(dev);
1074 void __iomem *ioaddr = ns_ioaddr(dev);
1076 /* The 83815 series has an internal transceiver; handle separately */
1077 if (dev->if_port == PORT_TP)
1078 writew(data, ioaddr+BasicControl+(reg<<2));
1080 miiport_write(dev, np->phy_addr_external, reg, data);
1083 static void init_phy_fixup(struct net_device *dev)
1085 struct netdev_private *np = netdev_priv(dev);
1086 void __iomem *ioaddr = ns_ioaddr(dev);
1091 /* restore stuff lost when power was out */
1092 tmp = mdio_read(dev, MII_BMCR);
1093 if (np->autoneg == AUTONEG_ENABLE) {
1094 /* renegotiate if something changed */
1095 if ((tmp & BMCR_ANENABLE) == 0
1096 || np->advertising != mdio_read(dev, MII_ADVERTISE))
1098 /* turn on autonegotiation and force negotiation */
1099 tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
1100 mdio_write(dev, MII_ADVERTISE, np->advertising);
1103 /* turn off auto negotiation, set speed and duplexity */
1104 tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
1105 if (np->speed == SPEED_100)
1106 tmp |= BMCR_SPEED100;
1107 if (np->duplex == DUPLEX_FULL)
1108 tmp |= BMCR_FULLDPLX;
1110 * Note: there is no good way to inform the link partner
1111 * that our capabilities changed. The user has to unplug
1112 * and replug the network cable after some changes, e.g.
1113 * after switching from 10HD, autoneg off to 100 HD,
1117 mdio_write(dev, MII_BMCR, tmp);
1118 readl(ioaddr + ChipConfig);
1121 /* find out what phy this is */
1122 np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
1123 + mdio_read(dev, MII_PHYSID2);
1125 /* handle external phys here */
1127 case PHYID_AM79C874:
1128 /* phy specific configuration for fibre/tp operation */
1129 tmp = mdio_read(dev, MII_MCTRL);
1130 tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
1131 if (dev->if_port == PORT_FIBRE)
1135 mdio_write(dev, MII_MCTRL, tmp);
1140 cfg = readl(ioaddr + ChipConfig);
1141 if (cfg & CfgExtPhy)
1144 /* On page 78 of the spec, they recommend some settings for "optimum
1145 performance" to be done in sequence. These settings optimize some
1146 of the 100Mbit autodetection circuitry. They say we only want to
1147 do this for rev C of the chip, but engineers at NSC (Bradley
1148 Kennedy) recommends always setting them. If you don't, you get
1149 errors on some autonegotiations that make the device unusable.
1151 It seems that the DSP needs a few usec to reinitialize after
1152 the start of the phy. Just retry writing these values until they
1155 for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1158 writew(1, ioaddr + PGSEL);
1159 writew(PMDCSR_VAL, ioaddr + PMDCSR);
1160 writew(TSTDAT_VAL, ioaddr + TSTDAT);
1161 np->dspcfg = (np->srr <= SRR_DP83815_C)?
1162 DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
1163 writew(np->dspcfg, ioaddr + DSPCFG);
1164 writew(SDCFG_VAL, ioaddr + SDCFG);
1165 writew(0, ioaddr + PGSEL);
1166 readl(ioaddr + ChipConfig);
1169 writew(1, ioaddr + PGSEL);
1170 dspcfg = readw(ioaddr + DSPCFG);
1171 writew(0, ioaddr + PGSEL);
1172 if (np->dspcfg == dspcfg)
1176 if (netif_msg_link(np)) {
1177 if (i==NATSEMI_HW_TIMEOUT) {
1179 "%s: DSPCFG mismatch after retrying for %d usec.\n",
1183 "%s: DSPCFG accepted after %d usec.\n",
1188 * Enable PHY Specific event based interrupts. Link state change
1189 * and Auto-Negotiation Completion are among the affected.
1190 * Read the intr status to clear it (needed for wake events).
1192 readw(ioaddr + MIntrStatus);
1193 writew(MICRIntEn, ioaddr + MIntrCtrl);
/* Select the external transceiver on the MII bus.
 * Sets CfgExtPhy/CfgPhyDis in ChipConfig, then re-runs the PHY fixup
 * sequence (init_phy_fixup) that is lost while the phy is powered down.
 * Returns early if the external phy is already selected.
 * NOTE(review): some interior lines (declarations, braces, return paths)
 * are elided in this extract. */
static int switch_port_external(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	cfg = readl(ioaddr + ChipConfig);
	/* already on the external phy - nothing to do */
	if (cfg & CfgExtPhy)

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to external transceiver.\n",

	/* 1) switch back to external phy */
	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);	/* flush posted write */

	/* 2) reset the external phy: */
	/* resetting the external PHY has been known to cause a hub supplying
	 * power over Ethernet to kill the power. We don't want to kill
	 * power to this computer, so we avoid resetting the phy.

	/* 3) reinit the phy fixup, it got lost during power down. */
	move_int_phy(dev, np->phy_addr_external);
	init_phy_fixup(dev);
/* Select the internal transceiver.
 * Clears CfgExtPhy/CfgPhyDis, resets the internal phy through the
 * memory-mapped MII registers (BasicControl + MII_BMCR<<2), polls for
 * reset completion, and re-runs the PHY fixup sequence. */
static int switch_port_internal(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	cfg = readl(ioaddr + ChipConfig);
	/* already on the internal phy - nothing to do */
	if (!(cfg &CfgExtPhy))

	if (netif_msg_link(np)) {
		printk(KERN_INFO "%s: switching to internal transceiver.\n",

	/* 1) switch back to internal phy: */
	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);
	readl(ioaddr + ChipConfig);	/* flush posted write */

	/* 2) reset the internal phy: */
	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
	readl(ioaddr + ChipConfig);
	/* BMCR_RESET is self-clearing: poll until the phy deasserts it */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
		if (!(bmcr & BMCR_RESET))

	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
		"%s: phy reset did not complete in %d usec.\n",

	/* 3) reinit the phy fixup, it got lost during power down. */
	init_phy_fixup(dev);
1273 /* Scan for a PHY on the external mii bus.
1274 * There are two tricky points:
1275 * - Do not scan while the internal phy is enabled. The internal phy will
1276 * crash: e.g. reads from the DSPCFG register will return odd values and
1277 * the nasty random phy reset code will reset the nic every few seconds.
1278 * - The internal phy must be moved around, an external phy could
1279 * have the same address as the internal phy.
/* Scan the external MII bus for a phy (see the block comment above for
 * why the internal phy must be disabled and moved during the scan).
 * On success records the phy id in np->mii; finally restores the
 * internal phy selection if we switched away from it. */
static int find_mii(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);

	/* Switch to external phy */
	did_switch = switch_port_external(dev);

	/* Scan the possible phy addresses:

	 * PHY address 0 means that the phy is in isolate mode. Not yet
	 * supported due to lack of test hardware. User space should
	 * handle it through ethtool.
	for (i = 1; i <= 31; i++) {
		/* keep the internal phy out of the way of address i */
		move_int_phy(dev, i);
		/* BMSR of all-ones or all-zeroes means no phy here */
		tmp = miiport_read(dev, i, MII_BMSR);
		if (tmp != 0xffff && tmp != 0x0000) {
			/* found something! */
			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
				+ mdio_read(dev, MII_PHYSID2);
			if (netif_msg_probe(np)) {
				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
						pci_name(np->pci_dev), np->mii, i);

	/* And switch back to internal phy: */
	switch_port_internal(dev);
/* Masks of register bits that must survive a soft reset.  These fields
 * would normally be reloaded from the EEPROM, but Natsemi recommends
 * NOT reloading the EEPROM while live, so natsemi_reset() saves and
 * restores them by hand around the reset. */
/* CFG bits [13:16] [18:23] */
#define CFG_RESET_SAVE 0xfde000
/* WCSR bits [0:4] [9:10] */
#define WCSR_RESET_SAVE 0x61f
/* RFCR bits [20] [22] [27:31] */
/* Fix: dropped the stray trailing ';' from the original definition.
 * It only compiled because every current use site happened to end a
 * statement; any parenthesized or mid-expression use would break. */
#define RFCR_RESET_SAVE 0xf8500000
/* Soft-reset the chip, preserving the EEPROM-loaded configuration.
 * Saves CFG/WCSR/RFCR bits and the PMATCH/SOPASS filter words, issues
 * ChipReset, polls for completion, then restores the saved state.
 * Callers are expected to follow up with init_registers(). */
static void natsemi_reset(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	 * Resetting the chip causes some registers to be lost.
	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
	 * we save the state that would have been loaded from EEPROM
	 * on a normal power-up (see the spec EEPROM map). This assumes
	 * whoever calls this will follow up with init_registers() eventually.

	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;

	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;

	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
	/* save the perfect-match MAC address words (filter offsets 0/2/4) */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		pmatch[i] = readw(ioaddr + RxFilterData);

	/* save the SecureOn password words (filter offsets 0xa/0xc/0xe) */
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		sopass[i] = readw(ioaddr + RxFilterData);

	/* now whack the chip */
	writel(ChipReset, ioaddr + ChipCmd);
	/* ChipReset self-clears when the reset is done */
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		if (!(readl(ioaddr + ChipCmd) & ChipReset))

	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",

	/* merge the saved bits back into the post-reset register values */
	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
	/* turn on external phy if it was selected */
	if (dev->if_port == PORT_TP)
		cfg &= ~(CfgExtPhy | CfgPhyDis);
		cfg |= (CfgExtPhy | CfgPhyDis);
	writel(cfg, ioaddr + ChipConfig);

	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
	writel(wcsr, ioaddr + WOLCmd);

	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
	/* restore PMATCH */
	for (i = 0; i < 3; i++) {
		writel(i*2, ioaddr + RxFilterAddr);
		writew(pmatch[i], ioaddr + RxFilterData);
	for (i = 0; i < 3; i++) {
		writel(0xa+(i*2), ioaddr + RxFilterAddr);
		writew(sopass[i], ioaddr + RxFilterData);
	/* restore RFCR last: writing RxFilterAddr re-enables the filter */
	writel(rfcr, ioaddr + RxFilterAddr);
/* Reset only the RX state machine (used to recover from RX lockups).
 * Accumulates IntrStatus reads into np->intr_status so no interrupt
 * causes are lost while polling for RxResetDone. */
static void reset_rx(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	np->intr_status &= ~RxResetDone;

	writel(RxReset, ioaddr + ChipCmd);

	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		/* |= : preserve any other interrupt causes that fire meanwhile */
		np->intr_status |= readl(ioaddr + IntrStatus);
		if (np->intr_status & RxResetDone)

	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
	} else if (netif_msg_hw(np)) {
		/* NOTE(review): success path logs at KERN_WARNING; KERN_DEBUG
		 * would match the other helpers in this file */
		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
/* Trigger a full EEPROM reload and poll until the chip finishes.
 * EepromReload self-clears in PCIBusCfg when done; the "i*50" in the
 * log messages suggests each poll iteration is ~50 usec. */
static void natsemi_reload_eeprom(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	writel(EepromReload, ioaddr + PCIBusCfg);
	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))

	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
			pci_name(np->pci_dev), i*50);
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
			pci_name(np->pci_dev), i*50);
/* Command both DMA engines off and poll until TxOn/RxOn deassert,
 * i.e. until the chip has actually quiesced. */
static void natsemi_stop_rxtx(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	writel(RxOff | TxOff, ioaddr + ChipCmd);
	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)

	if (i==NATSEMI_HW_TIMEOUT) {
		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
	} else if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
/* net_device open() method: reset the chip, claim the (shared) IRQ,
 * allocate the descriptor rings, program the MAC address and rx mode,
 * start the queue, and arm the 5-second watchdog/link timer. */
static int netdev_open(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* Reset the chip, just in case. */

	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			dev->name, dev->irq);
	i = alloc_ring(dev);
		/* ring allocation failed - release the IRQ again */
		free_irq(dev->irq, dev);

	spin_lock_irq(&np->lock);
	init_registers(dev);
	/* now set the MAC address according to dev->dev_addr */
	for (i = 0; i < 3; i++) {
		/* filter RAM takes 16-bit little-endian words at offsets 0/2/4 */
		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];

		writel(i*2, ioaddr + RxFilterAddr);
		writew(mac, ioaddr + RxFilterData);
	/* restore the rx filter mode clobbered by the address writes */
	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
			dev->name, (int)readl(ioaddr + ChipCmd));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + NATSEMI_TIMER_FREQ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer; /* timer handler */
	add_timer(&np->timer);
/* Work around a short-cable 100Mbit DSP bug (CRC errors / flickering
 * activity LED) on pre-DP83816_A5 silicon, per National's instructions:
 * if the DSP coefficient is out of range, force TSTDAT_FIXED and lock
 * the value via DSPCFG_LOCK.  TP port only. */
static void do_cable_magic(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)

	/* newer silicon doesn't have the bug */
	if (np->srr >= SRR_DP83816_A5)

	 * 100 MBit links with short cables can trip an issue with the chip.
	 * The problem manifests as lots of CRC errors and/or flickering
	 * activity LED while idle. This process is based on instructions
	 * from engineers at National.
	if (readl(ioaddr + ChipConfig) & CfgSpeed100) {

		writew(1, ioaddr + PGSEL);	/* select the DSP register page */
		 * coefficient visibility should already be enabled via
		data = readw(ioaddr + TSTDAT) & 0xff;
		 * the value must be negative, and within certain values
		 * (these values all come from National)
		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
			/* NOTE(review): redundant re-declaration - 'np' already
			 * exists in the enclosing scope and this shadows it */
			struct netdev_private *np = netdev_priv(dev);

			/* the bug has been triggered - fix the coefficient */
			writew(TSTDAT_FIXED, ioaddr + TSTDAT);
			/* lock the value */
			data = readw(ioaddr + DSPCFG);
			np->dspcfg = data | DSPCFG_LOCK;
			writew(np->dspcfg, ioaddr + DSPCFG);

		writew(0, ioaddr + PGSEL);	/* back to the normal page */
/* Revert do_cable_magic(): clear DSPCFG_LOCK so the DSP coefficient
 * can adapt again (called on link down).  TP port only; skipped on
 * silicon that doesn't have the bug. */
static void undo_cable_magic(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	if (dev->if_port != PORT_TP)

	if (np->srr >= SRR_DP83816_A5)

	writew(1, ioaddr + PGSEL);	/* select the DSP register page */
	/* make sure the lock bit is clear */
	data = readw(ioaddr + DSPCFG);
	np->dspcfg = data & ~DSPCFG_LOCK;
	writew(np->dspcfg, ioaddr + DSPCFG);
	writew(0, ioaddr + PGSEL);	/* back to the normal page */
/* Track link state and duplex.  Reads BMSR (twice, because the link
 * status bit is latched), updates the carrier state, applies/undoes the
 * cable-magic workaround, and reprograms tx/rx config when the duplex
 * negotiated by the phy differs from what the MAC is using. */
static void check_link(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int duplex = np->duplex;

	/* If we are ignoring the PHY then don't try reading it. */
		goto propagate_state;

	/* The link status field is latched: it remains low after a temporary
	 * link failure until it's read. We need the current link status,
	mdio_read(dev, MII_BMSR);	/* first read clears the latched value */
	bmsr = mdio_read(dev, MII_BMSR);

	if (!(bmsr & BMSR_LSTATUS)) {
		if (netif_carrier_ok(dev)) {
			if (netif_msg_link(np))
				printk(KERN_NOTICE "%s: link down.\n",
			netif_carrier_off(dev);
			undo_cable_magic(dev);

	if (!netif_carrier_ok(dev)) {
		if (netif_msg_link(np))
			printk(KERN_NOTICE "%s: link up.\n", dev->name);
		netif_carrier_on(dev);
		do_cable_magic(dev);

		duplex = np->full_duplex;
		/* autoneg result takes precedence over the forced BMCR bit */
		if (bmsr & BMSR_ANEGCOMPLETE) {
			int tmp = mii_nway_result(
				np->advertising & mdio_read(dev, MII_LPA));
			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)

	/* if duplex is set then bit 28 must be set, too */
	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
		if (netif_msg_link(np))
				"%s: Setting %s-duplex based on negotiated "
				"link capability.\n", dev->name,
				duplex ? "full" : "half");
			/* full duplex: accept our own tx, ignore carrier/heartbeat */
			np->rx_config |= RxAcceptTx;
			np->tx_config |= TxCarrierIgn | TxHeartIgn;
			np->rx_config &= ~RxAcceptTx;
			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
		writel(np->tx_config, ioaddr + TxConfig);
		writel(np->rx_config, ioaddr + RxConfig);
/* Program the chip into its running state: phy fixup, ring pointers,
 * tx/rx config (burst sizes, FIFO thresholds), ClkRun/PME handling,
 * interrupt mask, and finally start both DMA engines.
 * Caller holds np->lock. */
static void init_registers(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	init_phy_fixup(dev);

	/* clear any interrupts that are pending, such as wake events */
	readl(ioaddr + IntrStatus);

	writel(np->ring_dma, ioaddr + RxRingPtr);
	/* tx ring lives immediately after the rx ring in the same block */
	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
		ioaddr + TxRingPtr);

	/* Initialize other registers.
	 * Configure the PCI bus bursts and FIFO thresholds.
	 * Configure for standard, in-spec Ethernet.
	 * Start with half-duplex. check_link will update
	 * to the correct settings.

	/* DRTH: 2: start tx if 64 bytes are in the fifo
	 * FLTH: 0x10: refill with next packet if 512 bytes are free
	 * MXDMA: 0: up to 256 byte bursts.
	 * MXDMA must be <= FLTH
	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
		TX_FLTH_VAL | TX_DRTH_VAL_START;
	writel(np->tx_config, ioaddr + TxConfig);

	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
	 * MXDMA 0: up to 256 byte bursts
	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
	/* if receive ring now has bigger buffers than normal, enable jumbo */
	if (np->rx_buf_sz > NATSEMI_LONGPKT)
		np->rx_config |= RxAcceptLong;

	writel(np->rx_config, ioaddr + RxConfig);

	 * The PME bit is initialized from the EEPROM contents.
	 * PCI cards probably have PME disabled, but motherboard
	 * implementations may have PME set to enable WakeOnLan.
	 * With PME set the chip will scan incoming packets but
	 * nothing will be written to memory. */
	np->SavedClkRun = readl(ioaddr + ClkRun);
	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
			dev->name, readl(ioaddr + WOLCmd));

	/* Enable interrupts by setting the interrupt mask. */
	writel(DEFAULT_INTR, ioaddr + IntrMask);
	writel(1, ioaddr + IntrEnable);

	writel(RxOn | TxOn, ioaddr + ChipCmd);
	writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
1716 * 1) check for link changes. Usually they are handled by the MII interrupt
1717 * but it doesn't hurt to check twice.
1718 * 2) check for sudden death of the NIC:
1719 * It seems that a reference set for this chip went out with incorrect info,
1720 * and there exist boards that aren't quite right. An unexpected voltage
1721 * drop can cause the PHY to get itself in a weird state (basically reset).
1722 * NOTE: this only seems to affect revC chips.
1723 * 3) check of death of the RX path due to OOM
/* Periodic (5s) watchdog: see the block comment above for the three
 * jobs (link check, random-phy-reset detection via DSPCFG, RX OOM
 * recovery).  The IRQ is disabled around the reinit path because the
 * handler shares np->lock. */
static void netdev_timer(unsigned long data)
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	int next_tick = 5*HZ;

	if (netif_msg_timer(np)) {
		/* DO NOT read the IntrStatus register,
		 * a read clears any pending interrupts.
		printk(KERN_DEBUG "%s: Media selection timer tick.\n",

	if (dev->if_port == PORT_TP) {

		spin_lock_irq(&np->lock);
		/* check for a nasty random phy-reset - use dspcfg as a flag */
		writew(1, ioaddr+PGSEL);
		dspcfg = readw(ioaddr+DSPCFG);
		writew(0, ioaddr+PGSEL);
		/* DSPCFG reverting to its reset value means the phy spontaneously
		 * reset itself - reinitialize the whole chip */
		if (dspcfg != np->dspcfg) {
			if (!netif_queue_stopped(dev)) {
				spin_unlock_irq(&np->lock);
				if (netif_msg_hw(np))
					printk(KERN_NOTICE "%s: possible phy reset: "
						"re-initializing\n", dev->name);
				disable_irq(dev->irq);
				spin_lock_irq(&np->lock);
				natsemi_stop_rxtx(dev);
				init_registers(dev);
				spin_unlock_irq(&np->lock);
				enable_irq(dev->irq);
				spin_unlock_irq(&np->lock);

			/* init_registers() calls check_link() for the above case */
			spin_unlock_irq(&np->lock);

		spin_lock_irq(&np->lock);
		spin_unlock_irq(&np->lock);

	/* RX went out-of-memory earlier: try to refill the ring */
		disable_irq(dev->irq);
		enable_irq(dev->irq);
		writel(RxOn, ioaddr + ChipCmd);

	mod_timer(&np->timer, jiffies + next_tick);
/* Debug helper: dump every tx and rx descriptor (next/status/addr)
 * when pktdata message level is enabled. */
static void dump_ring(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_pktdata(np)) {

		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->tx_ring[i].next_desc,
				np->tx_ring[i].cmd_status,
				np->tx_ring[i].addr);
		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
				i, np->rx_ring[i].next_desc,
				np->rx_ring[i].cmd_status,
				np->rx_ring[i].addr);
/* net_device tx_timeout() method: unless another path owns the chip
 * (hands_off), reinitialize the registers, then count the error and
 * wake the queue.  IRQ disabled around the lock because the interrupt
 * handler takes np->lock too. */
static void tx_timeout(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	if (!np->hands_off) {
		if (netif_msg_tx_err(np))
				"%s: Transmit timed out, status %#08x,"
				dev->name, readl(ioaddr + IntrStatus));

		init_registers(dev);
			"%s: tx_timeout while in hands_off state?\n",
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	dev->trans_start = jiffies;	/* restart the tx watchdog clock */
	np->stats.tx_errors++;
	netif_wake_queue(dev);
/* Allocate one coherent DMA block holding the rx ring followed by the
 * tx ring; np->tx_ring aliases into the tail of the same allocation
 * (freed as one block in free_ring()). */
static int alloc_ring(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	np->rx_ring = pci_alloc_consistent(np->pci_dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
/* Refill empty rx ring slots with freshly allocated, DMA-mapped skbs.
 * An allocation failure just stops early ("better luck next round");
 * if the whole ring drained we are out of memory and the timer path
 * is responsible for retrying. */
static void refill_rx(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		int entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			/* extra padding for alignment/CRC handling */
			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
			skb = dev_alloc_skb(buflen);
			np->rx_skbuff[entry] = skb;
				break; /* Better luck next round. */
			skb->dev = dev; /* Mark as being used by this device. */
			np->rx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data, buflen, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		/* hand the descriptor back to the chip with the buffer size */
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
		if (netif_msg_rx_err(np))
			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
/* Derive the rx buffer size from the MTU (standard MTU gets the
 * default ETH_DATA_LEN-based size; larger MTUs get jumbo buffers). */
static void set_bufsize(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
		np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
1894 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Build the circular descriptor rings: each tx/rx descriptor's
 * next_desc points at the DMA address of the following slot (wrapping
 * at the end), rx descriptors start owned by the chip (DescOwn). */
static void init_ring(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);

	np->dirty_tx = np->cur_tx = 0;
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		/* tx ring sits after the rx ring, hence the +RX_RING_SIZE */
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;

	/* cur_rx == RX_RING_SIZE, dirty_rx == 0: the whole ring is empty
	 * and will be populated by refill_rx() */
	np->cur_rx = RX_RING_SIZE;

	np->rx_head_desc = &np->rx_ring[0];

	/* Please be carefull before changing this loop - at least gcc-2.95.1
	 * miscompiles it otherwise.
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
			+sizeof(struct netdev_desc)
			*((i+1)%RX_RING_SIZE));
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
		np->rx_skbuff[i] = NULL;
/* Unmap and free every queued tx skb; each one dropped this way is
 * counted in tx_dropped. */
static void drain_tx(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
				np->tx_dma[i], np->tx_skbuff[i]->len,
			dev_kfree_skb(np->tx_skbuff[i]);
			np->stats.tx_dropped++;
		np->tx_skbuff[i] = NULL;
/* Tear down the rx ring: clear each descriptor, poison the DMA address
 * field, and unmap/free any attached skb. */
static void drain_rx(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	unsigned int buflen = np->rx_buf_sz;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].cmd_status = 0;
		np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
				np->rx_dma[i], buflen,
				PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		np->rx_skbuff[i] = NULL;
/* Drain both rings (tx and rx) - convenience wrapper. */
static void drain_ring(struct net_device *dev)
/* Release the single coherent DMA block that holds both descriptor
 * rings (allocated in alloc_ring()). */
static void free_ring(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	pci_free_consistent(np->pci_dev,
		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
		np->rx_ring, np->ring_dma);
/* Reset the rx side in place: mark the ring empty and give every
 * descriptor back to the chip (DescOwn), then refill. */
static void reinit_rx(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);

	np->cur_rx = RX_RING_SIZE;
	np->rx_head_desc = &np->rx_ring[0];
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
/* Reinitialize both rings without reallocating: clear tx descriptor
 * status and reset the tx indices, then reinit the rx side. */
static void reinit_ring(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);

	np->dirty_tx = np->cur_tx = 0;
	for (i=0;i<TX_RING_SIZE;i++)
		np->tx_ring[i].cmd_status = 0;
/* net_device hard_start_xmit() method: map the skb, fill the next tx
 * descriptor, hand it to the chip (DescOwn set last), throttle the
 * queue when nearly full, and kick the tx engine.  If another path
 * owns the chip (hands_off) the packet is silently dropped. */
static int start_tx(struct sk_buff *skb, struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irq(&np->lock);

	if (!np->hands_off) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/* StrongARM: Explicitly cache flush np->tx_ring and
		 * skb->data,skb->len. */
		/* reclaim finished descriptors; stop the queue if still full */
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		/* Wake the potentially-idle transmit channel. */
		writel(TxOn, ioaddr + ChipCmd);
		/* hands_off: drop the frame instead of touching the chip */
		dev_kfree_skb_irq(skb);
		np->stats.tx_dropped++;
	spin_unlock_irq(&np->lock);

	dev->trans_start = jiffies;

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
/* Reclaim completed tx descriptors: update statistics (success or the
 * specific error class), unmap and free each skb, and wake the queue
 * once enough slots are free again.  Caller holds np->lock. */
static void netdev_tx_done(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		/* chip still owns this descriptor - stop reclaiming */
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
		if (netif_msg_tx_done(np))
				"%s: tx frame #%d finished, status %#08x.\n",
					dev->name, np->dirty_tx,
					le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			np->stats.tx_packets++;
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else { /* Various Tx errors */
				le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				np->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				np->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				np->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				np->stats.tx_window_errors++;
			np->stats.tx_errors++;
		pci_unmap_single(np->pci_dev,np->tx_dma[entry],
					np->tx_skbuff[entry]->len,
		/* Free the original skb. */
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	if (netif_queue_stopped(dev)
		&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, wake queue. */
		netif_wake_queue(dev);
/* The interrupt handler doesn't actually handle interrupts itself, it
 * schedules a NAPI poll if there is anything to do. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* Reading automatically acknowledges. */
	np->intr_status = readl(ioaddr + IntrStatus);

	if (netif_msg_intr(np))
			"%s: Interrupt, status %#08x, mask %#08x.\n",
			dev->name, np->intr_status,
			readl(ioaddr + IntrMask));

	/* status 0: not our interrupt (shared IRQ line) */
	if (!np->intr_status)

	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);

	if (netif_rx_schedule_prep(dev)) {
		/* Disable interrupts and register for poll */
		natsemi_irq_disable(dev);
		__netif_rx_schedule(dev);
/* This is the NAPI poll routine. As well as the standard RX handling
 * it also handles all other interrupts that the chip might raise.
 */
static int natsemi_poll(struct net_device *dev, int *budget)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	int work_to_do = min(*budget, dev->quota);

		/* tx completion work, under the lock shared with start_tx */
		if (np->intr_status &
			(IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);

		/* Abnormal error summary/uncommon events handlers. */
		if (np->intr_status & IntrAbnormalSummary)
			netdev_error(dev, np->intr_status);

		if (np->intr_status &
			(IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
			 IntrRxErr | IntrRxOverrun)) {
			netdev_rx(dev, &work_done, work_to_do);

		*budget -= work_done;
		dev->quota -= work_done;

		/* budget exhausted: stay scheduled, poll again later */
		if (work_done >= work_to_do)

		/* re-read status: loop while new events keep arriving */
		np->intr_status = readl(ioaddr + IntrStatus);
	} while (np->intr_status);

	netif_rx_complete(dev);

	/* Reenable interrupts providing nothing is trying to shut
	spin_lock(&np->lock);
	if (!np->hands_off && netif_running(dev))
		natsemi_irq_enable(dev);
	spin_unlock(&np->lock);
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
/* Walk the rx ring while descriptors are chip-completed (DescOwn-style
 * sign-bit test), classify errors, copy small frames (rx_copybreak)
 * or pass the mapped skb up directly, and refill the ring.  A DescMore
 * frame indicates a wedged RX state machine and triggers reset_rx(). */
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	unsigned int buflen = np->rx_buf_sz;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* If the driver owns the next entry it's a new packet. Send it up. */
	while (desc_status < 0) { /* e.g. & DescOwn */

		if (netif_msg_rx_status(np))
				"  netdev_rx() entry %d status was %#08x.\n",
				entry, desc_status);

		/* respect the NAPI budget */
		if (*work_done >= work_to_do)

		/* -4: strip the trailing CRC from the reported length */
		pkt_len = (desc_status & DescSizeMask) - 4;
		if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
			if (desc_status & DescMore) {
				if (netif_msg_rx_err(np))
						"%s: Oversized(?) Ethernet "
						"frame spanned multiple "
						"buffers, entry %#08x "
						"status %#08x.\n", dev->name,
						np->cur_rx, desc_status);
				np->stats.rx_length_errors++;

				/* The RX state machine has probably
				 * locked up beneath us. Follow the
				 * reset procedure documented in
				spin_lock_irq(&np->lock);
				writel(np->ring_dma, ioaddr + RxRingPtr);
				spin_unlock_irq(&np->lock);

				/* We'll enable RX on exit from this

				/* There was an error. */
				np->stats.rx_errors++;
				if (desc_status & (DescRxAbort|DescRxOver))
					np->stats.rx_over_errors++;
				if (desc_status & (DescRxLong|DescRxRunt))
					np->stats.rx_length_errors++;
				if (desc_status & (DescRxInvalid|DescRxAlign))
					np->stats.rx_frame_errors++;
				if (desc_status & DescRxCRC)
					np->stats.rx_crc_errors++;
		} else if (pkt_len > np->rx_buf_sz) {
			/* if this is the tail of a double buffer
			 * packet, we've already counted the error
			 * on the first part. Ignore the second half.
			struct sk_buff *skb;
			/* Omit CRC size. */
			/* Check if the packet is long enough to accept
			 * without copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + RX_OFFSET)) != NULL) {
				/* 16 byte align the IP header */
				skb_reserve(skb, RX_OFFSET);
				pci_dma_sync_single_for_cpu(np->pci_dev,
					PCI_DMA_FROMDEVICE);
				/* NOTE(review): eth_copy_and_sum was later removed
				 * from the kernel in favor of plain skb copies */
				eth_copy_and_sum(skb,
					np->rx_skbuff[entry]->data, pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,
					PCI_DMA_FROMDEVICE);
				/* big frame: hand the original mapped skb upstream */
				pci_unmap_single(np->pci_dev, np->rx_dma[entry],
					buflen, PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);

	/* Restart Rx engine if stopped. */
		/* OOM: let the timer retry the refill very soon */
		mod_timer(&np->timer, jiffies + 1);
		writel(RxOn, ioaddr + ChipCmd);
/* Handle the uncommon interrupt causes: link change, stats rollover,
 * tx underrun (bump the tx drain threshold), WoL packet, rx status
 * FIFO overrun, and PCI errors. */
static void netdev_error(struct net_device *dev, int intr_status)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	spin_lock(&np->lock);
	if (intr_status & LinkChange) {
		u16 lpa = mdio_read(dev, MII_LPA);
		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE
		 && netif_msg_link(np)) {
				"%s: Autonegotiation advertising"
				" %#04x  partner %#04x.\n", dev->name,
				np->advertising, lpa);

		/* read MII int status to clear the flag */
		readw(ioaddr + MIntrStatus);

	if (intr_status & StatsMax) {

	if (intr_status & IntrTxUnderrun) {
		/* raise the tx drain threshold one step, up to the limit */
		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
			np->tx_config += TX_DRTH_VAL_INC;
			if (netif_msg_tx_err(np))
					"%s: increased tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
			if (netif_msg_tx_err(np))
					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
					dev->name, np->tx_config);
		writel(np->tx_config, ioaddr + TxConfig);

	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		int wol_status = readl(ioaddr + WOLCmd);
		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			dev->name, wol_status);

	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
		np->stats.rx_fifo_errors++;

	/* Hmmmmm, it's not clear how to recover from PCI faults. */
	if (intr_status & IntrPCIErr) {
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		np->stats.tx_fifo_errors++;
		np->stats.rx_fifo_errors++;
	spin_unlock(&np->lock);
/* Fold the chip's hardware drop counters into np->stats.
 * Caller must hold np->lock (see get_stats()). */
static void __get_stats(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	/* The chip only need report frame silently dropped. */
	np->stats.rx_crc_errors	+= readl(ioaddr + RxCRCErrs);
	np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
/* net_device get_stats() method: refresh the hardware counters under
 * the lock (only while running and the chip isn't hands_off), then
 * return the accumulated stats. */
static struct net_device_stats *get_stats(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);

	/* The chip only need report frame silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && !np->hands_off)
	spin_unlock_irq(&np->lock);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with the IRQ masked so
 * netconsole etc. can make progress without real interrupts. */
static void natsemi_poll_controller(struct net_device *dev)
	disable_irq(dev->irq);
	intr_handler(dev->irq, dev);
	enable_irq(dev->irq);
/* Offset of the multicast hash table inside the rx filter RAM. */
#define HASH_TABLE 0x200
/* Program the rx filter: promiscuous, all-multicast, or a 512-bit
 * CRC-based multicast hash table, always accepting broadcast and our
 * own address.  Caller must hold np->lock (see set_rx_mode()). */
static void __set_rx_mode(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);
	u8 mc_filter[64]; /* Multicast hash filter */

	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
	  || (dev->flags & IFF_ALLMULTI)) {
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptMyPhys;
		struct dev_mc_list *mclist;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			/* NOTE(review): this inner 'int i' shadows the outer loop
			 * variable; top 9 bits of the CRC index the 512-bit table */
			int i = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
			mc_filter[i/8] |= (1 << (i & 0x07));
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptMulticast | AcceptMyPhys;
		/* write the hash table into filter RAM, 16 bits at a time */
		for (i = 0; i < 64; i += 2) {
			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
			writel((mc_filter[i + 1] << 8) + mc_filter[i],
				ioaddr + RxFilterData);
	writel(rx_mode, ioaddr + RxFilterAddr);
	np->cur_rx_mode = rx_mode;
/*
 * MTU change. Range-checks new_mtu (64 .. NATSEMI_RX_LIMIT -
 * NATSEMI_HEADERS); if the interface is up, stops the rx/tx engines,
 * rebuilds the receive buffers for the new size and restarts them.
 * Serialized against open by the rtnl lock (held by the caller).
 * NOTE(review): the -EINVAL return, the dev->mtu assignment and the
 * drain/reinit helper calls appear elided in this copy of the file.
 */
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
	if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		struct netdev_private *np = netdev_priv(dev);
		void __iomem * ioaddr = ns_ioaddr(dev);

		disable_irq(dev->irq);
		spin_lock(&np->lock);

		natsemi_stop_rxtx(dev);
		/* drain rx queue */
		/* change buffers */
		writel(np->ring_dma, ioaddr + RxRingPtr);
		/* restart engines */
		writel(RxOn | TxOn, ioaddr + ChipCmd);
		spin_unlock(&np->lock);
		enable_irq(dev->irq);
/*
 * Locked wrapper used from process context to reprogram the receive
 * filter. NOTE(review): the guarded call between lock and unlock
 * (presumably "if (!np->hands_off) __set_rx_mode(dev);") appears
 * elided in this copy of the file.
 */
static void set_rx_mode(struct net_device *dev)
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);
2472 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2474 struct netdev_private *np = netdev_priv(dev);
2475 strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
2476 strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
2477 strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
2480 static int get_regs_len(struct net_device *dev)
2482 return NATSEMI_REGS_SIZE;
2485 static int get_eeprom_len(struct net_device *dev)
2487 struct netdev_private *np = netdev_priv(dev);
2488 return np->eeprom_size;
/*
 * ethtool get_settings: snapshot the current link parameters under
 * np->lock. NOTE(review): the final "return 0;" appears elided in
 * this copy of the file.
 */
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
/*
 * ethtool set_settings: apply new link parameters under np->lock.
 * NOTE(review): the "int res;" declaration and the final
 * "return res;" appear elided in this copy of the file.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	res = netdev_set_ecmd(dev, ecmd);
	spin_unlock_irq(&np->lock);
/*
 * ethtool get_wol: read the supported and currently-enabled wake-up
 * options plus the SecureOn password, all under np->lock.
 */
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
	netdev_get_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
/*
 * ethtool set_wol: program the wake-up options and the SecureOn
 * password under np->lock. NOTE(review): the "int res;" declaration
 * and the final "return res;" appear elided in this copy.
 */
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	netdev_set_wol(dev, wol->wolopts);
	res = netdev_set_sopass(dev, wol->sopass);
	spin_unlock_irq(&np->lock);
/* ethtool get_regs: fill buf with the register dump under np->lock. */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
	struct netdev_private *np = netdev_priv(dev);
	regs->version = NATSEMI_REGS_VER;
	spin_lock_irq(&np->lock);
	netdev_get_regs(dev, buf);
	spin_unlock_irq(&np->lock);
2539 static u32 get_msglevel(struct net_device *dev)
2541 struct netdev_private *np = netdev_priv(dev);
2542 return np->msg_enable;
2545 static void set_msglevel(struct net_device *dev, u32 val)
2547 struct netdev_private *np = netdev_priv(dev);
2548 np->msg_enable = val;
/*
 * ethtool nway_reset: restart autonegotiation, but only when it is
 * currently enabled in BMCR. NOTE(review): the declarations of tmp
 * and the return-code variable, plus the return statements, appear
 * elided in this copy of the file.
 */
static int nway_reset(struct net_device *dev)
	/* if autoneg is off, it's an error */
	tmp = mdio_read(dev, MII_BMCR);
	if (tmp & BMCR_ANENABLE) {
		tmp |= (BMCR_ANRESTART);
		mdio_write(dev, MII_BMCR, tmp);
2565 static u32 get_link(struct net_device *dev)
2567 /* LSTATUS is latched low until a read - so read twice */
2568 mdio_read(dev, MII_BMSR);
2569 return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
/*
 * ethtool get_eeprom: read the whole EEPROM into a scratch buffer
 * under np->lock, then copy the requested window to the caller.
 * NOTE(review): the eebuf/res declarations, the allocation-failure
 * check, the kfree() and the final return appear elided in this copy.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
	struct netdev_private *np = netdev_priv(dev);

	eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);

	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
	spin_lock_irq(&np->lock);
	res = netdev_get_eeprom(dev, eebuf);
	spin_unlock_irq(&np->lock);
	memcpy(data, eebuf+eeprom->offset, eeprom->len);
/*
 * ethtool method table for this driver.
 * NOTE(review): the .get_wol/.set_wol initializers and the closing
 * "};" appear elided in this copy of the file.
 */
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_regs_len = get_regs_len,
	.get_eeprom_len = get_eeprom_len,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_regs = get_regs,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_eeprom = get_eeprom,
/*
 * Translate ethtool WAKE_* flags into the chip's WOLCmd bits and
 * program the register. SecureOn magic packet (WAKE_MAGICSECURE) is
 * only honoured on silicon revision D and later (np->srr check).
 * NOTE(review): several "data |= ..." lines (for WakePhy, WakeArp,
 * WakeMagic), some closing braces and the return statement appear
 * elided in this copy of the file.
 */
static int netdev_set_wol(struct net_device *dev, u32 newval)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;

	/* translate to bitmasks this chip understands */
	if (newval & WAKE_PHY)
	if (newval & WAKE_UCAST)
		data |= WakeUnicast;
	if (newval & WAKE_MCAST)
		data |= WakeMulticast;
	if (newval & WAKE_BCAST)
		data |= WakeBroadcast;
	if (newval & WAKE_ARP)
	if (newval & WAKE_MAGIC)
	if (np->srr >= SRR_DP83815_D) {
		if (newval & WAKE_MAGICSECURE) {
			data |= WakeMagicSecure;

	writel(data, ioaddr + WOLCmd);
/*
 * Report the supported and currently-enabled wake-up options from the
 * WOLCmd register, translated to ethtool WAKE_* flags. SecureOn
 * (WAKE_MAGICSECURE) is only advertised on rev D+ silicon; the bit
 * can read back set on rev C but is broken there (see comment below).
 * NOTE(review): most of the "*cur |= ..." lines and the return
 * statement appear elided in this copy of the file.
 */
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u32 regval = readl(ioaddr + WOLCmd);

	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
			| WAKE_ARP | WAKE_MAGIC);

	if (np->srr >= SRR_DP83815_D) {
		/* SOPASS works on revD and higher */
		*supported |= WAKE_MAGICSECURE;

	/* translate from chip bitmasks */
	if (regval & WakePhy)
	if (regval & WakeUnicast)
	if (regval & WakeMulticast)
	if (regval & WakeBroadcast)
	if (regval & WakeArp)
	if (regval & WakeMagic)
	if (regval & WakeMagicSecure) {
		/* this can be on in revC, but it's broken */
		*cur |= WAKE_MAGICSECURE;
/*
 * Program the 6-byte SecureOn password into the undocumented RFCR
 * slots 0xa/0xc/0xe, with the RX filter temporarily disabled so the
 * registers become writable. Only supported on rev D and later.
 * NOTE(review): the addr declaration, the early return for pre-rev-D
 * silicon and the final return appear elided in this copy.
 */
static int netdev_set_sopass(struct net_device *dev, u8 *newval)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)newval;

	if (np->srr < SRR_DP83815_D) {

	/* enable writing to these registers by disabling the RX filter */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
	addr &= ~RxFilterEnable;
	writel(addr, ioaddr + RxFilterAddr);

	/* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
	writel(addr | 0xa, ioaddr + RxFilterAddr);
	writew(sval[0], ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	writew(sval[1], ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	writew(sval[2], ioaddr + RxFilterData);

	/* re-enable the RX filter */
	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);
/*
 * Read back the 6-byte SecureOn password from the undocumented RFCR
 * slots 0xa/0xc/0xe. Pre-rev-D silicon has no SOPASS support, so
 * zeroes are reported there.
 * NOTE(review): the addr declaration, the early return and the final
 * return appear elided in this copy of the file.
 */
static int netdev_get_sopass(struct net_device *dev, u8 *data)
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);
	u16 *sval = (u16 *)data;

	if (np->srr < SRR_DP83815_D) {
		sval[0] = sval[1] = sval[2] = 0;

	/* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;

	writel(addr | 0xa, ioaddr + RxFilterAddr);
	sval[0] = readw(ioaddr + RxFilterData);

	writel(addr | 0xc, ioaddr + RxFilterAddr);
	sval[1] = readw(ioaddr + RxFilterData);

	writel(addr | 0xe, ioaddr + RxFilterAddr);
	sval[2] = readw(ioaddr + RxFilterData);

	writel(addr, ioaddr + RxFilterAddr);
/*
 * Fill an ethtool_cmd with the current link configuration: port,
 * speed, duplex, autoneg state and the advertised/supported masks.
 * When autoneg is enabled, the actual negotiated speed/duplex is
 * derived from the link partner ability (MII_LPA).
 * NOTE(review): the switch-case labels, the else branches of the
 * speed/duplex selection and the final return appear elided in this
 * copy of the file.
 */
static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
	struct netdev_private *np = netdev_priv(dev);

	ecmd->port = dev->if_port;
	ecmd->speed = np->speed;
	ecmd->duplex = np->duplex;
	ecmd->autoneg = np->autoneg;
	ecmd->advertising = 0;
	if (np->advertising & ADVERTISE_10HALF)
		ecmd->advertising |= ADVERTISED_10baseT_Half;
	if (np->advertising & ADVERTISE_10FULL)
		ecmd->advertising |= ADVERTISED_10baseT_Full;
	if (np->advertising & ADVERTISE_100HALF)
		ecmd->advertising |= ADVERTISED_100baseT_Half;
	if (np->advertising & ADVERTISE_100FULL)
		ecmd->advertising |= ADVERTISED_100baseT_Full;
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
	ecmd->phy_address = np->phy_addr_external;
	/*
	 * We intentionally report the phy address of the external
	 * phy, even if the internal phy is used. This is necessary
	 * to work around a deficiency of the ethtool interface:
	 * It's only possible to query the settings of the active
	 * port. Therefore
	 * # ethtool -s ethX port mii
	 * actually sends an ioctl to switch to port mii with the
	 * settings that are used for the current active port.
	 * If we would report a different phy address in this
	 * command, then
	 * # ethtool -s ethX port tp;ethtool -s ethX port mii
	 * would unintentionally change the phy address.
	 *
	 * Fortunately the phy address doesn't matter with the
	 * internal phy.
	 */

	/* set information based on active port type */
	switch (ecmd->port) {
	/* NOTE(review): the PORT_TP / PORT_MII / PORT_FIBRE case labels
	 * appear elided in this copy. */
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->transceiver = XCVR_INTERNAL;
		ecmd->advertising |= ADVERTISED_MII;
		ecmd->transceiver = XCVR_EXTERNAL;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;

	/* if autonegotiation is on, try to return the active speed/duplex */
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		tmp = mii_nway_result(
			np->advertising & mdio_read(dev, MII_LPA));
		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
			ecmd->speed = SPEED_100;
			ecmd->speed = SPEED_10;
		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
			ecmd->duplex = DUPLEX_FULL;
			ecmd->duplex = DUPLEX_HALF;

	/* ignore maxtxpkt, maxrxpkt for now */
/*
 * Apply new link settings from ethtool. Validates the port /
 * transceiver / autoneg combination, rejects configurations that
 * cannot work when the PHY is ignored, stores the parameters,
 * switches to the requested port and reprograms the PHY.
 * NOTE(review): the "return -EINVAL" statements, the else branches
 * (fixed speed/duplex path, switch_port_external) and the final
 * return appear elided in this copy of the file.
 */
static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
	struct netdev_private *np = netdev_priv(dev);

	if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
	if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
					  ADVERTISED_10baseT_Full |
					  ADVERTISED_100baseT_Half |
					  ADVERTISED_100baseT_Full)) == 0) {
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)

	/*
	 * If we're ignoring the PHY then autoneg and the internal
	 * transceiver are really not going to work so don't let the
	 * user select them.
	 */
	if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE ||
			       ecmd->port == PORT_TP))

	/*
	 * maxtxpkt, maxrxpkt: ignored for now.
	 *
	 * transceiver:
	 * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
	 * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
	 * selects based on ecmd->port.
	 *
	 * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
	 * phys that are connected to the mii bus. It's used to apply fibre
	 * specific configuration.
	 */

	/* WHEW! now lets bang some bits */

	/* save the parms */
	dev->if_port = ecmd->port;
	np->autoneg = ecmd->autoneg;
	np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
	if (np->autoneg == AUTONEG_ENABLE) {
		/* advertise only what has been requested */
		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			np->advertising |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			np->advertising |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			np->advertising |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			np->advertising |= ADVERTISE_100FULL;
		/* NOTE(review): the fixed-speed branch below presumably sits
		 * in an elided else block. */
		np->speed = ecmd->speed;
		np->duplex = ecmd->duplex;
		/* user overriding the initial full duplex parm? */
		if (np->duplex == DUPLEX_HALF)
			np->full_duplex = 0;

	/* get the right phy enabled */
	if (ecmd->port == PORT_TP)
		switch_port_internal(dev);
		switch_port_external(dev);

	/* set parms and see how this affected our link status */
	init_phy_fixup(dev);
/*
 * Build the ethtool register dump: page-0 chip registers, the mii
 * registers, the "magic" page-1 registers (selected via PGSEL) and
 * the RFCR-indexed receive-filter registers. Called under np->lock
 * from get_regs().
 * NOTE(review): the i/j/rfcr declarations, a printk( line and the
 * final return appear elided in this copy of the file.
 */
static int netdev_get_regs(struct net_device *dev, u8 *buf)
	u32 *rbuf = (u32 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);

	/* read non-mii page 0 of registers */
	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
		rbuf[i] = readl(ioaddr + i*4);

	/* read current mii registers */
	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
		rbuf[i] = mdio_read(dev, i & 0x1f);

	/* read only the 'magic' registers from page 1 */
	writew(1, ioaddr + PGSEL);
	rbuf[i++] = readw(ioaddr + PMDCSR);
	rbuf[i++] = readw(ioaddr + TSTDAT);
	rbuf[i++] = readw(ioaddr + DSPCFG);
	rbuf[i++] = readw(ioaddr + SDCFG);
	writew(0, ioaddr + PGSEL);

	/* read RFCR indexed registers */
	rfcr = readl(ioaddr + RxFilterAddr);
	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
		writel(j*2, ioaddr + RxFilterAddr);
		rbuf[i++] = readw(ioaddr + RxFilterData);
	writel(rfcr, ioaddr + RxFilterAddr);

	/* the interrupt status is clear-on-read - see if we missed any */
	if (rbuf[4] & rbuf[5]) {
		"%s: shoot, we dropped an interrupt (%#08x)\n",
		dev->name, rbuf[4] & rbuf[5]);
/*
 * Reverse the bit order of a 16-bit value (bit 0 <-> 15, 1 <-> 14, ...).
 * Used by netdev_get_eeprom() to present EEPROM words in their native
 * bit-swapped storage order. Beware: the argument is expanded sixteen
 * times, so it must be side-effect free.
 */
#define SWAP_BITS(x) ( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9) \
			| (((x) & 0x0010) << 7) | (((x) & 0x0020) << 5) \
			| (((x) & 0x0040) << 3) | (((x) & 0x0080) << 1) \
			| (((x) & 0x0100) >> 1) | (((x) & 0x0200) >> 3) \
			| (((x) & 0x0400) >> 5) | (((x) & 0x0800) >> 7) \
			| (((x) & 0x1000) >> 9) | (((x) & 0x2000) >> 11) \
			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )
/*
 * Copy the entire EEPROM contents into buf, word by word, restoring
 * the device's bit-swapped storage order (eeprom_read() un-swaps on
 * read). Called under np->lock from get_eeprom().
 * NOTE(review): the loop-index declaration and the final return
 * appear elided in this copy of the file.
 */
static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
	u16 *ebuf = (u16 *)buf;
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	/* eeprom_read reads 16 bits, and indexes by 16 bits */
	for (i = 0; i < np->eeprom_size/2; i++) {
		ebuf[i] = eeprom_read(ioaddr, i);
		/* The EEPROM itself stores data bit-swapped, but eeprom_read
		 * reads it back "sanely". So we swap it back here in order to
		 * present it to userland as it is stored. */
		ebuf[i] = SWAP_BITS(ebuf[i]);
/*
 * Private ioctl handler: MII PHY access (SIOCGMIIPHY / SIOCGMIIREG /
 * SIOCSMIIREG plus their legacy SIOCDEVPRIVATE aliases). Accesses go
 * to the mii address on the currently active port; writes to the
 * MII_ADVERTISE register are mirrored into np->advertising so the
 * driver's view stays consistent.
 * NOTE(review): the switch(cmd) header, else branches, permission
 * error return and the final return appear elided in this copy.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
	case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
		data->phy_id = np->phy_addr_external;

	case SIOCGMIIREG:		/* Read MII PHY register. */
	case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
		/* The phy_id is not enough to uniquely identify
		 * the intended target. Therefore the command is sent to
		 * the given mii on the current port.
		 */
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external)
				data->val_out = mdio_read(dev,
							data->reg_num & 0x1f);
			move_int_phy(dev, data->phy_id & 0x1f);
			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
						data->reg_num & 0x1f);

	case SIOCSMIIREG:		/* Write MII PHY register. */
	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
		if (!capable(CAP_NET_ADMIN))
		if (dev->if_port == PORT_TP) {
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				mdio_write(dev, data->reg_num & 0x1f,
			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
					np->advertising = data->val_in;
				move_int_phy(dev, data->phy_id & 0x1f);
				miiport_write(dev, data->phy_id & 0x1f,
						data->reg_num & 0x1f,
/*
 * Arm the chip for wake-on-lan: restart the receiver in silent mode
 * (NULL RxRingPtr), clear stale WoL status, set PME enable and turn
 * the rx process back on.
 * @enable_intr: if true, also unmask the WOL/LinkChange interrupts
 * so a wake event can be signalled to the host.
 * NOTE(review): the printk( line and the braces of the
 * if (enable_intr) block appear elided in this copy of the file.
 */
static void enable_wol_mode(struct net_device *dev, int enable_intr)
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_wol(np))
		"%s: remaining active for wake-on-lan\n",

	/* For WOL we must restart the rx process in silent mode.
	 * Write NULL to the RxRingPtr. Only possible if
	 * rx process is stopped
	 */
	writel(0, ioaddr + RxRingPtr);

	/* read WoL status to clear */
	readl(ioaddr + WOLCmd);

	/* PME on, clear status */
	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);

	/* and restart the rx process */
	writel(RxOn, ioaddr + ChipCmd);

	/* enable the WOL interrupt.
	 * Could be used to send a netlink message.
	 */
	writel(WOLPkt | LinkChange, ioaddr + IntrMask);
	writel(1, ioaddr + IntrEnable);
/*
 * Bring the interface down: stop the timer, mask interrupts, free
 * the IRQ, freeze the hardware statistics, stop rx/tx and drop the
 * carrier. If wake-on-lan options are armed, the NIC is restarted
 * in WOL mode; otherwise the saved ClkRun/PME state is restored.
 * NOTE(review): several lines (printk( calls, the if (wol) branch
 * structure, ring teardown and the final return) appear elided in
 * this copy of the file.
 */
static int netdev_close(struct net_device *dev)
	void __iomem * ioaddr = ns_ioaddr(dev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_msg_ifdown(np))
		"%s: Shutting down ethercard, status was %#04x.\n",
		dev->name, (int)readl(ioaddr + ChipCmd));
	if (netif_msg_pktdata(np))
		"%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		dev->name, np->cur_tx, np->dirty_tx,
		np->cur_rx, np->dirty_rx);

	/*
	 * FIXME: what if someone tries to close a device
	 * that is suspended?
	 * Should we reenable the nic to switch to
	 * the final WOL settings?
	 */

	del_timer_sync(&np->timer);
	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	natsemi_irq_disable(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	free_irq(dev->irq, dev);

	/* Interrupt disabled, interrupt handler released,
	 * queue stopped, timer deleted, rtnl_lock held
	 * All async codepaths that access the driver are disabled.
	 */
	spin_lock_irq(&np->lock);
	readl(ioaddr + IntrMask);
	readw(ioaddr + MIntrStatus);

	/* Freeze the hardware statistics counters. */
	writel(StatsFreeze, ioaddr + StatsCtrl);

	/* Stop the chip's Tx and Rx processes. */
	natsemi_stop_rxtx(dev);

	spin_unlock_irq(&np->lock);

	/* clear the carrier last - an interrupt could reenable it otherwise */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;

	/* restart the NIC in WOL mode.
	 * The nic must be stopped for this.
	 */
	enable_wol_mode(dev, 0);

	/* Restore PME enable bit unmolested */
	writel(np->SavedClkRun, ioaddr + ClkRun);
/*
 * PCI removal: unregister the netdev and release the PCI resources.
 * NOTE(review): the iounmap(ioaddr)/free_netdev(dev) calls expected
 * in this function appear elided in this copy of the file.
 */
static void __devexit natsemi_remove1 (struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	unregister_netdev (dev);
	pci_release_regions (pdev);
	pci_set_drvdata(pdev, NULL);
3145 * The ns83815 chip doesn't have explicit RxStop bits.
3146 * Kicking the Rx or Tx process for a new packet reenables the Rx process
3147 * of the nic, thus this function must be very careful:
3149 * suspend/resume synchronization:
3151 * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
3152 * start_tx, tx_timeout
3154 * No function accesses the hardware without checking np->hands_off.
3155 * the check occurs under spin_lock_irq(&np->lock);
3157 * * netdev_ioctl: noncritical access.
3158 * * netdev_open: cannot happen due to the device_detach
3159 * * netdev_close: doesn't hurt.
3160 * * netdev_timer: timer stopped by natsemi_suspend.
3161 * * intr_handler: doesn't acquire the spinlock. suspend calls
3162 * disable_irq() to enforce synchronization.
3163 * * natsemi_poll: checks before reenabling interrupts. suspend
3164 * sets hands_off, disables interrupts and then waits with
3165 * netif_poll_disable().
3167 * Interrupts must be disabled, otherwise hands_off can cause irq storms.
/*
 * PM suspend: stop the timer and the rx/tx engines with interrupts
 * masked, disable polling, then either re-arm the chip for WOL or
 * restore the saved ClkRun/PME state, and finally detach the device.
 * See the synchronization notes in the comment block above.
 * NOTE(review): the hands_off handling, the if (wol) branch
 * structure, the stats refresh call and the final return appear
 * elided in this copy of the file.
 */
static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem * ioaddr = ns_ioaddr(dev);

	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		disable_irq(dev->irq);
		spin_lock_irq(&np->lock);

		writel(0, ioaddr + IntrEnable);

		natsemi_stop_rxtx(dev);
		netif_stop_queue(dev);

		spin_unlock_irq(&np->lock);
		enable_irq(dev->irq);

		netif_poll_disable(dev);

		/* Update the error counts. */

		/* pci_power_off(pdev, -1); */

		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
		/* Restore PME enable bit */
		/* restart the NIC in WOL mode.
		 * The nic must be stopped for this.
		 * FIXME: use the WOL interrupt
		 */
		enable_wol_mode(dev, 0);

		/* Restore PME enable bit unmolested */
		writel(np->SavedClkRun, ioaddr + ClkRun);
	netif_device_detach(dev);
/*
 * PM resume: re-enable the PCI device, reprogram the chip registers
 * and re-attach/restart the interface (timer, polling).
 * NOTE(review): the early-exit for a still-present device, the
 * hands_off clearing, some braces and the final return appear elided
 * in this copy of the file.
 */
static int natsemi_resume (struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);

	if (netif_device_present(dev))
	if (netif_running(dev)) {
		BUG_ON(!np->hands_off);
		pci_enable_device(pdev);
		/* pci_power_on(pdev); */

		disable_irq(dev->irq);
		spin_lock_irq(&np->lock);

		init_registers(dev);
		netif_device_attach(dev);
		spin_unlock_irq(&np->lock);
		enable_irq(dev->irq);

		mod_timer(&np->timer, jiffies + 1*HZ);
	netif_device_attach(dev);
	netif_poll_enable(dev);
/*
 * PCI driver glue binding the probe/remove and PM entry points.
 * NOTE(review): the .name initializer, the CONFIG_PM guards around
 * suspend/resume and the closing "};" appear elided in this copy.
 */
static struct pci_driver natsemi_driver = {
	.id_table = natsemi_pci_tbl,
	.probe = natsemi_probe1,
	.remove = __devexit_p(natsemi_remove1),
	.suspend = natsemi_suspend,
	.resume = natsemi_resume,
/*
 * Module load: register the PCI driver; probing of individual
 * devices happens via natsemi_probe1().
 * NOTE(review): the version printk( lines appear elided here.
 */
static int __init natsemi_init_mod (void)
	/* when a module, this is printed whether or not devices are found in probe */
	return pci_register_driver(&natsemi_driver);
3274 static void __exit natsemi_exit_mod (void)
3276 pci_unregister_driver (&natsemi_driver);
3279 module_init(natsemi_init_mod);
3280 module_exit(natsemi_exit_mod);