2 /* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
3 * Copyright (C) 2002 Advanced Micro Devices
6 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10 * Copyright 1993 United States Government as represented by the
11 * Director, National Security Agency.[ pcnet32.c ]
12 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
37 AMD8111 based 10/100 Ethernet Controller Driver.
48 #include <linux/config.h>
49 #include <linux/module.h>
50 #include <linux/kernel.h>
51 #include <linux/types.h>
52 #include <linux/compiler.h>
53 #include <linux/slab.h>
54 #include <linux/delay.h>
55 #include <linux/init.h>
56 #include <linux/ioport.h>
57 #include <linux/pci.h>
58 #include <linux/netdevice.h>
59 #include <linux/etherdevice.h>
60 #include <linux/skbuff.h>
61 #include <linux/ethtool.h>
62 #include <linux/mii.h>
63 #include <linux/if_vlan.h>
64 #include <linux/ctype.h>
65 #include <linux/crc32.h>
67 #include <asm/system.h>
69 #include <asm/byteorder.h>
70 #include <asm/uaccess.h>
72 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
73 #define AMD8111E_VLAN_TAG_USED 1
75 #define AMD8111E_VLAN_TAG_USED 0
79 #define MODULE_NAME "amd8111e"
80 #define MODULE_VERSION "3.0.0"
81 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
82 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.0");
83 MODULE_LICENSE("GPL");
85 MODULE_PARM(speed_duplex, "1-" __MODULE_STRING (MAX_UNITS) "i");
86 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotitate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
/* PCI device ID table: match the AMD 8111 LAN controller (device 0x7462).
 * NOTE(review): the all-zero terminator entry and MODULE_DEVICE_TABLE()
 * are not visible in this chunk — confirm they exist in the full file. */
static struct pci_device_id amd8111e_pci_tbl[] __devinitdata = {

	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
97 This function will set PHY speed. During initialization sets the original speed to 100 full.
/* Program the external PHY speed/duplex through CTRL2, using the option
 * bits cached in lp->ext_phy_option. Also records 100Mbps/Full-Duplex as
 * the "original" (fallback) link configuration. Caller holds lp->lock. */
static void amd8111e_set_ext_phy(struct net_device *dev)
struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
unsigned long reg_val = 0;
void * mmio = lp->mmio;
struct amd8111e_link_config *link_config = &lp->link_config;

/* Initializing SPEED_100 and DUPLEX_FULL as original values */
link_config->orig_speed = SPEED_100;
link_config->orig_duplex = DUPLEX_FULL;
link_config->orig_phy_option = XPHYSP |XPHYFD;

reg_val = lp->ext_phy_option;

/* Disable port manager (write without VAL1 clears EN_PMGR) */
writel((u32) EN_PMGR, mmio + CMD3 );

/* Reset the external PHY and apply the requested speed/duplex bits */
writel((u32)XPHYRST | lp->ext_phy_option, mmio + CTRL2);

/* Enable port manager (VAL1 qualifies the EN_PMGR set) */
writel((u32)VAL1 | EN_PMGR, mmio + CMD3 );
125 This function will unmap skb->data space and will free
126 all transmit and receive skbuffs.
/* Unmap the DMA mappings and free every transmit and receive skb the
 * driver currently owns. Safe to call with partially populated rings
 * (NULL entries are skipped). */
static int amd8111e_free_skbs(struct net_device *dev)
struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
struct sk_buff* rx_skbuff;

/* Freeing transmit skbs */
for(i = 0; i < NUM_TX_BUFFERS; i++){
if(lp->tx_skbuff[i]){
pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
dev_kfree_skb (lp->tx_skbuff[i]);
lp->tx_skbuff[i] = NULL;
lp->tx_dma_addr[i] = 0;
/* Freeing previously allocated receive buffers */
for (i = 0; i < NUM_RX_BUFFERS; i++){
rx_skbuff = lp->rx_skbuff[i];
if(rx_skbuff != NULL){
/* rx buffers were mapped rx_buff_len-2 bytes (2-byte skb_reserve
 * keeps the IP header aligned) — unmap with the same length */
pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_skbuff[i]);
lp->rx_skbuff[i] = NULL;
lp->rx_dma_addr[i] = 0;
159 This will set the receive buffer length corresponding to the mtu size of network interface.
/* Derive the receive buffer length from the interface MTU and toggle the
 * jumbo-frame option accordingly. */
static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
struct amd8111e_priv* lp = dev->priv;
unsigned int mtu = dev->mtu;

if (mtu > ETH_DATA_LEN){
/* MTU + ethernet header + FCS + optional VLAN tag (4 + 4 bytes) */
lp->rx_buff_len = mtu + ETH_HLEN + 8;
lp->options |= OPTION_JUMBO_ENABLE;
/* standard MTU: use the fixed default packet buffer size */
lp->rx_buff_len = PKT_BUFF_SZ;
lp->options &= ~OPTION_JUMBO_ENABLE;
177 This function will free all the previously allocated buffers, determine new receive buffer length and will allocate new receive buffers. This function also allocates and initializes both the transmitter and receive hardware descriptors.
/* Free any previously allocated buffers, recompute the rx buffer length,
 * allocate fresh rx skbs, and allocate + initialize the tx/rx hardware
 * descriptor rings. Returns 0 on success, negative on allocation failure
 * (consistent-memory allocations are unwound via the error labels). */
static int amd8111e_init_ring(struct net_device *dev)
struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;

lp->rx_idx = lp->tx_idx = 0;
lp->tx_complete_idx = 0;

/* Free previously allocated transmit and receive skbs */
amd8111e_free_skbs(dev);

/* allocate the tx and rx descriptors */
if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
&lp->tx_ring_dma_addr)) == NULL)

if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
&lp->rx_ring_dma_addr)) == NULL)

goto err_free_tx_ring;

/* Set new receive buff size */
amd8111e_set_rx_buff_len(dev);

/* Allocating receive skbs */
for (i = 0; i < NUM_RX_BUFFERS; i++) {

if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
/* Release previously allocated skbs */
for(--i; i >= 0 ;i--)
dev_kfree_skb(lp->rx_skbuff[i]);
goto err_free_rx_ring;
/* 2-byte reserve keeps the IP header word-aligned */
skb_reserve(lp->rx_skbuff[i],2);
/* Initializing receive descriptors */
for (i = 0; i < NUM_RX_BUFFERS; i++) {
lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);

lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len);
/* OWN_BIT hands the descriptor to the controller */
lp->rx_ring[i].rx_dr_offset10 = cpu_to_le16(OWN_BIT);

/* Initializing transmit descriptors (all owned by the host) */
for (i = 0; i < NUM_TX_RING_DR; i++) {
lp->tx_ring[i].buff_phy_addr = 0;
lp->tx_ring[i].tx_dr_offset2 = 0;
lp->tx_ring[i].buff_count = 0;

pci_free_consistent(lp->pci_dev,
sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
lp->rx_ring_dma_addr);

pci_free_consistent(lp->pci_dev,
sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
lp->tx_ring_dma_addr);
257 This function initializes the device registers and starts the device.
/* (Re)initialize the device registers, rings and PHY, then start the chip.
 * Returns 0 on success or the error from amd8111e_init_ring(). Caller
 * holds lp->lock. */
static int amd8111e_restart(struct net_device *dev)
struct amd8111e_priv *lp = (struct amd8111e_priv* )dev->priv;
void * mmio = lp->mmio;

/* stop the chip (RUN written without VAL bits clears it) */
writel(RUN, mmio + CMD0);

if(amd8111e_init_ring(dev))

amd8111e_set_ext_phy(dev);

/* set control registers */
reg_val = readl(mmio + CTRL1);

writel( reg_val| XMTSP_128 | CACHE_ALIGN | B1_MASK, mmio + CTRL1 );

/* enable interrupt */
writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);

writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);

/* initialize tx and rx ring base addresses */
writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);

writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);

if(lp->options & OPTION_JUMBO_ENABLE){
writel((u32)VAL2|JUMBO, mmio + CMD3);
/* Reset RX FIFO-underflow recovery */
writel( REX_UFLO, mmio + CMD2);
/* Should not set REX_UFLO for jumbo frames */
writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);

writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);

#if AMD8111E_VLAN_TAG_USED
/* enable VLAN tag stripping */
writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);

writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );

/* Setting the MAC address to the device */
for(i = 0; i < ETH_ADDR_LEN; i++)
writeb( dev->dev_addr[i], mmio + PADR + i );

/* set RUN bit to start the chip */
writel(VAL2 | RDMD0, mmio + CMD0);
writel(VAL0 | INTREN | RUN, mmio + CMD0);
317 This function clears the necessary device registers.
/* Put the controller registers back into a known default state: clear the
 * ring bases/lengths, delay/flow-control registers, pending interrupts,
 * the logical address (multicast) filter and the MIB counters. */
static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
unsigned int reg_val;
unsigned int logic_filter[2] ={0,};
void * mmio = lp->mmio;

/* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
writew( 0x8101, mmio + AUTOPOLL0);

/* Clear RCV_RING_BASE_ADDR */
writel(0, mmio + RCV_RING_BASE_ADDR0);

/* Clear XMT_RING_BASE_ADDR */
writel(0, mmio + XMT_RING_BASE_ADDR0);
writel(0, mmio + XMT_RING_BASE_ADDR1);
writel(0, mmio + XMT_RING_BASE_ADDR2);
writel(0, mmio + XMT_RING_BASE_ADDR3);

/* Clear CMD0/CMD2/CMD7 command registers */
writel(CMD0_CLEAR,mmio + CMD0);

writel(CMD2_CLEAR, mmio +CMD2);

writel(CMD7_CLEAR , mmio + CMD7);

/* Clear DLY_INT_A and DLY_INT_B */
writel(0x0, mmio + DLY_INT_A);
writel(0x0, mmio + DLY_INT_B);

/* Clear FLOW_CONTROL */
writel(0x0, mmio + FLOW_CONTROL);

/* Clear INT0: write 1s back to clear the pending bits */
reg_val = readl(mmio + INT0);
writel(reg_val, mmio + INT0);

/* Clear the software timer value */
writel(0x0, mmio + STVAL);

/* Mask all interrupt enables */
writel( INTEN0_CLEAR, mmio + INTEN0);

writel(0x0 , mmio + LADRF);

/* Set SRAM_SIZE & SRAM_BOUNDARY registers */
writel( 0x80010,mmio + SRAM_SIZE);

/* Clear RCV_RING0_LEN */
writel(0x0, mmio + RCV_RING_LEN0);

/* Clear XMT_RING0/1/2/3_LEN */
writel(0x0, mmio + XMT_RING_LEN0);
writel(0x0, mmio + XMT_RING_LEN1);
writel(0x0, mmio + XMT_RING_LEN2);
writel(0x0, mmio + XMT_RING_LEN3);

/* Clear XMT_RING_LIMIT */
writel(0x0, mmio + XMT_RING_LIMIT);

/* Clear the MIB counters */
writew(MIB_CLEAR, mmio + MIB_ADDR);

/* Clear the 64-bit logical address (multicast) filter */
AMD8111E_WRITE_REG64(mmio, LADRF,logic_filter);

/* SRAM_SIZE register */
reg_val = readl(mmio + SRAM_SIZE);

if(lp->options & OPTION_JUMBO_ENABLE)
writel( VAL2|JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );

reg_val = readl(mmio + CMD2);
402 This function disables the interrupt and clears all the pending
/* Mask device interrupts and acknowledge anything already pending. */
static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)

/* Disable interrupt (INTREN written without VAL0 clears the enable) */
writel(INTREN, lp->mmio + CMD0);

/* INT0 is write-1-to-clear: read the pending bits and write them back */
intr0 = readl(lp->mmio + INT0);
writel(intr0, lp->mmio + INT0);
419 This function stops the chip.
/* Stop the controller: RUN written without VAL bits clears the RUN flag. */
static void amd8111e_stop_chip(struct amd8111e_priv* lp)
writel(RUN, lp->mmio + CMD0);
427 This function frees the transmitter and receiver descriptor rings.
/* Free all skbs plus the consistent-memory tx/rx descriptor rings. */
static void amd8111e_free_ring(struct amd8111e_priv* lp)

/* Free transmit and receive skbs */
amd8111e_free_skbs(lp->amd8111e_net_dev);

/* Free transmit and receive descriptor rings */

pci_free_consistent(lp->pci_dev,
sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
lp->rx_ring, lp->rx_ring_dma_addr);

pci_free_consistent(lp->pci_dev,
sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
lp->tx_ring, lp->tx_ring_dma_addr);
452 #if AMD8111E_VLAN_TAG_USED
454 This is the receive indication function for packets with vlan tag.
/* Receive-indication path for VLAN-tagged packets: hand the skb to the
 * VLAN layer with the hardware-extracted tag. */
static int amd8111e_vlan_rx(struct amd8111e_priv *lp, struct sk_buff *skb, u16 vlan_tag)
return vlan_hwaccel_rx(skb, lp->vlgrp, vlan_tag);
463 This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb.
/* Reclaim transmit descriptors the controller has finished with: check
 * descriptor ownership, unmap and free each completed skb, and wake the
 * queue once enough ring space is available. Runs in interrupt context
 * (hence dev_kfree_skb_irq). */
static int amd8111e_tx(struct net_device *dev)
struct amd8111e_priv* lp = dev->priv;
int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;

/* Complete all the transmit packet */
while (lp->tx_complete_idx != lp->tx_idx){
tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
status = le16_to_cpu(lp->tx_ring[tx_index].tx_dr_offset2);

break; /* It still hasn't been Txed */

lp->tx_ring[tx_index].buff_phy_addr = 0;

/* We must free the original skb */
if (lp->tx_skbuff[tx_index]) {
pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
lp->tx_skbuff[tx_index]->len,
dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
lp->tx_skbuff[tx_index] = 0;
lp->tx_dma_addr[tx_index] = 0;
lp->tx_complete_idx++;

/* wake the queue when at least two descriptors are free again */
if (netif_queue_stopped(dev) &&
lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
/* The ring is no longer full, clear tbusy. */
netif_wake_queue (dev);
502 This function will check the ownership of receive buffers and descriptors. It will indicate to kernel up to half the number of maximum receive buffers in the descriptor ring, in a single receive interrupt. It will also replenish the descriptors with new skbs.
/* Receive path: walk descriptors the host owns, validate error/STP/ENP
 * bits, replace each consumed skb with a fresh one, and pass good frames
 * up the stack (via the VLAN path when tagged). At most NUM_RX_BUFFERS/2
 * packets are indicated per call to bound interrupt-context work. */
static int amd8111e_rx(struct net_device *dev)
struct amd8111e_priv *lp = dev->priv;
struct sk_buff *skb,*new_skb;
int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
int min_pkt_len, status;

int max_rx_pkt = NUM_RX_BUFFERS/2;

#if AMD8111E_VLAN_TAG_USED

/* If we own the next entry, it's a new packet. Send it up. */
while(++num_rx_pkt <= max_rx_pkt){
/* NOTE(review): OWN_BIT is tested without le16_to_cpu here, unlike the
 * ERR_BIT test below — harmless on little-endian hosts; confirm intent. */
if(lp->rx_ring[rx_index].rx_dr_offset10 & OWN_BIT)

/* check if err summary bit is set */
if(le16_to_cpu(lp->rx_ring[rx_index].rx_dr_offset10) & ERR_BIT){

* There is a tricky error noted by John Murphy,
* <murf@perftech.com> to Russ Nelson: Even with full-sized
* buffers it's possible for a jabber packet to use two
* buffers, with only the last correctly noting the error. */

/* recycle the descriptor */
lp->rx_ring[rx_index].rx_dr_offset10 &=
cpu_to_le16(RESET_RX_FLAGS);

/* check for STP and ENP: frame must fit a single descriptor */
status = le16_to_cpu(lp->rx_ring[rx_index].rx_dr_offset10);
if(!((status & STP_BIT) && (status & ENP_BIT))){

lp->rx_ring[rx_index].rx_dr_offset10 &=
cpu_to_le16(RESET_RX_FLAGS);

/* msg_count includes the 4-byte FCS, which we strip */
pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
vtag = le16_to_cpu(lp->rx_ring[rx_index].rx_dr_offset10) & TT_MASK;
/*MAC will strip vlan tag*/
if(lp->vlgrp != NULL && vtag !=0)
min_pkt_len =MIN_PKT_LEN - 4;

min_pkt_len =MIN_PKT_LEN;

if (pkt_len < min_pkt_len) {
lp->rx_ring[rx_index].rx_dr_offset10 &=
cpu_to_le16(RESET_RX_FLAGS);
lp->stats.rx_errors++;

if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
/* if allocation fail,
ignore that pkt and go to next one */
lp->rx_ring[rx_index].rx_dr_offset10 &=
cpu_to_le16(RESET_RX_FLAGS);
lp->stats.rx_errors++;

/* align the new buffer like the original ones */
skb_reserve(new_skb, 2);
skb = lp->rx_skbuff[rx_index];
pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
skb_put(skb, pkt_len);

lp->rx_skbuff[rx_index] = new_skb;

lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);

skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED

vtag = lp->rx_ring[rx_index].rx_dr_offset10 & TT_MASK;
if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
amd8111e_vlan_rx(lp, skb,
lp->rx_ring[rx_index].tag_ctrl_info);

dev->last_rx = jiffies;

/* re-arm the descriptor with the replacement buffer */
lp->rx_ring[rx_index].buff_phy_addr
= cpu_to_le32(lp->rx_dma_addr[rx_index]);
lp->rx_ring[rx_index].buff_count =
cpu_to_le16(lp->rx_buff_len-2);
lp->rx_ring[rx_index].rx_dr_offset10 |= cpu_to_le16(OWN_BIT);
rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
606 This function will store the original speed to restore later, if autoneg is turned on. This speed will be set later when the autoneg is turned off. If the link status indicates that link is down, that will be indicated to the kernel */
/* React to a link-change interrupt: read STAT0, record the negotiated (or
 * forced) speed/duplex, preserve the previous settings for later restore,
 * and tell the kernel whether the carrier is up or down. */
static int amd8111e_link_change(struct net_device* dev)
struct amd8111e_priv *lp = dev->priv;

/* read the link change */
status0 = readl(lp->mmio + STAT0);

if(status0 & LINK_STATS){
if(status0 & AUTONEG_COMPLETE){
/* keeping the original speeds */
if((lp->link_config.speed != SPEED_INVALID)&&
(lp->link_config.duplex != DUPLEX_INVALID)){
lp->link_config.orig_speed = lp->link_config.speed;
lp->link_config.orig_duplex = lp->link_config.duplex;
lp->link_config.orig_phy_option = lp->ext_phy_option;

/* autoneg done: actual speed/duplex come from the PHY */
lp->link_config.speed = SPEED_INVALID;
lp->link_config.duplex = DUPLEX_INVALID;
lp->link_config.autoneg = AUTONEG_ENABLE;
netif_carrier_on(dev);

/* forced mode: decode speed/duplex from STAT0 */
if(status0 & FULL_DPLX)
lp->link_config.duplex = DUPLEX_FULL;

lp->link_config.duplex = DUPLEX_HALF;
speed = (status0 & SPEED_MASK) >> 7;
if(speed == PHY_SPEED_10)
lp->link_config.speed = SPEED_10;
else if(speed == PHY_SPEED_100)
lp->link_config.speed = SPEED_100;
lp->link_config.autoneg = AUTONEG_DISABLE;
netif_carrier_on(dev);

/* link down: invalidate the cached settings */
lp->link_config.speed = SPEED_INVALID;
lp->link_config.duplex = DUPLEX_INVALID;
lp->link_config.autoneg = AUTONEG_INVALID;
netif_carrier_off(dev);
654 This function reads the mib counters.
/* Read one hardware MIB counter: issue the read command via MIB_ADDR,
 * poll (bounded by REPEAT_CNT) until MIB_CMD_ACTIVE clears, then fetch
 * the 32-bit value from MIB_DATA. */
static int amd8111e_read_mib(void* mmio, u8 MIB_COUNTER)

unsigned int repeat = REPEAT_CNT;

writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);

status = readw(mmio + MIB_ADDR);
udelay(2); /* controller takes MAX 2 us to get mib data */

while (--repeat && (status & MIB_CMD_ACTIVE));

data = readl(mmio + MIB_DATA);
674 This function reads the MIB registers and returns the hardware statistics. It adds the previous statistics to the new values.*/
/* Build the net_device_stats snapshot: each field is the previously saved
 * value (lp->prev_stats, captured at close time) plus the MIB counters
 * read from the hardware. Serialized with lp->lock because MIB reads go
 * through a shared command register. */
static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
struct amd8111e_priv *lp = dev->priv;
void * mmio = lp->mmio;

struct net_device_stats *prev_stats = &lp->prev_stats;
struct net_device_stats* new_stats = &lp->stats;

spin_lock_irqsave (&lp->lock, flags);

/* stats.rx_packets */
new_stats->rx_packets = prev_stats->rx_packets+
amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
amd8111e_read_mib(mmio, rcv_multicast_pkts)+
amd8111e_read_mib(mmio, rcv_unicast_pkts);

/* stats.tx_packets */
new_stats->tx_packets = prev_stats->tx_packets+
amd8111e_read_mib(mmio, xmt_packets);

/* stats.rx_bytes */
new_stats->rx_bytes = prev_stats->rx_bytes+
amd8111e_read_mib(mmio, rcv_octets);

/* stats.tx_bytes */
new_stats->tx_bytes = prev_stats->tx_bytes+
amd8111e_read_mib(mmio, xmt_octets);

/* stats.rx_errors */
new_stats->rx_errors = prev_stats->rx_errors+
amd8111e_read_mib(mmio, rcv_undersize_pkts)+
amd8111e_read_mib(mmio, rcv_fragments)+
amd8111e_read_mib(mmio, rcv_jabbers)+
amd8111e_read_mib(mmio, rcv_alignment_errors)+
amd8111e_read_mib(mmio, rcv_fcs_errors)+
amd8111e_read_mib(mmio, rcv_miss_pkts);

/* stats.tx_errors */
new_stats->tx_errors = prev_stats->tx_errors+
amd8111e_read_mib(mmio, xmt_underrun_pkts);

/* stats.rx_dropped*/
new_stats->rx_dropped = prev_stats->rx_dropped+
amd8111e_read_mib(mmio, rcv_miss_pkts);

/* stats.tx_dropped*/
new_stats->tx_dropped = prev_stats->tx_dropped+
amd8111e_read_mib(mmio, xmt_underrun_pkts);

/* stats.multicast */
new_stats->multicast = prev_stats->multicast+
amd8111e_read_mib(mmio, rcv_multicast_pkts);

/* stats.collisions*/
new_stats->collisions = prev_stats->collisions+
amd8111e_read_mib(mmio, xmt_collisions);

/* stats.rx_length_errors*/
new_stats->rx_length_errors = prev_stats->rx_length_errors+
amd8111e_read_mib(mmio, rcv_undersize_pkts)+
amd8111e_read_mib(mmio, rcv_oversize_pkts);

/* stats.rx_over_errors*/
new_stats->rx_over_errors = prev_stats->rx_over_errors+
amd8111e_read_mib(mmio, rcv_miss_pkts);

/* stats.rx_crc_errors*/
new_stats->rx_crc_errors = prev_stats->rx_crc_errors+
amd8111e_read_mib(mmio, rcv_fcs_errors);

/* stats.rx_frame_errors*/
new_stats->rx_frame_errors = prev_stats->rx_frame_errors+
amd8111e_read_mib(mmio, rcv_alignment_errors);

/* stats.rx_fifo_errors */
new_stats->rx_fifo_errors = prev_stats->rx_fifo_errors+
amd8111e_read_mib(mmio, rcv_miss_pkts);

/* stats.rx_missed_errors */
new_stats->rx_missed_errors = prev_stats->rx_missed_errors+
amd8111e_read_mib(mmio, rcv_miss_pkts);

/* stats.tx_aborted_errors*/
new_stats->tx_aborted_errors = prev_stats->tx_aborted_errors+
amd8111e_read_mib(mmio, xmt_excessive_collision);

/* stats.tx_carrier_errors*/
new_stats->tx_carrier_errors = prev_stats->tx_carrier_errors+
amd8111e_read_mib(mmio, xmt_loss_carrier);

/* stats.tx_fifo_errors*/
new_stats->tx_fifo_errors = prev_stats->tx_fifo_errors+
amd8111e_read_mib(mmio, xmt_underrun_pkts);

/* stats.tx_window_errors*/
new_stats->tx_window_errors = prev_stats->tx_window_errors+
amd8111e_read_mib(mmio, xmt_late_collision);

spin_unlock_irqrestore (&lp->lock, flags);
781 This is device interrupt function. It handles transmit, receive and link change interrupts.
/* Device interrupt handler: masks interrupts, acknowledges INT0, then
 * dispatches the receive (RINT0), transmit (TINT0) and link-change
 * (LCINT) events before re-enabling interrupts. */
static void amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)

struct net_device * dev = (struct net_device *) dev_id;
struct amd8111e_priv *lp = dev->priv;
void * mmio = lp->mmio;

spin_lock (&lp->lock);
/* disabling interrupt (INTREN without VAL0 clears the enable) */
writel(INTREN, mmio + CMD0);

/* Read interrupt status */
intr0 = readl(mmio + INT0);

/* Process all the INT event until INTR bit is clear. */

goto err_no_interrupt;

/* Current driver processes 3 interrupts : RINT,TINT,LCINT */
/* INT0 is write-1-to-clear */
writel(intr0, mmio + INT0);

/* Check if Receive Interrupt has occurred. */

/* re-arm receive DMA after draining the ring */
writel(VAL2 | RDMD0, mmio + CMD0);

/* Check if Transmit Interrupt has occurred. */

/* Check if Link Change Interrupt has occurred. */

amd8111e_link_change(dev);

/* re-enable interrupts */
writel( VAL0 | INTREN,mmio + CMD0);
spin_unlock(&lp->lock);
830 This function closes the network interface and copies the new set of statistics into the previous statistics structure so that most recent statistics will be available after the interface is down.
/* Close the interface: stop the queue and chip, free rings and the irq,
 * and snapshot the statistics into prev_stats so that the most recent
 * counters remain readable while the interface is down. */
static int amd8111e_close(struct net_device * dev)
struct amd8111e_priv *lp = dev->priv;
netif_stop_queue(dev);

spin_lock_irq(&lp->lock);

amd8111e_disable_interrupt(lp);
amd8111e_stop_chip(lp);
amd8111e_free_ring(lp);

netif_carrier_off(lp->amd8111e_net_dev);

spin_unlock_irq(&lp->lock);

free_irq(dev->irq, dev);
/* preserve the final counters across the down period */
memcpy(&lp->prev_stats,amd8111e_get_stats(dev), sizeof(lp->prev_stats));
852 /* This function opens a new interface. It requests an irq for the device, initializes the device, buffers and descriptors, and starts the device.
/* Open the interface: request the (shared) irq, reset the hardware to
 * defaults, start the chip via amd8111e_restart(), and enable the queue. */
static int amd8111e_open(struct net_device * dev )
struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;

if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, SA_SHIRQ,

spin_lock_irq(&lp->lock);

amd8111e_init_hw_default(lp);

if(amd8111e_restart(dev)){
spin_unlock_irq(&lp->lock);

spin_unlock_irq(&lp->lock);

netif_start_queue(dev);
880 This function checks if there is any transmit descriptors available to queue more packet.
/* Check whether the next tx descriptor slot is free: a non-NULL skb at
 * lp->tx_idx means the ring is full (returns non-zero availability
 * status; caller treats < 0 as "stop the queue"). */
static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
if(lp->tx_skbuff[tx_index] != 0)
892 This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc.
/* hard_start_xmit: place the skb into the next tx descriptor (buffer
 * address, length, VLAN tag if any, ownership/FCS bits), kick the
 * transmit DMA, and stop the queue when the ring fills up. */
static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
struct amd8111e_priv *lp = dev->priv;

spin_lock_irqsave(&lp->lock, flags);

tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;

lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);

lp->tx_skbuff[tx_index] = skb;
lp->tx_ring[tx_index].tx_dr_offset2 = 0;

#if AMD8111E_VLAN_TAG_USED
if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
/* ask the MAC to insert the VLAN tag on the wire */
lp->tx_ring[tx_index].tag_ctrl_cmd |=
cpu_to_le32(TCC_VLAN_INSERT);
lp->tx_ring[tx_index].tag_ctrl_info =
cpu_to_le16(vlan_tx_tag_get(skb));

lp->tx_dma_addr[tx_index] =
pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
lp->tx_ring[tx_index].buff_phy_addr =
(u32) cpu_to_le32(lp->tx_dma_addr[tx_index]);

/* Set FCS and LTINT bits; OWN_BIT hands it to the controller last */
lp->tx_ring[tx_index].tx_dr_offset2 |=
cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);

/* Trigger an immediate send poll. */
writel( VAL1 | TDMD0, lp->mmio + CMD0);
writel( VAL2 | RDMD0,lp->mmio + CMD0);

dev->trans_start = jiffies;

if(amd8111e_tx_queue_avail(lp) < 0){
netif_stop_queue(dev);

spin_unlock_irqrestore(&lp->lock, flags);
944 This function returns all the memory mapped registers of the device.
946 static char* amd8111e_read_regs(struct amd8111e_priv* lp)
948 void * mmio = lp->mmio;
949 unsigned char * reg_buff;
953 reg_buff = kmalloc( AMD8111E_REG_DUMP_LEN,GFP_KERNEL);
956 for( i=0; i< AMD8111E_REG_DUMP_LEN;i+=4);
957 reg_buff[i]= readl(mmio + i);
961 This function sets promiscuous mode, all-multi mode or the multicast address
/* Configure receive filtering: promiscuous mode, all-multicast, no
 * multicast, or a CRC-hashed logical-address filter built from the
 * device's multicast list. */
static void amd8111e_set_multicast_list(struct net_device *dev)
struct dev_mc_list* mc_ptr;
struct amd8111e_priv *lp = dev->priv;

if(dev->flags & IFF_PROMISC){
printk("%s: Setting promiscuous mode.\n",dev->name);
writel( VAL2 | PROM, lp->mmio + CMD2);

/* leaving promiscuous mode (PROM without VAL2 clears the bit) */
writel( PROM, lp->mmio + CMD2);
if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
/* get all multicast packet */
mc_filter[1] = mc_filter[0] = 0xffffffff;
lp->mc_list = dev->mc_list;
lp->options |= OPTION_MULTICAST_ENABLE;
AMD8111E_WRITE_REG64(lp->mmio, LADRF,mc_filter);

if( dev->mc_count == 0 ){
/* get only own packets */
mc_filter[1] = mc_filter[0] = 0;

lp->options &= ~OPTION_MULTICAST_ENABLE;
AMD8111E_WRITE_REG64(lp->mmio, LADRF,mc_filter);
/* disable promiscuous mode */
writel(PROM, lp->mmio + CMD2);

/* load all the multicast addresses in the logic filter */
lp->options |= OPTION_MULTICAST_ENABLE;
lp->mc_list = dev->mc_list;
mc_filter[1] = mc_filter[0] = 0;
for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
i++, mc_ptr = mc_ptr->next) {
/* top 6 bits of the ethernet CRC select the filter bit */
bit_num = ether_crc(ETH_ALEN, mc_ptr->dmi_addr) >> 26;

mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);

AMD8111E_WRITE_REG64(lp->mmio, LADRF, mc_filter);
1011 This function handles all the ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation, sets/gets WOL options for ethtool application.
/* Handle the ethtool sub-ioctls: driver info, get/set link settings,
 * register dump, autoneg restart, link status, and Wake-on-LAN get/set.
 * NOTE(review): several '&' tokens in this function appear to have been
 * mangled by HTML-entity decoding — 'ð' should read '&eth...' (i.e.
 * '&ethcmd') and '®s' should read '&regs'. Confirm against the original
 * source before building. */
static int amd8111e_ethtool_ioctl(struct net_device* dev, void* useraddr)
struct amd8111e_priv *lp = dev->priv;
struct pci_dev *pci_dev = lp->pci_dev;

if( useraddr == NULL)

/* garbled: should be copy_from_user(&ethcmd, ...) */
if(copy_from_user (ðcmd, useraddr, sizeof (ethcmd)))

case ETHTOOL_GDRVINFO:{
struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
strcpy (info.driver, MODULE_NAME);
strcpy (info.version, MODULE_VERSION);
memset(&info.fw_version, 0, sizeof(info.fw_version));
strcpy (info.bus_info, pci_dev->slot_name);
info.eedump_len = 0;
info.regdump_len = AMD8111E_REG_DUMP_LEN;
if (copy_to_user (useraddr, &info, sizeof(info)))

struct ethtool_cmd cmd = { ETHTOOL_GSET };

cmd.supported = SUPPORTED_Autoneg |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |

cmd.advertising = ADVERTISED_Autoneg |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
cmd.speed = lp->link_config.speed;
cmd.duplex = lp->link_config.duplex;

cmd.phy_address = PHY_ID;
cmd.transceiver = XCVR_EXTERNAL;
cmd.autoneg = lp->link_config.autoneg;
cmd.maxtxpkt = 0; /* not implemented interrupt coalescing */
cmd.maxrxpkt = 0; /* not implemented interrupt coalescing */
if (copy_to_user(useraddr, &cmd, sizeof(cmd)))

case ETHTOOL_SSET: {

struct ethtool_cmd cmd;

if (copy_from_user(&cmd, useraddr, sizeof(cmd)))

spin_lock_irq(&lp->lock);

if(cmd.autoneg == AUTONEG_ENABLE){
/* keeping the original speeds */
if((lp->link_config.speed != SPEED_INVALID)&&
(lp->link_config.duplex != DUPLEX_INVALID)){
lp->link_config.orig_speed = lp->link_config.speed;
lp->link_config.orig_duplex = lp->link_config.duplex;
lp->link_config.orig_phy_option = lp->ext_phy_option;

lp->ext_phy_option = XPHYANE;

else if(cmd.speed == SPEED_100 && cmd.duplex == DUPLEX_HALF)
lp->ext_phy_option = XPHYSP;
else if(cmd.speed == SPEED_100 && cmd.duplex == DUPLEX_FULL)
lp->ext_phy_option = XPHYSP |XPHYFD;
else if(cmd.speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
lp->ext_phy_option = 0;
else if(cmd.speed == SPEED_10 && cmd.duplex == DUPLEX_FULL)
lp->ext_phy_option = XPHYFD;

/* setting the original speed */
cmd.speed = lp->link_config.orig_speed;
cmd.duplex = lp->link_config.orig_duplex;
lp->ext_phy_option = lp->link_config.orig_phy_option;

lp->link_config.autoneg = cmd.autoneg;
if (cmd.autoneg == AUTONEG_ENABLE) {

lp->link_config.speed = SPEED_INVALID;
lp->link_config.duplex = DUPLEX_INVALID;

lp->link_config.speed = cmd.speed;
lp->link_config.duplex = cmd.duplex;

amd8111e_set_ext_phy(dev);
spin_unlock_irq(&lp->lock);

case ETHTOOL_GREGS: {
struct ethtool_regs regs;

/* garbled: should be copy_from_user(&regs, ...) */
if (copy_from_user(®s, useraddr, sizeof(regs)))

if (regs.len > AMD8111E_REG_DUMP_LEN)
regs.len = AMD8111E_REG_DUMP_LEN;

/* garbled: should be copy_to_user(useraddr, &regs, ...) */
if (copy_to_user(useraddr, ®s, sizeof(regs)))

regbuf = amd8111e_read_regs(lp);

useraddr += offsetof(struct ethtool_regs, data);

if (copy_to_user(useraddr, regbuf, regs.len))

case ETHTOOL_NWAY_RST: {

spin_lock_irq(&lp->lock);
if(lp->link_config.autoneg == AUTONEG_ENABLE){
lp->ext_phy_option = XPHYANE;
amd8111e_set_ext_phy(dev);

spin_unlock_irq(&lp->lock);

case ETHTOOL_GLINK: {
struct ethtool_value val = { ETHTOOL_GLINK };

val.data = netif_carrier_ok(dev) ? 1 : 0;
if (copy_to_user(useraddr, &val, sizeof(val)))

case ETHTOOL_GWOL: {
struct ethtool_wolinfo wol_info = { ETHTOOL_GWOL };

wol_info.supported = WAKE_MAGIC|WAKE_PHY;
wol_info.wolopts = 0;
if (lp->options & OPTION_WOL_ENABLE)
wol_info.wolopts = WAKE_MAGIC;
memset(&wol_info.sopass, 0, sizeof(wol_info.sopass));
if (copy_to_user(useraddr, &wol_info, sizeof(wol_info)))

case ETHTOOL_SWOL: {
struct ethtool_wolinfo wol_info;

if (copy_from_user(&wol_info, useraddr, sizeof(wol_info)))

if (wol_info.wolopts & ~(WAKE_MAGIC |WAKE_PHY))

spin_lock_irq(&lp->lock);
if(wol_info.wolopts & WAKE_MAGIC)
(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
else if(wol_info.wolopts & WAKE_PHY)
(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);

lp->options &= ~OPTION_WOL_ENABLE;
spin_unlock_irq(&lp->lock);
/* Read a PHY register over the MII management interface: wait for any
 * in-flight command, issue the read, poll (bounded by REPEAT_CNT) until
 * PHY_CMD_ACTIVE clears, then return the low 16 bits in *val. Returns
 * non-zero (via the PHY_RD_ERR branch) on error. */
static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
void * mmio = lp->mmio;
unsigned int reg_val;
unsigned int repeat= REPEAT_CNT;

/* wait for a previous PHY command to finish */
reg_val = readl(mmio + PHY_ACCESS);
while (reg_val & PHY_CMD_ACTIVE)
reg_val = readl( mmio + PHY_ACCESS );

writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
((reg & 0x1f) << 16), mmio +PHY_ACCESS);

reg_val = readl(mmio + PHY_ACCESS);
udelay(30); /* It takes 30 us to read/write data */
} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
if(reg_val & PHY_RD_ERR)

*val = reg_val & 0xffff;
1225 static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
1227 unsigned int repeat = REPEAT_CNT
1228 void * mmio = lp->mmio;
1229 unsigned int reg_val;
1232 reg_val = readl(mmio + PHY_ACCESS);
1233 while (reg_val & PHY_CMD_ACTIVE)
1234 reg_val = readl( mmio + PHY_ACCESS );
1236 writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
1237 ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
1240 reg_val = readl(mmio + PHY_ACCESS);
1241 udelay(30); /* It takes 30 us to read/write the data */
1242 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
1244 if(reg_val & PHY_RD_ERR)
1253 static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
/* ioctl entry point: forwards ethtool requests to amd8111e_ethtool_ioctl and
 * services the standard MII operations against the external PHY.
 * NOTE(review): the switch/case labels, returns and braces appear elided from
 * this extract; the surviving statements are annotated as-is. */
1255 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
1256 struct amd8111e_priv *lp = dev->priv;
/* PHY register access is privileged; reject callers without CAP_NET_ADMIN. */
1260 if (!capable(CAP_NET_ADMIN))
/* Ethtool path: hand the user pointer to the dedicated handler. */
1265 return amd8111e_ethtool_ioctl(dev, (void *) ifr->ifr_data);
/* Presumably SIOCGMIIPHY: report the driver's fixed PHY address — confirm
 * against the elided case label. */
1267 data->phy_id = PHY_ID;
/* MII register read, serialized against the rest of the driver by lp->lock. */
1272 spin_lock_irq(&lp->lock);
1273 err = amd8111e_read_phy(lp, data->phy_id,
1274 data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1275 spin_unlock_irq(&lp->lock);
1277 data->val_out = mii_regval;
/* MII register write, under the same lock. */
1282 spin_lock_irq(&lp->lock);
1283 err = amd8111e_write_phy(lp, data->phy_id,
1284 data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1285 spin_unlock_irq(&lp->lock);
1296 This function changes the mtu of the device. It restarts the device to initialize the descriptor with new receive buffers.
1298 int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1300 struct amd8111e_priv *lp = dev->priv;
/* Reject MTUs outside the range the hardware supports. */
1303 if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
/* Interface is down: just record the MTU; it takes effect on next open. */
1306 if (!netif_running(dev)) {
1307 /* new_mtu will be used
1308 when device starts next time */
/* Interface is up: stop the MAC, rebuild the rings, then restart. */
1313 spin_lock_irq(&lp->lock);
/* Writing RUN without the VAL bits clears it — presumably this stops the
 * chip before reconfiguration; confirm against the CMD0 register spec. */
1316 writel(RUN, lp->mmio + CMD0);
1320 /* if (new_mtu > ETH_DATA_LEN)
1321 lp->options |= OPTION_JUMBO_ENABLE;
1323 lp->options &= ~OPTION_JUMBO_ENABLE;
/* Re-initialize descriptors/buffers sized for the new MTU. */
1325 err = amd8111e_restart(dev);
1326 spin_unlock_irq(&lp->lock);
1328 netif_start_queue(dev);
1332 #if AMD8111E_VLAN_TAG_USED
/* Attach a VLAN group to the device (vlan_rx_register hook).
 * NOTE(review): the assignment to lp->vlgrp appears elided from this
 * extract; only the locking around it survives. */
1333 static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1335 struct amd8111e_priv *lp = dev->priv;
1336 spin_lock_irq(&lp->lock);
1338 spin_unlock_irq(&lp->lock);
/* Remove a VLAN id from the group (vlan_rx_kill_vid hook). */
1341 static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1343 struct amd8111e_priv *lp = dev->priv;
1344 spin_lock_irq(&lp->lock);
/* NOTE(review): lp->vlgrp is dereferenced with no NULL check visible here —
 * confirm a guard exists in the elided line(s) before this statement. */
1346 lp->vlgrp->vlan_devices[vid] = NULL;
1347 spin_unlock_irq(&lp->lock);
1350 static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
1352 writel( VAL1|MPPLBA, lp->mmio + CMD3);
1353 writel( VAL0|MPEN_SW, lp->mmio + CMD7);
1357 static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
1359 /* Adapter is already stoped/suspended/interrupt-disabled */
1360 writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
1365 This function sets the power state of the device. When the device go to lower power states 1,2, and 3 it enables the wake on lan
1367 static int amd8111e_set_power_state(struct amd8111e_priv* lp, u32 state)
1370 int pm = lp->pm_cap;
/* Read the current PM control/status word from the device's PCI PM
 * capability block. */
1372 pci_read_config_word(lp->pci_dev,
/* Clear any latched PME status (write-1-to-clear) and the current
 * power-state field before selecting the new state. */
1376 power_control |= PCI_PM_CTRL_PME_STATUS;
1377 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1381 pci_write_config_word(lp->pci_dev,
/* NOTE(review): the per-state switch cases appear elided from this
 * extract; only the invalid-state warning below survives. */
1399 printk(KERN_WARNING "%s: Invalid power state (%d) requested.\n",
1400 lp->amd8111e_net_dev->name, state);
/* Arm the wake sources the user configured (via ETHTOOL_SWOL) before
 * dropping into the low-power state. */
1404 if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
1405 amd8111e_enable_magicpkt(lp);
1406 if(lp->options & OPTION_WAKE_PHY_ENABLE)
1407 amd8111e_enable_link_change(lp);
1409 /* Setting new power state. */
1410 pci_write_config_word(lp->pci_dev, pm + PCI_PM_CTRL, power_control);
/* PM suspend hook: quiesce the device and enter the requested power state.
 * NOTE(review): several conditionals/returns appear elided from this extract. */
1416 static int amd8111e_suspend(struct pci_dev *pci_dev, u32 state)
1418 struct net_device *dev = pci_get_drvdata(pci_dev);
1419 struct amd8111e_priv *lp = dev->priv;
/* Nothing to do if the interface is not up. */
1422 if (!netif_running(dev))
1425 /* disable the interrupt */
1426 spin_lock_irq(&lp->lock);
1427 amd8111e_disable_interrupt(lp);
1428 spin_unlock_irq(&lp->lock);
/* Detach from the network stack so no new transmits arrive. */
1430 netif_device_detach(dev);
/* Stop the MAC before changing power state. */
1433 spin_lock_irq(&lp->lock);
1434 amd8111e_stop_chip(lp);
1435 spin_unlock_irq(&lp->lock);
1437 err = amd8111e_set_power_state(lp, state);
/* Presumably the failure path: if the state change failed, restart the
 * chip and re-attach — confirm against the elided condition. */
1440 spin_lock_irq(&lp->lock);
1441 amd8111e_restart(dev);
1442 spin_unlock_irq(&lp->lock);
1444 netif_device_attach(dev);
/* PM resume hook: return to full power (D0) and restart the chip. */
1448 static int amd8111e_resume(struct pci_dev *pci_dev)
1450 struct net_device *dev = pci_get_drvdata(pci_dev);
1451 struct amd8111e_priv *lp = dev->priv;
/* Interface was not up at suspend time: nothing to restore. */
1454 if (!netif_running(dev))
/* State 0 == D0 (full power). */
1457 err = amd8111e_set_power_state(lp, 0);
1461 netif_device_attach(dev);
/* Re-initialize the chip and rings under the driver lock. */
1463 spin_lock_irq(&lp->lock);
1464 amd8111e_restart(dev);
1465 spin_unlock_irq(&lp->lock);
1471 static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
1473 struct net_device *dev = pci_get_drvdata(pdev);
1475 unregister_netdev(dev);
1476 iounmap((void *) ((struct amd8111e_priv *)(dev->priv))->mmio);
1478 pci_release_regions(pdev);
1479 pci_disable_device(pdev);
1480 pci_set_drvdata(pdev, NULL);
/* PCI probe hook: enable the device, map its registers, allocate and
 * initialize the net_device, and register it with the network stack.
 * NOTE(review): many error-path braces, gotos and labels appear elided
 * from this extract; the surviving statements are annotated as-is. */
1484 static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1485 const struct pci_device_id *ent)
1488 unsigned long reg_addr,reg_len;
1489 struct amd8111e_priv* lp;
1490 struct net_device* dev;
1491 unsigned int chip_version;
/* Bring the PCI function out of reset / enable decoding. */
1493 err = pci_enable_device(pdev);
1495 printk(KERN_ERR "amd8111e: Cannot enable new PCI device,"
/* BAR 0 must be a memory resource: registers are memory-mapped. */
1500 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1501 printk(KERN_ERR "amd8111e: Cannot find PCI base address"
1504 goto err_disable_pdev;
/* Claim the device's I/O regions for this driver. */
1507 err = pci_request_regions(pdev, MODULE_NAME);
1509 printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
1511 goto err_disable_pdev;
/* Enable bus mastering for DMA. */
1514 pci_set_master(pdev);
1516 /* Find power-management capability. */
1517 if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
1518 printk(KERN_ERR "amd8111e: No Power Management capability, "
1523 /* Initialize DMA */
/* Device needs a 32-bit DMA mask. */
1524 if(!pci_dma_supported(pdev, 0xffffffff)){
1525 printk(KERN_ERR "amd8111e: DMA not supported,"
1529 pdev->dma_mask = 0xffffffff;
1531 reg_addr = pci_resource_start(pdev, 0);
1532 reg_len = pci_resource_len(pdev, 0);
/* Allocate the net_device with our private struct appended. */
1534 dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1536 printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
1541 SET_MODULE_OWNER(dev);
1543 #if AMD8111E_VLAN_TAG_USED
/* NOTE(review): these three VLAN assignments are repeated again below
 * (lines 1584-1587) — the duplication looks redundant; verify intent. */
1544 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
1545 dev->vlan_rx_register =amd8111e_vlan_rx_register;
1546 dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
/* Zero the private area and record back-pointers. */
1550 memset (lp, 0, sizeof (*lp));
1552 lp->amd8111e_net_dev = dev;
1553 lp->pm_cap = pm_cap;
1555 spin_lock_init(&lp->lock);
/* Map the register window found in BAR 0. */
1557 lp->mmio = ioremap(reg_addr, reg_len);
1558 if (lp->mmio == 0) {
1559 printk(KERN_ERR "amd8111e: Cannot map device registers, "
1565 /* Initializing MAC address */
1566 for(i = 0; i < ETH_ADDR_LEN; i++)
1567 dev->dev_addr[i] =readb(lp->mmio + PADR + i);
1568 /* Setting user defined speed */
/* NOTE(review): this bounds check uses sizeof(speed_duplex_mapping),
 * i.e. the array's byte size — an element count (and ">=") is what the
 * lookup below suggests; confirm the element type before changing. */
1569 if (speed_duplex[card_idx] > sizeof(speed_duplex_mapping))
1570 lp->ext_phy_option = XPHYANE;
1572 lp->ext_phy_option =
1573 speed_duplex_mapping[speed_duplex[card_idx]];
1574 /* Initialize driver entry points */
1575 dev->open = amd8111e_open;
1576 dev->hard_start_xmit = amd8111e_start_xmit;
1577 dev->stop = amd8111e_close;
1578 dev->get_stats = amd8111e_get_stats;
1579 dev->set_multicast_list = amd8111e_set_multicast_list;
1580 dev->do_ioctl = amd8111e_ioctl;
1581 dev->change_mtu = amd8111e_change_mtu;
1582 dev->irq =pdev->irq;
1584 #if AMD8111E_VLAN_TAG_USED
1585 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1586 dev->vlan_rx_register =amd8111e_vlan_rx_register;
1587 dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
1590 /* Set receive buffer length and set jumbo option*/
1591 amd8111e_set_rx_buff_len(dev);
1594 /* dev->tx_timeout = tg3_tx_timeout; */
1595 /* dev->watchdog_timeo = TG3_TX_TIMEOUT; */
/* Make the interface visible to the network stack. */
1597 err = register_netdev(dev);
1599 printk(KERN_ERR "amd8111e: Cannot register net device, "
1604 pci_set_drvdata(pdev, dev);
1606 /* display driver and device information */
1608 chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
1609 printk("%s: AMD-8111e Driver Version: %s\n",dev->name,MODULE_VERSION);
1610 printk("%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version);
1611 for (i = 0; i < 6; i++)
1612 printk("%2.2x%c", dev->dev_addr[i],i == 5 ? ' ' : ':');
/* Error unwind labels (partially elided): release in reverse order. */
1616 iounmap((void *) lp->mmio);
1622 pci_release_regions(pdev);
1625 pci_disable_device(pdev);
1626 pci_set_drvdata(pdev, NULL);
/* PCI driver registration table (old GNU "label:" initializer style).
 * NOTE(review): the "name:" member appears elided from this extract. */
1631 static struct pci_driver amd8111e_driver = {
1633 id_table: amd8111e_pci_tbl,
1634 probe: amd8111e_probe_one,
1635 remove: __devexit_p(amd8111e_remove_one),
1636 suspend: amd8111e_suspend,
1637 resume: amd8111e_resume
/* Module entry point: register the PCI driver with the PCI core. */
1640 static int __init amd8111e_init(void)
1642 return pci_module_init(&amd8111e_driver);
/* Module exit point: unregister the PCI driver. */
1645 static void __exit amd8111e_cleanup(void)
1647 pci_unregister_driver(&amd8111e_driver);
1650 module_init(amd8111e_init);
1651 module_exit(amd8111e_cleanup);