/*
 * BK Id: SCCS/s.enet.c 1.10 10/08/01 16:49:24 trini
 */
/*
 * Ethernet driver for Motorola MPC8260.
 * Copyright (c) 1999 Dan Malek (dmalek@jlc.net)
 * Copyright (c) 2000 MontaVista Software Inc. (source@mvista.com)
 *
 * I copied this from the 8xx CPM Ethernet driver, so follow the
 * credits back through that.
 *
 * This version of the driver is somewhat selectable for the different
 * processor/board combinations.  It works for the boards I know about
 * now, and should be easily modified to include others.  Some of the
 * configuration information is contained in <asm/commproc.h> and the
 *
 * Buffer descriptors are kept in the CPM dual port RAM, and the frame
 * buffers are in the host memory.
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small frames.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#include <asm/immap_8260.h>
#include <asm/pgtable.h>
#include <asm/mpc8260.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/cpm_8260.h>

/*
 * The MPC8260 CPM performs the Ethernet processing on an SCC.  It can use
 * an arbitrary number of buffers on byte boundaries, but must have at
 * least two receive buffers to prevent constant overrun conditions.
 *
 * The buffer descriptors are allocated from the CPM dual port memory
 * with the data buffers allocated from host memory, just like all other
 * serial communication protocols.  The host memory buffers are allocated
 * from the free page pool, and then divided into smaller receive and
 * transmit buffers.  The size of the buffers should be a power of two,
 * since that nicely divides the page.  This creates a ring buffer
 * structure similar to the LANCE and other controllers.
 *
 * Like the LANCE driver:
 * The driver runs as two independent, single-threaded flows of control.  One
 * is the send-packet routine, which enforces single-threaded use by the
 * cep->tx_busy flag.  The other thread is the interrupt handler, which is
 * single threaded by the hardware and other software.
 */
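
/* Note (added, not in the original source): in this driver the two flows
 * actually synchronize through cep->lock rather than a tx_busy flag --
 * scc_enet_start_xmit() takes spin_lock_irq() and the transmit side of
 * scc_enet_interrupt() takes spin_lock(); see below.
 */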

/* The transmitter timeout.
 */
#define TX_TIMEOUT	(2*HZ)

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are powers of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define CPM_ENET_RX_PAGES	4
#define CPM_ENET_RX_FRSIZE	2048
#define CPM_ENET_RX_FRPPG	(PAGE_SIZE / CPM_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(CPM_ENET_RX_FRPPG * CPM_ENET_RX_PAGES)
#define TX_RING_SIZE		8	/* Must be power of two */
#define TX_RING_MOD_MASK	7	/*   for this to work */
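
/* Illustration (added, not in the original source): with a power-of-two
 * ring size, wrapping an index is a single AND with the mask, e.g.
 *
 *	cep->skb_cur = (cep->skb_cur + 1) & TX_RING_MOD_MASK;
 *
 * which is exactly how skb_cur and skb_dirty advance below.  A mask of
 * TX_RING_SIZE-1 only works because TX_RING_SIZE is a power of two.
 */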

/* The CPM stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520
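
/* Sizing note (added): 1518 is the maximum Ethernet frame, i.e.
 * 6 (dest) + 6 (src) + 2 (type) + 1500 (data) + 4 (FCS), and 1520 is
 * that rounded up to a multiple of four, since the CPM requires the
 * maximum receive buffer length (MRBLR) to be a multiple of four.
 */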

/* The CPM buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
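
/* Illustration (added, not in the original source): since cur_tx and
 * dirty_tx are equal when the ring is completely empty and when it is
 * completely full, the reclaim loop in scc_enet_interrupt() breaks the
 * tie with the READY bit and the tx_full flag:
 *
 *	if ((bdp == cep->cur_tx) && (cep->tx_full == 0))
 *		break;	// ring is empty, nothing to reclaim
 */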

struct scc_enet_private {
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct	sk_buff *tx_skbuff[TX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses.
	 */
	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
	cbd_t	*tx_bd_base;
	cbd_t	*cur_rx, *cur_tx;	/* The next free ring entry */
	cbd_t	*dirty_tx;		/* The ring entries to be free()ed. */
	scc_t	*sccp;
	struct	net_device_stats stats;
	uint	tx_full;
	spinlock_t lock;
};

static int scc_enet_open(struct net_device *dev);
static int scc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int scc_enet_rx(struct net_device *dev);
static void scc_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int scc_enet_close(struct net_device *dev);
static struct net_device_stats *scc_enet_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);

/* These will be configurable for the SCC choice.
 */
#define CPM_ENET_BLOCK	CPM_CR_SCC1_SBLOCK
#define CPM_ENET_PAGE	CPM_CR_SCC1_PAGE
#define PROFF_ENET	PROFF_SCC1
#define SCC_ENET	0	/* assumed: SCC1 is im_scc[0], used below */
#define SIU_INT_ENET	SIU_INT_SCC1

/* These are both board and SCC dependent....
 */
#define PD_ENET_RXD	((uint)0x00000001)
#define PD_ENET_TXD	((uint)0x00000002)
#define PD_ENET_TENA	((uint)0x00000004)
#define PC_ENET_RENA	((uint)0x00020000)
#define PC_ENET_CLSN	((uint)0x00000004)
#define PC_ENET_TXCLK	((uint)0x00000800)
#define PC_ENET_RXCLK	((uint)0x00000400)
#define CMX_CLK_ROUTE	((uint)0x25000000)
#define CMX_CLK_MASK	((uint)0xff000000)

/* Specific to a board.
 */
#define PC_EST8260_ENET_LOOPBACK	((uint)0x80000000)
#define PC_EST8260_ENET_SQE		((uint)0x40000000)
#define PC_EST8260_ENET_NOTFD		((uint)0x20000000)

static int
scc_enet_open(struct net_device *dev)
{
	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */
	netif_start_queue(dev);
	return 0;					/* Always succeed */
}

static int
scc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;
	volatile cbd_t	*bdp;

	/* Fill in a Tx ring entry */
	bdp = cep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since cep->tx_full should be set.
		 */
		printk("%s: tx queue full!\n", dev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* If the frame is short, tell CPM to pad it.
	 */
	if (skb->len <= ETH_ZLEN)
		bdp->cbd_sc |= BD_ENET_TX_PAD;
	else
		bdp->cbd_sc &= ~BD_ENET_TX_PAD;

	/* Set buffer length and buffer pointer.
	 */
	bdp->cbd_datlen = skb->len;
	bdp->cbd_bufaddr = __pa(skb->data);

	/* Save the skb pointer so the interrupt handler can free it.
	 */
	cep->tx_skbuff[cep->skb_cur] = skb;

	cep->stats.tx_bytes += skb->len;
	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;

	spin_lock_irq(&cep->lock);

	/* Send it on its way.  Tell CPM it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR |
			BD_ENET_TX_LAST | BD_ENET_TX_TC);

	dev->trans_start = jiffies;

	/* If this was the last BD in the ring, start at the beginning again.
	 */
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = cep->tx_bd_base;
	else
		bdp++;

	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		netif_stop_queue(dev);
		cep->tx_full = 1;
	}

	cep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irq(&cep->lock);

	return 0;
}
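
/* Note (added): if the descriptor after the one just queued is still
 * marked READY above, every BD is in use, so the queue is stopped and
 * tx_full is set; scc_enet_interrupt() restarts the queue once it has
 * reclaimed a descriptor.
 */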

static void
scc_enet_timeout(struct net_device *dev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;

	printk("%s: transmit timed out.\n", dev->name);
	cep->stats.tx_errors++;
#ifndef final_version
	{
		int	i;
		cbd_t	*bdp;

		printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
		       cep->cur_tx, cep->tx_full ? " (full)" : "",
		       cep->cur_rx);
		bdp = cep->tx_bd_base;
		printk(" Tx @base %p :\n", bdp);
		for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
			printk("%04x %04x %08x\n",
			       bdp->cbd_sc, bdp->cbd_datlen, bdp->cbd_bufaddr);
		bdp = cep->rx_bd_base;
		printk(" Rx @base %p :\n", bdp);
		for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
			printk("%04x %04x %08x\n",
			       bdp->cbd_sc, bdp->cbd_datlen, bdp->cbd_bufaddr);
	}
#endif
	if (!cep->tx_full)
		netif_wake_queue(dev);
}

/* The interrupt handler.
 * This is called from the CPM handler, not the MPC core interrupt.
 */
static void
scc_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
	struct	net_device *dev = dev_id;
	volatile struct	scc_enet_private *cep;
	volatile cbd_t	*bdp;
	ushort	int_events;
	int	must_restart;

	cep = (struct scc_enet_private *)dev->priv;

	/* Get the interrupt events that caused us to be here.
	 */
	int_events = cep->sccp->scc_scce;
	cep->sccp->scc_scce = int_events;
	must_restart = 0;

	/* Handle receive event in its own function.
	 */
	if (int_events & SCCE_ENET_RXF)
		scc_enet_rx(dev_id);

	/* Check for a transmit error.  The manual is a little unclear
	 * about this, so this is debug code until I get it figured out.  It
	 * appears that if TXE is set, then TXB is not set.  However,
	 * if carrier sense is lost during frame transmission, the TXE
	 * bit is set, "and continues the buffer transmission normally."
	 * I don't know if "normally" implies TXB is set when the buffer
	 * descriptor is closed.....trial and error :-).
	 */

	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
	 */
	if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) {
		spin_lock(&cep->lock);
		bdp = cep->dirty_tx;
		while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) {
			if ((bdp==cep->cur_tx) && (cep->tx_full == 0))
				break;

			if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
				cep->stats.tx_heartbeat_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
				cep->stats.tx_window_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
				cep->stats.tx_aborted_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
				cep->stats.tx_fifo_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
				cep->stats.tx_carrier_errors++;

			/* No heartbeat or Lost carrier are not really bad errors.
			 * The others require a restart transmit command.
			 */
			if (bdp->cbd_sc &
			    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				must_restart = 1;
				cep->stats.tx_errors++;
			}

			cep->stats.tx_packets++;

			/* Deferred means some collisions occurred during transmit,
			 * but we eventually sent the packet OK.
			 */
			if (bdp->cbd_sc & BD_ENET_TX_DEF)
				cep->stats.collisions++;

			/* Free the sk buffer associated with this last transmit.
			 */
			dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]);
			cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;

			/* Update pointer to next buffer descriptor to be transmitted.
			 */
			if (bdp->cbd_sc & BD_ENET_TX_WRAP)
				bdp = cep->tx_bd_base;
			else
				bdp++;

			/* I don't know if we can be held off from processing these
			 * interrupts for more than one frame time.  I really hope
			 * not.  In such a case, we would now want to check the
			 * currently available BD (cur_tx) and determine if any
			 * buffers between the dirty_tx and cur_tx have also been
			 * sent.  We would want to process anything in between that
			 * does not have BD_ENET_TX_READY set.
			 */

			/* Since we have freed up a buffer, the ring is no longer
			 * full.
			 */
			if (cep->tx_full) {
				cep->tx_full = 0;
				if (netif_queue_stopped(dev)) {
					netif_wake_queue(dev);
				}
			}

			cep->dirty_tx = (cbd_t *)bdp;
		}

		if (must_restart) {
			volatile cpm8260_t *cp;

			/* Some transmit errors cause the transmitter to shut
			 * down.  We now issue a restart transmit.  Since the
			 * errors close the BD and update the pointers, the restart
			 * _should_ pick up without having to reset any of our
			 * pointers either.
			 */
			cp = cpmp;
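			/* Added note: this is the CP command-register
			 * handshake used throughout the driver -- write the
			 * opcode ORed with CPM_CR_FLG, then spin until the
			 * communication processor clears CPM_CR_FLG to say
			 * the command has completed.
			 */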
			cp->cp_cpcr =
				mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0,
					CPM_CR_RESTART_TX) | CPM_CR_FLG;
			while (cp->cp_cpcr & CPM_CR_FLG);
		}
		spin_unlock(&cep->lock);
	}

	/* Check for receive busy, i.e. packets coming but no place to
	 * put them.  This "can't happen" because the receive interrupt
	 * is tossing previous frames.
	 */
	if (int_events & SCCE_ENET_BSY) {
		cep->stats.rx_dropped++;
		printk("SCC ENET: BSY can't happen.\n");
	}

	return;
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
scc_enet_rx(struct net_device *dev)
{
	struct	scc_enet_private *cep;
	volatile cbd_t	*bdp;
	struct	sk_buff *skb;
	ushort	pkt_len;

	cep = (struct scc_enet_private *)dev->priv;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = cep->cur_rx;

	for (;;) {
		if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
			break;

#ifndef final_version
		/* Since we have allocated space to hold a complete frame, both
		 * the first and last indicators should be set.
		 */
		if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
		    (BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
			printk("CPM ENET: rcv is not first+last\n");
#endif

		/* Frame too long or too short.
		 */
		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
			cep->stats.rx_length_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
			cep->stats.rx_frame_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
			cep->stats.rx_crc_errors++;
		if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
			cep->stats.rx_crc_errors++;

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (bdp->cbd_sc & BD_ENET_RX_CL) {
			cep->stats.rx_frame_errors++;
		}
		else {
			/* Process the incoming frame.
			 */
			cep->stats.rx_packets++;
			pkt_len = bdp->cbd_datlen;
			cep->stats.rx_bytes += pkt_len;

			/* This does 16 byte alignment, much more than we need.
			 * The packet length includes FCS, but we don't want to
			 * include that when passing upstream as it messes up
			 * bridging applications.
			 */
			skb = dev_alloc_skb(pkt_len-4);

			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
				cep->stats.rx_dropped++;
			}
			else {
				skb->dev = dev;
				skb_put(skb,pkt_len-4);	/* Make room */
				eth_copy_and_sum(skb,
					(unsigned char *)__va(bdp->cbd_bufaddr),
					pkt_len-4, 0);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
			}
		}

		/* Clear the status flags for this buffer.
		 */
		bdp->cbd_sc &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty.
		 */
		bdp->cbd_sc |= BD_ENET_RX_EMPTY;

		/* Update BD pointer to next entry.
		 */
		if (bdp->cbd_sc & BD_ENET_RX_WRAP)
			bdp = cep->rx_bd_base;
		else
			bdp++;
	}

	cep->cur_rx = (cbd_t *)bdp;

	return 0;
}

static int
scc_enet_close(struct net_device *dev)
{
	/* Don't know what to do yet.
	 */
	netif_stop_queue(dev);

	return 0;
}

static struct net_device_stats *scc_enet_get_stats(struct net_device *dev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;

	return &cep->stats;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */
static void set_multicast_list(struct net_device *dev)
{
	struct	scc_enet_private *cep;
	struct	dev_mc_list *dmi;
	u_char	*mcptr, *tdptr;
	volatile scc_enet_t *ep;
	int	i, j;

	cep = (struct scc_enet_private *)dev->priv;

	/* Get pointer to SCC area in parameter RAM.
	 */
	ep = (scc_enet_t *)dev->base_addr;

	if (dev->flags&IFF_PROMISC) {
		/* Log any net taps. */
		printk("%s: Promiscuous mode enabled.\n", dev->name);
		cep->sccp->scc_pmsr |= SCC_PSMR_PRO;
	} else {
		cep->sccp->scc_pmsr &= ~SCC_PSMR_PRO;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
			ep->sen_gaddr1 = 0xffff;
			ep->sen_gaddr2 = 0xffff;
			ep->sen_gaddr3 = 0xffff;
			ep->sen_gaddr4 = 0xffff;
		} else {
			/* Clear filter and add the addresses in the list.
			 */
			ep->sen_gaddr1 = 0;
			ep->sen_gaddr2 = 0;
			ep->sen_gaddr3 = 0;
			ep->sen_gaddr4 = 0;

			dmi = dev->mc_list;

			for (i=0; i<dev->mc_count; i++, dmi = dmi->next) {

				/* Only support group multicast for now.
				 */
				if (!(dmi->dmi_addr[0] & 1))
					continue;

				/* The address in dmi_addr is LSB first,
				 * and taddr is MSB first.  We have to
				 * copy bytes MSB first from dmi_addr.
				 */
				mcptr = (u_char *)dmi->dmi_addr + 5;
				tdptr = (u_char *)&ep->sen_taddrh;
				for (j=0; j<6; j++)
					*tdptr++ = *mcptr--;

				/* Ask CPM to run CRC and set bit in
				 * filter mask.
				 */
				cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE,
						CPM_ENET_BLOCK, 0,
						CPM_CR_SET_GADDR) | CPM_CR_FLG;
				/* this delay is necessary here -- Cort */
				udelay(10);
				while (cpmp->cp_cpcr & CPM_CR_FLG);
			}
		}
	}
}
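
/* Note (added): CPM_CR_SET_GADDR makes the communication processor hash
 * the address just written to sen_taddrh/m/l and set the corresponding
 * bit in the sen_gaddr1-4 group filter, so no software CRC is computed
 * here.
 */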

/* Initialize the CPM Ethernet on SCC.
 */
int __init scc_enet_init(void)
{
	struct net_device *dev;
	struct scc_enet_private *cep;
	int i, j;
	unsigned char	*eap;
	unsigned long	mem_addr;
	bd_t		*bd;
	volatile	cbd_t		*bdp;
	volatile	cpm8260_t	*cp;
	volatile	scc_t		*sccp;
	volatile	scc_enet_t	*ep;
	volatile	immap_t		*immap;
	volatile	iop8260_t	*io;

	cp = cpmp;	/* Get pointer to Communication Processor */

	immap = (immap_t *)IMAP_ADDR;	/* and to internal registers */
	io = &immap->im_ioport;

	bd = (bd_t *)__res;	/* Board information structure */

	/* Allocate some private information.
	 */
	cep = (struct scc_enet_private *)kmalloc(sizeof(*cep), GFP_KERNEL);
	if (cep == NULL)
		return -ENOMEM;

	__clear_user(cep,sizeof(*cep));
	spin_lock_init(&cep->lock);

	/* Create an Ethernet device instance.
	 */
	dev = init_etherdev(0, 0);

	/* Get pointer to SCC area in parameter RAM.
	 */
	ep = (scc_enet_t *)(&immap->im_dprambase[PROFF_ENET]);

	/* And another to the SCC register area.
	 */
	sccp = (volatile scc_t *)(&immap->im_scc[SCC_ENET]);
	cep->sccp = (scc_t *)sccp;		/* Keep the pointer handy */

	/* Disable receive and transmit in case someone left it running.
	 */
	sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Configure port C and D pins for SCC Ethernet.  This
	 * won't work for all SCC possibilities....it will be
	 * board/port specific.
	 */
	io->iop_pparc |=
		(PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_pdirc &=
		~(PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_psorc &=
		~(PC_ENET_RENA | PC_ENET_TXCLK | PC_ENET_RXCLK);
	io->iop_psorc |= PC_ENET_CLSN;

	io->iop_ppard |= (PD_ENET_RXD | PD_ENET_TXD | PD_ENET_TENA);
	io->iop_pdird |= (PD_ENET_TXD | PD_ENET_TENA);
	io->iop_pdird &= ~PD_ENET_RXD;
	io->iop_psord |= PD_ENET_TXD;
	io->iop_psord &= ~(PD_ENET_RXD | PD_ENET_TENA);

	/* Configure Serial Interface clock routing.
	 * First, clear all SCC bits to zero, then set the ones we want.
	 */
	immap->im_cpmux.cmx_scr &= ~CMX_CLK_MASK;
	immap->im_cpmux.cmx_scr |= CMX_CLK_ROUTE;

	/* Allocate space for the buffer descriptors in the DP ram.
	 * These are relative offsets in the DP ram address space.
	 * Initialize base addresses for the buffer descriptors.
	 */
	i = m8260_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
	ep->sen_genscc.scc_rbase = i;
	cep->rx_bd_base = (cbd_t *)&immap->im_dprambase[i];

	i = m8260_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
	ep->sen_genscc.scc_tbase = i;
	cep->tx_bd_base = (cbd_t *)&immap->im_dprambase[i];

	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
	cep->cur_rx = cep->rx_bd_base;

	ep->sen_genscc.scc_rfcr = CPMFCR_GBL | CPMFCR_EB;
	ep->sen_genscc.scc_tfcr = CPMFCR_GBL | CPMFCR_EB;

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE;

	/* Set CRC preset and mask.
	 */
	ep->sen_cpres = 0xffffffff;
	ep->sen_cmask = 0xdebb20e3;
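
	/* Added note: 0xffffffff and 0xdebb20e3 are the standard Ethernet
	 * CRC-32 preset and mask; they are the same values the 8xx CPM
	 * Ethernet driver programs.
	 */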

	ep->sen_crcec = 0;	/* CRC Error counter */
	ep->sen_alec = 0;	/* alignment error counter */
	ep->sen_disfc = 0;	/* discard frame counter */

	ep->sen_pads = 0x8888;	/* Tx short frame pad character */
	ep->sen_retlim = 15;	/* Retry limit threshold */

	ep->sen_maxflr = PKT_MAXBUF_SIZE;	/* maximum frame length register */
	ep->sen_minflr = PKT_MINBUF_SIZE;	/* minimum frame length register */

	ep->sen_maxd1 = PKT_MAXBLR_SIZE;	/* maximum DMA1 length */
	ep->sen_maxd2 = PKT_MAXBLR_SIZE;	/* maximum DMA2 length */

	/* Clear hash tables.
	 */
	ep->sen_gaddr1 = 0;
	ep->sen_gaddr2 = 0;
	ep->sen_gaddr3 = 0;
	ep->sen_gaddr4 = 0;
	ep->sen_iaddr1 = 0;
	ep->sen_iaddr2 = 0;
	ep->sen_iaddr3 = 0;
	ep->sen_iaddr4 = 0;

	/* Set Ethernet station address.
	 * This is supplied in the board information structure, so we
	 * copy that into the controller.
	 */
	eap = (unsigned char *)&(ep->sen_paddrh);
	for (i=5; i>=0; i--)
		*eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i];

	ep->sen_pper = 0;	/* 'cause the book says so */
	ep->sen_taddrl = 0;	/* temp address (LSB) */
	ep->sen_taddrm = 0;
	ep->sen_taddrh = 0;	/* temp address (MSB) */

	/* Now allocate the host memory pages and initialize the
	 * buffer descriptors.
	 */
	bdp = cep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = cep->rx_bd_base;
	for (i=0; i<CPM_ENET_RX_PAGES; i++) {
		mem_addr = __get_free_page(GFP_KERNEL);

		/* Initialize the BD for every fragment in the page.
		 */
		for (j=0; j<CPM_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += CPM_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;
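
	/* Illustration (added, not in the original source): assuming
	 * 4096-byte pages, each of the CPM_ENET_RX_PAGES (4) pages above
	 * was carved into CPM_ENET_RX_FRPPG (4096/2048 = 2) frame buffers,
	 * giving RX_RING_SIZE (8) receive BDs:
	 *
	 *	BD 0 -> page 0 + 0	BD 1 -> page 0 + 2048
	 *	...
	 *	BD 7 -> page 3 + 2048	(BD_SC_WRAP set)
	 */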

	/* Let's re-initialize the channel now.  We have to do it later
	 * than the manual describes because we have just now finished
	 * the BD initialization.
	 */
	cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0,
			CPM_CR_INIT_TRX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);

	cep->skb_cur = cep->skb_dirty = 0;

	sccp->scc_scce = 0xffff;	/* Clear any pending events */

	/* Enable interrupts for transmit error, complete frame
	 * received, and any transmit buffer we have also set the
	 * interrupt flag.
	 */
	sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Install our interrupt handler.
	 */
	request_8xxirq(SIU_INT_ENET, scc_enet_interrupt, 0, "enet", dev);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	sccp->scc_gsmrh = 0;
	sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 |
			SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	 */
	sccp->scc_dsr = 0xd555;

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	sccp->scc_pmsr = (SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* It is now OK to enable the Ethernet transmitter.
	 * Unfortunately, there are board implementation differences here.
	 */
	io->iop_pparc &= ~(PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_psorc &= ~(PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_pdirc |= (PC_EST8260_ENET_LOOPBACK |
				PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD);
	io->iop_pdatc &= ~(PC_EST8260_ENET_LOOPBACK | PC_EST8260_ENET_SQE);
	io->iop_pdatc |= PC_EST8260_ENET_NOTFD;

	dev->base_addr = (unsigned long)ep;
	dev->priv = cep;

	/* The CPM Ethernet specific entries in the device structure. */
	dev->open = scc_enet_open;
	dev->hard_start_xmit = scc_enet_start_xmit;
	dev->tx_timeout = scc_enet_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = scc_enet_close;
	dev->get_stats = scc_enet_get_stats;
	dev->set_multicast_list = set_multicast_list;

	/* And last, enable the transmit and receive processing.
	 */
	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	printk("%s: SCC ENET Version 0.1, ", dev->name);
	for (i=0; i<5; i++)
		printk("%02x:", dev->dev_addr[i]);
	printk("%02x\n", dev->dev_addr[5]);

	return 0;
}