1 /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
2 munged into HPPA boxen.
4 This driver is based upon 82596.c, original credits are below...
5 but there were too many hoops which HP wants jumped through to
6 keep this code in there in a sane manner.
8 3 primary sources of the mess --
9 1) hppa needs *lots* of cacheline flushing to keep this kind of
12 2) The 82596 needs to see all of its pointers as their physical
13 address. Thus virt_to_bus/bus_to_virt are *everywhere*.
15 3) The implementation HP is using seems to be significantly pickier
16 about when and how the command and RX units are started. some
17 command ordering was changed.
19 Examination of the mach driver leads one to believe that there
20 might be a saner way to pull this off... anyone who feels like a
21 full rewrite can be my guest.
23 Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
25 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
26 03/02/2000 changes for better/correct(?) cache-flushing (deller)
29 /* 82596.c: A generic 82596 ethernet driver for linux. */
32 Written 1994 by Mark Evans.
33 This driver is for the Apricot 82596 bus-master interface
35 Modularised 12/94 Mark Evans
38 Modified to support the 82596 ethernet chips on 680x0 VME boards.
39 by Richard Hirst <richard@sleepie.demon.co.uk>
42 980825: Changed to receive directly in to sk_buffs which are
43 allocated at open() time. Eliminates copy on incoming frames
44 (small ones are still copied). Shared data now held in a
45 non-cached page, so we can run on 68060 in copyback mode.
48 * look at deferring rx frames rather than discarding (as per tulip)
49 * handle tx ring full as per tulip
50 * performance test to tune rx_copybreak
52 Most of my modifications relate to the braindead big-endian
53 implementation by Intel. When the i596 is operating in
54 'big-endian' mode, it thinks a 32 bit value of 0x12345678
55 should be stored as 0x56781234. This is a real pain, when
56 you have linked lists which are shared by the 680x0 and the
60 Written 1993 by Donald Becker.
61 Copyright 1993 United States Government as represented by the Director,
62 National Security Agency. This software may only be used and distributed
63 according to the terms of the GNU General Public License as modified by SRC,
64 incorporated herein by reference.
66 The author may be reached as becker@scyld.com, or C/O
67 Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
71 #include <linux/module.h>
73 #include <linux/kernel.h>
74 #include <linux/sched.h>
75 #include <linux/string.h>
76 #include <linux/ptrace.h>
77 #include <linux/errno.h>
78 #include <linux/ioport.h>
79 #include <linux/slab.h>
80 #include <linux/interrupt.h>
81 #include <linux/delay.h>
82 #include <linux/netdevice.h>
83 #include <linux/etherdevice.h>
84 #include <linux/skbuff.h>
85 #include <linux/init.h>
86 #include <linux/pci.h>
87 #include <linux/types.h>
89 #include <asm/bitops.h>
91 #include <asm/pgtable.h>
92 #include <asm/pgalloc.h>
97 #include <asm/cache.h>
99 static char version[] __devinitdata =
100 "82596.c $Revision: 1.1.1.1 $\n";
105 #define DEB_INIT 0x0001
106 #define DEB_PROBE 0x0002
107 #define DEB_SERIOUS 0x0004
108 #define DEB_ERRORS 0x0008
109 #define DEB_MULTI 0x0010
110 #define DEB_TDR 0x0020
111 #define DEB_OPEN 0x0040
112 #define DEB_RESET 0x0080
113 #define DEB_ADDCMD 0x0100
114 #define DEB_STATUS 0x0200
115 #define DEB_STARTTX 0x0400
116 #define DEB_RXADDR 0x0800
117 #define DEB_TXADDR 0x1000
118 #define DEB_RXFRAME 0x2000
119 #define DEB_INTS 0x4000
120 #define DEB_STRUCT 0x8000
121 #define DEB_ANY 0xffff
124 #define DEB(x,y) if (i596_debug & (x)) { y; }
/* Cache-maintenance wrappers for the shared DMA area.  They are no-ops
 * when the area was obtained via pci_alloc_consistent() (dma_consistent
 * is non-zero); otherwise the CPU cache must be written back before the
 * 82596 reads (WBACK), invalidated before the CPU reads what the chip
 * wrote (INV), or both (WBACK_INV). */
127 #define CHECK_WBACK(addr,len) \
128 do { if (!dma_consistent) dma_cache_wback((unsigned long)addr,len); } while (0)
130 #define CHECK_INV(addr,len) \
131 do { if (!dma_consistent) dma_cache_inv((unsigned long)addr,len); } while(0)
133 #define CHECK_WBACK_INV(addr,len) \
134 do { if (!dma_consistent) dma_cache_wback_inv((unsigned long)addr,len); } while (0)
137 #define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
138 #define PA_CPU_PORT_L_ACCESS 4
139 #define PA_CHANNEL_ATTENTION 8
143 * Define various macros for Channel Attention, word swapping etc., dependent
144 * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
148 #define WSWAPrfd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
149 #define WSWAPrbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
150 #define WSWAPiscp(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
151 #define WSWAPscb(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
152 #define WSWAPcmd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
153 #define WSWAPtbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
154 #define WSWAPchar(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
155 #define ISCP_BUSY 0x00010000
156 #define MACH_IS_APRICOT 0
158 #define WSWAPrfd(x) ((struct i596_rfd *)(x))
159 #define WSWAPrbd(x) ((struct i596_rbd *)(x))
160 #define WSWAPiscp(x) ((struct i596_iscp *)(x))
161 #define WSWAPscb(x) ((struct i596_scb *)(x))
162 #define WSWAPcmd(x) ((struct i596_cmd *)(x))
163 #define WSWAPtbd(x) ((struct i596_tbd *)(x))
164 #define WSWAPchar(x) ((char *)(x))
165 #define ISCP_BUSY 0x0001
166 #define MACH_IS_APRICOT 1
170 * The MPU_PORT command allows direct access to the 82596. With PORT access
171 * the following commands are available (p5-18). The 32-bit port command
172 * must be word-swapped with the most significant word written first.
173 * This only applies to VME boards.
175 #define PORT_RESET 0x00 /* reset 82596 */
176 #define PORT_SELFTEST 0x01 /* selftest */
177 #define PORT_ALTSCP 0x02 /* alternate SCB address */
178 #define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
180 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
182 MODULE_AUTHOR("Richard Hirst");
183 MODULE_DESCRIPTION("i82596 driver");
184 MODULE_LICENSE("GPL");
185 MODULE_PARM(i596_debug, "i");
186 MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
189 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
190 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
192 static int rx_copybreak = 100;
194 #define MAX_DRIVERS 4 /* max count of drivers */
196 #define PKT_BUF_SZ 1536
197 #define MAX_MC_CNT 64
199 #define I596_NULL ((u32)0xffffffff)
201 #define CMD_EOL 0x8000 /* The last command of the list, stop. */
202 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
203 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
205 #define CMD_FLEX 0x0008 /* Enable flexible memory model */
208 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
209 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
212 #define STAT_C 0x8000 /* Set to 0 after execution */
213 #define STAT_B 0x4000 /* Command being executed */
214 #define STAT_OK 0x2000 /* Command executed ok */
215 #define STAT_A 0x1000 /* Command aborted */
217 #define CUC_START 0x0100
218 #define CUC_RESUME 0x0200
219 #define CUC_SUSPEND 0x0300
220 #define CUC_ABORT 0x0400
221 #define RX_START 0x0010
222 #define RX_RESUME 0x0020
223 #define RX_SUSPEND 0x0030
224 #define RX_ABORT 0x0040
228 #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
232 unsigned short porthi;
233 unsigned short portlo;
238 #define SIZE_MASK 0x3fff
245 u32 cache_pad[5]; /* Total 32 bytes... */
248 /* The command structure has two 'next' pointers; v_next is the address of
249 * the next command as seen by the CPU, b_next is the address of the next
250 * command as seen by the 82596. The b_next pointer, as used by the 82596
251 * always references the status field of the next command, rather than the
252 * v_next field, because the 82596 is unaware of v_next. It may seem more
253 * logical to put v_next at the end of the structure, but we cannot do that
254 * because the 82596 expects other fields to be there, depending on command
259 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
260 unsigned short status;
261 unsigned short command;
262 dma_addr_t b_next; /* Address from i596 viewpoint */
270 struct sk_buff *skb; /* So we can free it after tx */
273 u32 cache_pad[6]; /* Total 64 bytes... */
275 u32 cache_pad[1]; /* Total 32 bytes... */
281 unsigned short status;
288 char mc_addrs[MAX_MC_CNT*6];
298 char i596_config[16];
304 dma_addr_t b_next; /* Address from i596 viewpoint */
306 unsigned short count;
308 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
309 struct i596_rfd *v_prev;
311 u32 cache_pad[2]; /* Total 32 bytes... */
317 unsigned short count;
318 unsigned short zero1;
320 dma_addr_t b_data; /* Address from i596 viewpoint */
322 unsigned short zero2;
325 struct i596_rbd *v_next;
326 dma_addr_t b_addr; /* This rbd addr from i596 view */
327 unsigned char *v_data; /* Address from CPUs viewpoint */
328 /* Total 32 bytes... */
334 /* These values as chosen so struct i596_private fits in one page... */
336 #define TX_RING_SIZE 32
337 #define RX_RING_SIZE 16
340 unsigned short status;
341 unsigned short command;
351 unsigned short t_off;
365 struct i596_private {
366 volatile struct i596_scp scp __attribute__((aligned(32)));
367 volatile struct i596_iscp iscp __attribute__((aligned(32)));
368 volatile struct i596_scb scb __attribute__((aligned(32)));
369 struct sa_cmd sa_cmd __attribute__((aligned(32)));
370 struct cf_cmd cf_cmd __attribute__((aligned(32)));
371 struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
372 struct mc_cmd mc_cmd __attribute__((aligned(32)));
373 struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
374 struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
375 struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
376 struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
379 struct i596_rfd *rfd_head;
380 struct i596_rbd *rbd_head;
381 struct i596_cmd *cmd_tail;
382 struct i596_cmd *cmd_head;
385 struct net_device_stats stats;
392 static char init_setup[] =
394 0x8E, /* length, prefetch on */
395 0xC8, /* fifo to 8, monitor off */
396 0x80, /* don't save bad frames */
397 0x2E, /* No source address insertion, 8 byte preamble */
398 0x00, /* priority and backoff defaults */
399 0x60, /* interframe spacing */
400 0x00, /* slot time LSB */
401 0xf2, /* slot time and retries */
402 0x00, /* promiscuous mode */
403 0x00, /* collision detect */
404 0x40, /* minimum frame length */
407 0x7f /* *multi IA */ };
409 static struct pci_dev *fake_pci_dev; /* The fake pci_dev needed for
410 pci_* functions under ccio. */
411 static int dma_consistent = 1; /* Zero if pci_alloc_consistent() fails */
413 static int i596_open(struct net_device *dev);
414 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
415 static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
416 static int i596_close(struct net_device *dev);
417 static struct net_device_stats *i596_get_stats(struct net_device *dev);
418 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
419 static void i596_tx_timeout (struct net_device *dev);
420 static void print_eth(unsigned char *buf, char *str);
421 static void set_multicast_list(struct net_device *dev);
423 static int rx_ring_size = RX_RING_SIZE;
424 static int ticks_limit = 100;
425 static int max_cmd_backlog = TX_RING_SIZE-1;
/* Channel Attention: a write to the CA register offset tells the 82596
 * to examine the SCB; the value written (0 here) is ignored by the chip. */
428 static inline void CA(struct net_device *dev)
430 	gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
/* Issue a PORT command to the 82596: command code 'c' OR'd with the
 * dma address 'x', delivered as two 32-bit writes to the port register.
 * NOTE(review): listing is gappy — the computation of 'a' and 'b' (the
 * two halves, order depending on OPT_SWAP_PORT) is not visible here. */
434 static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
436 struct i596_private *lp = (struct i596_private *) dev->priv;
438 u32 v = (u32) (c) | (u32) (x);
441 if (lp->options & OPT_SWAP_PORT) {
449 gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
451 gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
/* Busy-wait (up to delcnt iterations) for the chip to clear iscp.stat,
 * invalidating the cached ISCP each pass so we see the chip's DMA write.
 * On timeout, prints 'str' as a diagnostic; presumably returns non-zero
 * on failure (return statements fall outside this gappy listing). */
455 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
457 CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
458 while (--delcnt && lp->iscp.stat) {
460 CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
463 printk("%s: %s, iscp.stat %04x, didn't clear\n",
464 dev->name, str, lp->iscp.stat);
/* Busy-wait (up to delcnt iterations) for the 82596 to acknowledge the
 * last SCB command (scb.command goes to zero), re-invalidating the
 * cached SCB each pass.  Prints 'str' with SCB state on timeout. */
472 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
474 CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
475 while (--delcnt && lp->scb.command) {
477 CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
480 printk("%s: %s, status %4.4x, cmd %4.4x.\n",
481 dev->name, str, lp->scb.status, lp->scb.command);
/* Debug dump: print the SCP/ISCP/SCB, the pending command list, and the
 * RFD and RBD rings to the kernel log.  Read-only except for the final
 * cache invalidate of the whole private area. */
489 static void i596_display_data(struct net_device *dev)
491 struct i596_private *lp = (struct i596_private *) dev->priv;
492 struct i596_cmd *cmd;
493 struct i596_rfd *rfd;
494 struct i596_rbd *rbd;
496 printk("lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
497 &lp->scp, lp->scp.sysbus, lp->scp.iscp);
498 printk("iscp at %p, iscp.stat = %08x, .scb = %08x\n",
499 &lp->iscp, lp->iscp.stat, lp->iscp.scb);
500 printk("scb at %p, scb.status = %04x, .command = %04x,"
501 " .cmd = %08x, .rfd = %08x\n",
502 &lp->scb, lp->scb.status, lp->scb.command,
503 lp->scb.cmd, lp->scb.rfd);
504 printk(" errors: crc %x, align %x, resource %x,"
505 " over %x, rcvdt %x, short %x\n",
506 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
507 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
/* Walk the CPU-side command list (v_next chain). */
509 while (cmd != NULL) {
510 printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %08x\n",
511 cmd, cmd->status, cmd->command, cmd->b_next);
/* Walk the receive frame descriptor ring until it wraps to rfd_head. */
515 printk("rfd_head = %p\n", rfd);
517 printk (" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
519 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
522 } while (rfd != lp->rfd_head);
/* Walk the receive buffer descriptor ring until it wraps to rbd_head. */
524 printk("rbd_head = %p\n", rbd);
526 printk(" %p .count %04x, b_next %08x, b_data %08x, size %04x\n",
527 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
529 } while (rbd != lp->rbd_head);
530 CHECK_INV(lp, sizeof(struct i596_private));
/* Error-interrupt handler, only built for the 680x0 VME boards.
 * Logs the event and dumps all driver structures for post-mortem.
 * NOTE(review): pcc2 points at a fixed MVME16x register block; the code
 * that uses it is outside this gappy listing. */
534 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
535 static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
537 struct net_device *dev = dev_id;
538 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
542 printk("%s: Error interrupt\n", dev->name);
543 i596_display_data(dev);
/* Translate a CPU pointer inside the shared i596_private area into the
 * bus address the 82596 must use: offset from lp plus the area's dma_addr. */
547 #define virt_to_dma(lp,v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)(lp)))
/* Allocate one skb per receive buffer descriptor, DMA-map each data
 * area, and link the RBDs and RFDs into two circular lists (b_next
 * chains hold bus addresses for the chip, v_next/v_prev hold CPU
 * pointers).  Panics if an skb cannot be allocated at open time.
 * Finishes by flushing the whole shared area for the chip. */
549 static inline void init_rx_bufs(struct net_device *dev)
551 struct i596_private *lp = (struct i596_private *)dev->priv;
553 struct i596_rfd *rfd;
554 struct i596_rbd *rbd;
556 /* First build the Receive Buffer Descriptor List */
558 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
560 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);
563 panic("82596: alloc_skb() failed");
565 dma_addr = pci_map_single(fake_pci_dev, skb->tail,PKT_BUF_SZ,
569 rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
570 rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
572 rbd->v_data = skb->tail;
573 rbd->b_data = WSWAPchar(dma_addr);
574 rbd->size = PKT_BUF_SZ;
/* Close the RBD ring: last entry points back to the first. */
576 lp->rbd_head = lp->rbds;
577 rbd = lp->rbds + rx_ring_size - 1;
578 rbd->v_next = lp->rbds;
579 rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));
581 /* Now build the Receive Frame Descriptor List */
583 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
584 rfd->rbd = I596_NULL;
587 rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
/* First RFD owns the RBD list head; last RFD closes the ring and
 * carries the end-of-list flag. */
590 lp->rfd_head = lp->rfds;
591 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
593 rfd->rbd = WSWAPrbd(virt_to_dma(lp,lp->rbd_head));
594 rfd->v_prev = lp->rfds + rx_ring_size - 1;
595 rfd = lp->rfds + rx_ring_size - 1;
596 rfd->v_next = lp->rfds;
597 rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
598 rfd->cmd = CMD_EOL|CMD_FLEX;
600 CHECK_WBACK_INV(lp, sizeof(struct i596_private));
/* Undo init_rx_bufs(): for every RBD that still owns an skb, unmap its
 * DMA buffer and free the skb.  Entries with skb == NULL are skipped. */
603 static inline void remove_rx_bufs(struct net_device *dev)
605 struct i596_private *lp = (struct i596_private *)dev->priv;
606 struct i596_rbd *rbd;
609 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
610 if (rbd->skb == NULL)
612 pci_unmap_single(fake_pci_dev,
613 (dma_addr_t)WSWAPchar(rbd->b_data),
614 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
615 dev_kfree_skb(rbd->skb);
/* Reset the RFD ring to its pristine state without touching the skbs:
 * detach all RBDs, clear flags, re-mark the last RFD as end-of-list,
 * point the SCB and the first RFD back at the ring heads, then flush. */
620 static void rebuild_rx_bufs(struct net_device *dev)
622 struct i596_private *lp = (struct i596_private *) dev->priv;
625 /* Ensure rx frame/buffer descriptors are tidy */
627 for (i = 0; i < rx_ring_size; i++) {
628 lp->rfds[i].rbd = I596_NULL;
629 lp->rfds[i].cmd = CMD_FLEX;
631 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
632 lp->rfd_head = lp->rfds;
633 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
634 lp->rbd_head = lp->rbds;
635 lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
637 CHECK_WBACK_INV(lp, sizeof(struct i596_private));
/* Bring the 82596 up from scratch: hard-reset the chip, initialise the
 * SCP/ISCP/SCB linkage, hand the SCP address to the chip via the PORT
 * register, wait for the init handshake, then queue the Configure,
 * Set-Address and TDR commands and start the receive unit.  Returns 0
 * on success; the failure path resets the chip via PORT_RESET. */
641 static int init_i596_mem(struct net_device *dev)
643 struct i596_private *lp = (struct i596_private *) dev->priv;
646 disable_irq(dev->irq); /* disable IRQs from LAN */
648 printk("RESET 82596 port: %08lX (with IRQ%d disabled)\n",
649 dev->base_addr + PA_I82596_RESET,
652 gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
653 udelay(100); /* Wait 100us - seems to help */
655 /* change the scp address */
657 lp->last_cmd = jiffies;
/* sysbus 0x6c selects the chip's operating mode (value per HP docs);
 * link SCP -> ISCP -> SCB using bus addresses, mark ISCP busy so we can
 * detect when the chip clears it after initialisation. */
660 lp->scp.sysbus = 0x0000006c;
661 lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
662 lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
663 lp->iscp.stat = ISCP_BUSY;
667 lp->scb.cmd = I596_NULL;
669 DEB(DEB_INIT,printk("%s: starting i82596.\n", dev->name));
671 CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
672 CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
674 MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
678 if (wait_istat(dev,lp,1000,"initialization timed out"))
680 DEB(DEB_INIT,printk("%s: i82596 initialization successful\n", dev->name));
682 /* Ensure rx frame/buffer descriptors are tidy */
683 rebuild_rx_bufs(dev);
686 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
688 enable_irq(dev->irq); /* enable IRQs from LAN */
/* Queue the three start-up commands; each is flushed before queueing
 * because i596_add_cmd hands its bus address to the chip. */
690 DEB(DEB_INIT,printk("%s: queuing CmdConfigure\n", dev->name));
691 memcpy(lp->cf_cmd.i596_config, init_setup, 14);
692 lp->cf_cmd.cmd.command = CmdConfigure;
693 CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
694 i596_add_cmd(dev, &lp->cf_cmd.cmd);
696 DEB(DEB_INIT,printk("%s: queuing CmdSASetup\n", dev->name));
697 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
698 lp->sa_cmd.cmd.command = CmdSASetup;
699 CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
700 i596_add_cmd(dev, &lp->sa_cmd.cmd);
702 DEB(DEB_INIT,printk("%s: queuing CmdTDR\n", dev->name));
703 lp->tdr_cmd.cmd.command = CmdTDR;
704 CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
705 i596_add_cmd(dev, &lp->tdr_cmd.cmd);
/* Start the receive unit under the lock, then wait (unlocked) for the
 * chip to acknowledge the RX_START. */
707 spin_lock_irqsave (&lp->lock, flags);
709 if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
710 spin_unlock_irqrestore (&lp->lock, flags);
713 DEB(DEB_INIT,printk("%s: Issuing RX_START\n", dev->name));
714 lp->scb.command = RX_START;
715 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
716 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
720 spin_unlock_irqrestore (&lp->lock, flags);
722 if (wait_cmd(dev,lp,1000,"RX_START not processed"))
724 DEB(DEB_INIT,printk("%s: Receive unit started OK\n", dev->name));
/* Failure path: report and put the chip back into reset. */
729 printk("%s: Failed to initialise 82596\n", dev->name);
730 MPU_PORT(dev, PORT_RESET, 0);
/* Receive path: walk completed RFDs (STAT_C set), hand good frames to
 * the stack, recycle descriptors, and account errors.  Frames longer
 * than rx_copybreak keep their ring skb (a fresh one is mapped in its
 * place); shorter frames are copied into a right-sized skb.  Returns
 * the number of frames processed (return outside this gappy listing). */
735 static inline int i596_rx(struct net_device *dev)
737 struct i596_private *lp = (struct i596_private *)dev->priv;
738 struct i596_rfd *rfd;
739 struct i596_rbd *rbd;
742 DEB(DEB_RXFRAME,printk ("i596_rx(), rfd_head %p, rbd_head %p\n",
743 lp->rfd_head, lp->rbd_head));
746 rfd = lp->rfd_head; /* Ref next frame to check */
748 CHECK_INV(rfd, sizeof(struct i596_rfd));
749 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
/* Resolve the RBD for this frame: I596_NULL means no buffer; it must
 * match the bus address of the current rbd_head or the chain is broken. */
750 if (rfd->rbd == I596_NULL)
752 else if (rfd->rbd == lp->rbd_head->b_addr) {
754 CHECK_INV(rbd, sizeof(struct i596_rbd));
757 printk("%s: rbd chain broken!\n", dev->name);
761 DEB(DEB_RXFRAME, printk(" rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
762 rfd, rfd->rbd, rfd->stat));
764 if (rbd != NULL && ((rfd->stat) & STAT_OK)) {
/* Low 14 bits of count are the actual received length. */
766 int pkt_len = rbd->count & 0x3fff;
767 struct sk_buff *skb = rbd->skb;
770 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
773 /* Check if the packet is long enough to just accept
774 * without copying to a properly sized skbuff.
777 if (pkt_len > rx_copybreak) {
778 struct sk_buff *newskb;
781 pci_unmap_single(fake_pci_dev,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
782 /* Get fresh skbuff to replace filled one. */
783 newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
784 if (newskb == NULL) {
785 skb = NULL; /* drop pkt */
788 skb_reserve(newskb, 2);
790 /* Pass up the skb already on the Rx ring. */
791 skb_put(skb, pkt_len);
/* Install the replacement skb in the ring and remap it. */
795 dma_addr = pci_map_single(fake_pci_dev, newskb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
796 rbd->v_data = newskb->tail;
797 rbd->b_data = WSWAPchar(dma_addr);
798 CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
/* Copybreak path: small frame, copy into a fresh right-sized skb. */
801 skb = dev_alloc_skb(pkt_len + 2);
804 /* XXX tulip.c can defer packets here!! */
805 printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
806 lp->stats.rx_dropped++;
811 /* 16 byte align the data fields */
812 pci_dma_sync_single(fake_pci_dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
814 memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
817 skb->protocol=eth_type_trans(skb,dev);
819 dev->last_rx = jiffies;
820 lp->stats.rx_packets++;
821 lp->stats.rx_bytes+=pkt_len;
/* Error frame: decode the rfd status bits into netdev statistics. */
825 DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
826 dev->name, rfd->stat));
827 lp->stats.rx_errors++;
828 if ((rfd->stat) & 0x0001)
829 lp->stats.collisions++;
830 if ((rfd->stat) & 0x0080)
831 lp->stats.rx_length_errors++;
832 if ((rfd->stat) & 0x0100)
833 lp->stats.rx_over_errors++;
834 if ((rfd->stat) & 0x0200)
835 lp->stats.rx_fifo_errors++;
836 if ((rfd->stat) & 0x0400)
837 lp->stats.rx_frame_errors++;
838 if ((rfd->stat) & 0x0800)
839 lp->stats.rx_crc_errors++;
840 if ((rfd->stat) & 0x1000)
841 lp->stats.rx_length_errors++;
844 /* Clear the buffer descriptor count and EOF + F flags */
846 if (rbd != NULL && (rbd->count & 0x4000)) {
848 lp->rbd_head = rbd->v_next;
849 CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
852 /* Tidy the frame descriptor, marking it as end of list */
854 rfd->rbd = I596_NULL;
856 rfd->cmd = CMD_EOL|CMD_FLEX;
859 /* Remove end-of-list from old end descriptor */
861 rfd->v_prev->cmd = CMD_FLEX;
863 /* Update record of next frame descriptor to process */
865 lp->scb.rfd = rfd->b_next;
866 lp->rfd_head = rfd->v_next;
867 CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
868 CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
870 CHECK_INV(rfd, sizeof(struct i596_rfd));
873 DEB(DEB_RXFRAME,printk ("frames %d\n", frames));
879 static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
881 struct i596_cmd *ptr;
883 while (lp->cmd_head != NULL) {
885 lp->cmd_head = ptr->v_next;
888 switch ((ptr->command) & 0x7) {
891 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
892 struct sk_buff *skb = tx_cmd->skb;
893 pci_unmap_single(fake_pci_dev, tx_cmd->dma_addr, skb->len, PCI_DMA_TODEVICE);
897 lp->stats.tx_errors++;
898 lp->stats.tx_aborted_errors++;
901 ptr->b_next = I596_NULL;
902 tx_cmd->cmd.command = 0; /* Mark as free */
907 ptr->b_next = I596_NULL;
909 CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
912 wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
913 lp->scb.cmd = I596_NULL;
914 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
/* Soft reset: under the lock, stop the tx queue, abort both the command
 * unit and receive unit via the SCB, wait for the chip to shut down,
 * then drain the command queue and restart the tx queue.  Presumably
 * followed (in lines not visible here) by re-initialising the chip. */
918 static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
922 DEB(DEB_RESET,printk("i596_reset\n"));
924 spin_lock_irqsave (&lp->lock, flags);
926 wait_cmd(dev,lp,100,"i596_reset timed out");
928 netif_stop_queue(dev);
930 /* FIXME: this command might cause an lpmc */
931 lp->scb.command = CUC_ABORT | RX_ABORT;
932 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
935 /* wait for shutdown */
936 wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
937 spin_unlock_irqrestore (&lp->lock, flags);
939 i596_cleanup_cmd(dev,lp);
942 netif_start_queue(dev);
/* Append a command to the driver's queue and hand it to the chip.  The
 * command gets EOL+INTR flags and a null bus successor; if the queue
 * was non-empty it is linked after the current tail, otherwise the SCB
 * is pointed at it and the command unit is started (CUC_START).  If the
 * backlog exceeds max_cmd_backlog for longer than ticks_limit jiffies,
 * the adapter is declared wedged and reset. */
947 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
949 struct i596_private *lp = (struct i596_private *) dev->priv;
952 DEB(DEB_ADDCMD,printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
955 cmd->command |= (CMD_EOL | CMD_INTR);
957 cmd->b_next = I596_NULL;
958 CHECK_WBACK(cmd, sizeof(struct i596_cmd));
960 spin_lock_irqsave (&lp->lock, flags);
962 if (lp->cmd_head != NULL) {
963 lp->cmd_tail->v_next = cmd;
/* The chip chains commands by the bus address of the status field. */
964 lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
965 CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
968 wait_cmd(dev,lp,100,"i596_add_cmd timed out");
969 lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
970 lp->scb.command = CUC_START;
971 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
977 spin_unlock_irqrestore (&lp->lock, flags);
979 if (lp->cmd_backlog > max_cmd_backlog) {
980 unsigned long tickssofar = jiffies - lp->last_cmd;
982 if (tickssofar < ticks_limit)
985 printk("%s: command unit timed out, status resetting.\n", dev->name);
993 /* this function makes a perfectly adequate probe... but we have a
/* Chip self-test: point the PORT self-test command (code 1) at the scp
 * page and poll (up to 1000000 iterations) for the chip to DMA its
 * result into tint[1], invalidating the cache each pass. */
995 static int i596_test(struct net_device *dev)
997 struct i596_private *lp = (struct i596_private *) dev->priv;
1001 tint = (volatile int *)(&(lp->scp));
1002 data = virt_to_dma(lp,tint);
1005 CHECK_WBACK(tint,PAGE_SIZE);
1007 MPU_PORT(dev, 1, data);
1009 for(data = 1000000; data; data--) {
1010 CHECK_INV(tint,PAGE_SIZE);
1016 printk("i596_test result %d\n", tint[1]);
/* net_device open(): claim the IRQ, initialise the chip and shared
 * memory, and start the tx queue.  The visible error path unwinds by
 * freeing the rx buffers and releasing the IRQ. */
1022 static int i596_open(struct net_device *dev)
1024 DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
1028 if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
1029 printk("%s: IRQ %d not free\n", dev->name, dev->irq);
1035 if (init_i596_mem(dev)) {
1036 printk("%s: Failed to init memory\n", dev->name);
1037 goto out_remove_rx_bufs;
1040 netif_start_queue(dev);
/* Error unwind labels (label lines not visible in this gappy listing). */
1045 remove_rx_bufs(dev);
1046 free_irq(dev->irq, dev);
/* Watchdog tx-timeout handler.  If no packet has completed since the
 * last restart (last_restart == tx_packets) the board is fully reset;
 * otherwise it is merely "kicked" by restarting the CU and RX units.
 * Always refreshes trans_start and wakes the tx queue. */
1053 static void i596_tx_timeout (struct net_device *dev)
1055 struct i596_private *lp = (struct i596_private *) dev->priv;
1057 /* Transmitter timeout, serious problems. */
1058 DEB(DEB_ERRORS,printk("%s: transmit timed out, status resetting.\n",
1061 lp->stats.tx_errors++;
1063 /* Try to restart the adaptor */
1064 if (lp->last_restart == lp->stats.tx_packets) {
1065 DEB(DEB_ERRORS,printk ("Resetting board.\n"));
1066 /* Shutdown and restart */
1067 i596_reset (dev, lp);
1069 /* Issue a channel attention signal */
1070 DEB(DEB_ERRORS,printk ("Kicking board.\n"));
1071 lp->scb.command = CUC_START | RX_START;
1072 CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
1074 lp->last_restart = lp->stats.tx_packets;
1077 dev->trans_start = jiffies;
1078 netif_wake_queue (dev);
/* hard_start_xmit: claim the next tx_cmd/tbd ring slot, DMA-map the
 * skb data, fill in the transmit command and buffer descriptor, and
 * queue the command via i596_add_cmd().  If the slot is still busy
 * (cmd.command non-zero) the packet is dropped and counted.  Length is
 * padded up to ETH_ZLEN for short frames. */
1082 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1084 struct i596_private *lp = (struct i596_private *) dev->priv;
1085 struct tx_cmd *tx_cmd;
1086 struct i596_tbd *tbd;
1087 short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
1088 dev->trans_start = jiffies;
1090 DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
1091 skb->len, skb->data));
1093 netif_stop_queue(dev);
1095 tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
1096 tbd = lp->tbds + lp->next_tx_cmd;
1098 if (tx_cmd->cmd.command) {
1099 DEB(DEB_ERRORS,printk ("%s: xmit ring full, dropping packet.\n",
1101 lp->stats.tx_dropped++;
/* Advance the ring index (wraps at TX_RING_SIZE) and build the command. */
1105 if (++lp->next_tx_cmd == TX_RING_SIZE)
1106 lp->next_tx_cmd = 0;
1107 tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
1108 tbd->next = I596_NULL;
1110 tx_cmd->cmd.command = CMD_FLEX | CmdTx;
/* EOF flag marks this as the only/last buffer of the frame. */
1116 tbd->size = EOF | length;
1118 tx_cmd->dma_addr = pci_map_single(fake_pci_dev, skb->data, skb->len,
1120 tbd->data = WSWAPchar(tx_cmd->dma_addr);
1122 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
1123 CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
1124 CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
1125 i596_add_cmd(dev, &tx_cmd->cmd);
1127 lp->stats.tx_packets++;
1128 lp->stats.tx_bytes += length;
1131 netif_start_queue(dev);
/* Debug helper: dump an ethernet header — source MAC (bytes 6-11),
 * destination MAC (bytes 0-5), and the ethertype (bytes 12-13) —
 * followed by the caller-supplied tag string. */
1136 static void print_eth(unsigned char *add, char *str)
1140 printk("i596 0x%p, ", add);
1141 for (i = 0; i < 6; i++)
1142 printk(" %02X", add[i + 6]);
1144 for (i = 0; i < 6; i++)
1145 printk(" %02X", add[i]);
1146 printk(" %02X%02X, %s\n", add[12], add[13], str);
/* Base address of the HP700 LAN station-address PROM, used as a MAC
 * fallback when PDC cannot supply one. */
1150 #define LAN_PROM_ADDR 0xF0810000
/* Probe/attach: verify at compile-config time that the shared
 * descriptor structs are cache-line sized, obtain the MAC address (PDC
 * first, LAN PROM fallback), allocate the shared i596_private area
 * (consistent DMA memory, falling back to a normal page + virt_to_bus),
 * wire up the net_device method pointers and initialise the SCB. */
1152 static int __devinit i82596_probe(struct net_device *dev)
1155 struct i596_private *lp;
1157 dma_addr_t dma_addr;
1159 /* This lot is ensure things have been cache line aligned. */
1160 if (sizeof(struct i596_rfd) != 32) {
1161 printk("82596: sizeof(struct i596_rfd) = %d\n",
1162 sizeof(struct i596_rfd));
1165 if ((sizeof(struct i596_rbd) % 32) != 0) {
1166 printk("82596: sizeof(struct i596_rbd) = %d\n",
1167 sizeof(struct i596_rbd));
1170 if ((sizeof(struct tx_cmd) % 32) != 0) {
1171 printk("82596: sizeof(struct tx_cmd) = %d\n",
1172 sizeof(struct tx_cmd));
1175 if (sizeof(struct i596_tbd) != 32) {
1176 printk("82596: sizeof(struct i596_tbd) = %d\n",
1177 sizeof(struct i596_tbd));
1181 if (sizeof(struct i596_private) > 4096) {
1182 printk("82596: sizeof(struct i596_private) = %d\n",
1183 sizeof(struct i596_private));
1188 if (!dev->base_addr || !dev->irq)
/* MAC address: PDC firmware call, else read 6 bytes from the LAN PROM. */
1191 if (pdc_lan_station_id(eth_addr, dev->base_addr)) {
1192 for (i=0; i < 6; i++) {
1193 eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
1195 printk("82596.c: MAC of HP700 LAN read from EEPROM\n");
1198 dev->mem_start = (unsigned long) pci_alloc_consistent(fake_pci_dev,
1199 sizeof(struct i596_private), &dma_addr);
1200 if (!dev->mem_start) {
1201 printk("%s: Couldn't get consistent shared memory\n", dev->name);
/* Fallback: plain page + manual cache flushing (dma_consistent path). */
1203 dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
1204 if (!dev->mem_start) {
1205 printk("%s: Couldn't get shared memory\n", dev->name);
1208 dma_addr = virt_to_bus(dev->mem_start);
1212 DEB(DEB_PROBE,printk("%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1214 for (i = 0; i < 6; i++)
1215 DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
1217 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
1219 DEB(DEB_PROBE,printk(version));
1221 /* The 82596-specific entries in the device structure. */
1222 dev->open = i596_open;
1223 dev->stop = i596_close;
1224 dev->hard_start_xmit = i596_start_xmit;
1225 dev->get_stats = i596_get_stats;
1226 dev->set_multicast_list = set_multicast_list;
1227 dev->tx_timeout = i596_tx_timeout;
1228 dev->watchdog_timeo = TX_TIMEOUT;
1230 dev->priv = (void *)(dev->mem_start);
1232 lp = (struct i596_private *) dev->priv;
1233 DEB(DEB_INIT,printk ("%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
1234 dev->name, (unsigned long)lp,
1235 sizeof(struct i596_private), (unsigned long)&lp->scb));
1236 memset(lp, 0, sizeof(struct i596_private));
1238 lp->scb.command = 0;
1239 lp->scb.cmd = I596_NULL;
1240 lp->scb.rfd = I596_NULL;
1241 lp->lock = SPIN_LOCK_UNLOCKED;
1242 lp->dma_addr = dma_addr;
1244 CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
/*
 * i596_interrupt - interrupt service routine for the 82596.
 *
 * Reads the SCB status word, acknowledges the event bits (top nibble),
 * reaps completed commands from the driver's software command queue,
 * restarts the command/receive units when needed, and writes the
 * accumulated acknowledgement back to the SCB.  Chip-shared structures
 * are flushed/invalidated around every access (CHECK_* macros) because
 * the 82596 DMAs into cacheable memory on hppa -- see the file header.
 *
 * NOTE(review): this listing excerpt is non-contiguous (embedded source
 * numbering skips); braces, the unknown-device guard, the switch case
 * labels (tx / tdr / configure), the receive call and the channel-attention
 * poke are among the elided lines.  Comments below annotate visible code only.
 */
static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    struct net_device *dev = dev_id;
    struct i596_private *lp;
    unsigned short status, ack_cmd = 0;

    /* dev_id did not map to a device registered for this irq */
    printk("i596_interrupt(): irq %d for unknown device.\n", irq);

    lp = (struct i596_private *) dev->priv;

    /* plain spin_lock: we are already in interrupt context */
    spin_lock (&lp->lock);

    /* let any in-flight SCB command drain before reading status */
    wait_cmd(dev,lp,100,"i596 interrupt, timeout");
    status = lp->scb.status;

    DEB(DEB_INTS,printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
            dev->name, irq, status));

    /* top nibble = event bits; writing them back acknowledges them */
    ack_cmd = status & 0xf000;

    /* spurious interrupt path: no event bits were set */
    DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
    spin_unlock (&lp->lock);

    /* 0x8000: a command completed; 0x2000: command unit went inactive
     * (meanings per the debug messages below) */
    if ((status & 0x8000) || (status & 0x2000)) {
        struct i596_cmd *ptr;

        if ((status & 0x8000))
            DEB(DEB_INTS,printk("%s: i596 interrupt completed command.\n", dev->name));
        if ((status & 0x2000))
            DEB(DEB_INTS,printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

        /* reap every queued command the chip has marked complete */
        while (lp->cmd_head != NULL) {
            /* invalidate before reading the chip-written status word */
            CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
            if (!(lp->cmd_head->status & STAT_C))

            DEB(DEB_STATUS,printk("cmd_head->status = %04x, ->command = %04x\n",
                       lp->cmd_head->status, lp->cmd_head->command));
            /* unlink the completed command from the queue head */
            lp->cmd_head = ptr->v_next;

            switch ((ptr->command) & 0x7) {
                /* (transmit-complete case) reclaim the skb and count errors */
                struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
                struct sk_buff *skb = tx_cmd->skb;

                if ((ptr->status) & STAT_OK) {
                    DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
                /* decode tx status bits into net_device stats */
                lp->stats.tx_errors++;
                if ((ptr->status) & 0x0020)
                    lp->stats.collisions++;
                if (!((ptr->status) & 0x0040))
                    lp->stats.tx_heartbeat_errors++;
                if ((ptr->status) & 0x0400)
                    lp->stats.tx_carrier_errors++;
                if ((ptr->status) & 0x0800)
                    lp->stats.collisions++;
                if ((ptr->status) & 0x1000)
                    lp->stats.tx_aborted_errors++;

                /* release the DMA mapping, then free the skb (irq-safe) */
                pci_unmap_single(fake_pci_dev, tx_cmd->dma_addr, skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(skb);

                tx_cmd->cmd.command = 0; /* Mark free */

                /* (TDR case) report cable-diagnostic result */
                unsigned short status = ((struct tdr_cmd *)ptr)->status;

                if (status & 0x8000) {
                    DEB(DEB_ANY,printk("%s: link ok.\n", dev->name));
                if (status & 0x4000)
                    printk("%s: Transceiver problem.\n", dev->name);
                if (status & 0x2000)
                    printk("%s: Termination problem.\n", dev->name);
                if (status & 0x1000)
                    printk("%s: Short circuit.\n", dev->name);

                DEB(DEB_TDR,printk("%s: Time %d.\n", dev->name, status & 0x07ff));

            /* Zap command so set_multicast_list() knows it is free */

            ptr->b_next = I596_NULL;
            /* write back so the chip sees the unlinked command */
            CHECK_WBACK(ptr, sizeof(struct i596_cmd));
            lp->last_cmd = jiffies;

        /* This mess is arranging that only the last of any outstanding
         * commands has the interrupt bit set. Should probably really
         * only add to the cmd queue when the CU is stopped.
         */
        while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
            struct i596_cmd *prev = ptr;

            /* clear the upper control bits on every non-tail command */
            ptr->command &= 0x1fff;

            CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));

        /* restart the command unit if work remains queued */
        if ((lp->cmd_head != NULL))
            ack_cmd |= CUC_START;
        /* chip must be handed the physical (DMA) address -- see header */
        lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
        CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));

    /* 0x4000: frame received; 0x1000: receive unit inactive
     * (meanings per the debug messages below) */
    if ((status & 0x1000) || (status & 0x4000)) {
        if ((status & 0x4000))
            DEB(DEB_INTS,printk("%s: i596 interrupt received a frame.\n", dev->name));

        /* Only RX_START if stopped - RGH 07-07-96 */
        if (status & 0x1000) {
            if (netif_running(dev)) {
                DEB(DEB_ERRORS,printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
                ack_cmd |= RX_START;
                lp->stats.rx_errors++;
                lp->stats.rx_fifo_errors++;
                rebuild_rx_bufs(dev);

    /* push the accumulated ack/start bits back to the chip */
    wait_cmd(dev,lp,100,"i596 interrupt, timeout");
    lp->scb.command = ack_cmd;
    CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));

    /* DANGER: I suspect that some kind of interrupt
       acknowledgement aside from acking the 82596 might be needed
       here... but it's running acceptably without */

    wait_cmd(dev,lp,100,"i596 interrupt, exit timeout");
    DEB(DEB_INTS,printk("%s: exiting interrupt.\n", dev->name));

    spin_unlock (&lp->lock);
/*
 * i596_close - bring the interface down (dev->stop hook).
 *
 * Stops the tx queue, aborts the chip's command and receive units under
 * the lock, then tears down queued commands, the irq, and rx buffers.
 *
 * NOTE(review): excerpt is non-contiguous -- the final return and the
 * lines between the abort write and the second wait_cmd() are elided.
 */
static int i596_close(struct net_device *dev)
    struct i596_private *lp = (struct i596_private *) dev->priv;
    unsigned long flags;

    netif_stop_queue(dev);

    DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
            dev->name, lp->scb.status));

    /* irqsave: the interrupt handler takes this same lock */
    spin_lock_irqsave(&lp->lock, flags);

    /* abort both the command unit and the receive unit */
    wait_cmd(dev,lp,100,"close1 timed out");
    lp->scb.command = CUC_ABORT | RX_ABORT;
    /* flush so the chip's DMA sees the abort command */
    CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));

    wait_cmd(dev,lp,100,"close2 timed out");

    spin_unlock_irqrestore(&lp->lock, flags);
    DEB(DEB_STRUCT,i596_display_data(dev));
    i596_cleanup_cmd(dev,lp);

    /* quiesce and release the interrupt before freeing rx buffers */
    disable_irq(dev->irq);
    free_irq(dev->irq, dev);
    remove_rx_bufs(dev);
/*
 * i596_get_stats - return the interface statistics (dev->get_stats hook).
 * NOTE(review): the return statement is elided in this excerpt --
 * presumably `return &lp->stats;`; verify against the full source.
 */
static struct net_device_stats *
i596_get_stats(struct net_device *dev)
    struct i596_private *lp = (struct i596_private *) dev->priv;
1447 * Set or clear the multicast filter for this adaptor.
/*
 * set_multicast_list - dev->set_multicast_list hook.
 *
 * Reconciles IFF_PROMISC / IFF_ALLMULTI with the cached 82596 configure
 * bytes and queues a CmdConfigure when they changed, then queues a
 * CmdMulticastList carrying up to MAX_MC_CNT addresses from dev->mc_list.
 *
 * NOTE(review): excerpt elides lines -- the `config = 1;` markers after
 * each flag flip, the mc_cmd/cp local setup, and the closing braces.
 */
static void set_multicast_list(struct net_device *dev)
    struct i596_private *lp = (struct i596_private *) dev->priv;
    int config = 0, cnt;

    DEB(DEB_MULTI,printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n", dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF", dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

    /* config byte 8 bit 0x01 tracks promiscuous mode: set when entering,
     * cleared when leaving (only touch it when it actually changes) */
    if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
        lp->cf_cmd.i596_config[8] |= 0x01;
    if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
        lp->cf_cmd.i596_config[8] &= ~0x01;

    /* config byte 11 bit 0x20 is cleared for IFF_ALLMULTI and set
     * otherwise -- i.e. the bit appears to *disable* all-multicast
     * reception; confirm against the 82596 configure byte layout */
    if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
        lp->cf_cmd.i596_config[11] &= ~0x20;
    if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
        lp->cf_cmd.i596_config[11] |= 0x20;

    /* cf_cmd is a single static slot: non-zero command means the previous
     * configure request has not been reaped by the interrupt handler yet */
    if (lp->cf_cmd.cmd.command)
        printk("%s: config change request already queued\n",
        lp->cf_cmd.cmd.command = CmdConfigure;
        CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
        i596_add_cmd(dev, &lp->cf_cmd.cmd);

    /* clamp the address count to what one mc_cmd can carry */
    cnt = dev->mc_count;
    if (cnt > MAX_MC_CNT)
        printk("%s: Only %d multicast addresses supported",

    if (dev->mc_count > 0) {
        struct dev_mc_list *dmi;

        /* pack 6-byte ethernet addresses back-to-back into the command */
        cmd->cmd.command = CmdMulticastList;
        cmd->mc_cnt = dev->mc_count * 6;
        for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
            memcpy(cp, dmi->dmi_addr, 6);
            DEB(DEB_MULTI,printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
                    dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
        /* flush the filled command before handing it to the chip */
        CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
        i596_add_cmd(dev, &cmd->cmd);
/* "debug" module parameter: a DEB_* bitmask for this driver's debug
 * output.  -1 = unset; NOTE(review): presumably only copied into the
 * active debug mask during init when >= 0 -- the init body is elided
 * from this excerpt, verify against the full source. */
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "lasi_82596 debug mask");
static int debug = -1;

/* number of chips probed so far, and the net_device for each slot */
static int num_drivers;
static struct net_device *netdevs[MAX_DRIVERS];
/*
 * lan_init_chip - parisc bus probe: bring up one 82596 instance.
 *
 * Bounds-checks against MAX_DRIVERS, fetches the fake pci_dev used so the
 * pci_* DMA API works on this bus, allocates an etherdev bound to the
 * device's hpa/irq with i82596_probe as its init hook, registers it, and
 * records it in netdevs[].
 *
 * NOTE(review): excerpt elides the error-path returns, the `retval`
 * declaration and several braces (listing numbering is non-contiguous).
 */
static int __devinit
lan_init_chip(struct parisc_device *dev)
    struct net_device *netdevice;

    if (num_drivers >= MAX_DRIVERS) {
        /* max count of possible i82596 drivers reached */

    fake_pci_dev = ccio_get_fake(dev);

    printk(KERN_ERR __FILE__ ": IRQ not found for i82596 at 0x%lx\n", dev->hpa);

    printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa, dev->irq);

    /* sizeof_priv == 0: i82596_probe sets up dev->priv itself */
    netdevice = alloc_etherdev(0);

    netdevice->base_addr = dev->hpa;
    netdevice->irq = dev->irq;
    netdevice->init = i82596_probe;

    retval = register_netdev(netdevice);
    printk(KERN_WARNING __FILE__ ": register_netdevice ret'd %d\n", retval);

    /* sversion 0x72 (second lan_tbl entry) needs the OPT_SWAP_PORT
     * access variant */
    if (dev->id.sversion == 0x72) {
        ((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;

    netdevs[num_drivers++] = netdevice;
/* PA-RISC device ids this driver binds to (sversions 0x8a and 0x72).
 * NOTE(review): the terminating sentinel entry is presumably elided
 * from this excerpt. */
static struct parisc_device_id lan_tbl[] = {
    { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
    { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },

MODULE_DEVICE_TABLE(parisc, lan_tbl);
/* parisc bus glue: lan_init_chip is called for each lan_tbl match.
 * (GNU old-style `field:` designated initializer, consistent with the
 * file's era; other members are elided from this excerpt.) */
static struct parisc_driver lan_driver = {
    probe: lan_init_chip,
/* Module entry point: register with the parisc bus so lan_init_chip runs
 * for every matching device.  NOTE(review): the opening of the body is
 * elided here; also __devinit (rather than __init) on a module_init
 * function is unusual -- verify against the full source. */
static int __devinit lasi_82596_init(void)
    return register_parisc_driver(&lan_driver);

module_init(lasi_82596_init);
/*
 * lasi_82596_exit - module unload: unregister every device we created,
 * release its DMA-consistent private area, then detach from the bus.
 *
 * NOTE(review): excerpt elides lines (the `int i;` declaration, the
 * empty-slot skip in the loop, closing braces).  Both
 * pci_free_consistent() and free_page() are applied to
 * netdevice->mem_start below -- unless an elided conditional selects
 * exactly one of them, that is a double release of the same memory;
 * verify against the allocation in i82596_probe.
 */
static void __exit lasi_82596_exit(void)
    for (i=0; i<MAX_DRIVERS; i++) {
        struct i596_private *lp;
        struct net_device *netdevice;

        netdevice = netdevs[i];

        unregister_netdev(netdevice);

        lp = (struct i596_private *) netdevice->priv;
        pci_free_consistent(fake_pci_dev,
                sizeof(struct i596_private),
                (void *)netdevice->mem_start, lp->dma_addr);
        free_page(netdevice->mem_start);

        /* drop the dangling pointer after the backing store is gone */
        netdevice->priv = NULL;

    unregister_parisc_driver(&lan_driver);

module_exit(lasi_82596_exit);