1 /* 82596.c: A generic 82596 ethernet driver for linux. */
4 Written 1994 by Mark Evans.
5 This driver is for the Apricot 82596 bus-master interface
7 Modularised 12/94 Mark Evans
10 Modified to support the 82596 ethernet chips on 680x0 VME boards.
11 by Richard Hirst <richard@sleepie.demon.co.uk>
14 980825: Changed to receive directly in to sk_buffs which are
15 allocated at open() time. Eliminates copy on incoming frames
16 (small ones are still copied). Shared data now held in a
17 non-cached page, so we can run on 68060 in copyback mode.
20 * look at deferring rx frames rather than discarding (as per tulip)
21 * handle tx ring full as per tulip
22 * performance test to tune rx_copybreak
24 Most of my modifications relate to the braindead big-endian
25 implementation by Intel. When the i596 is operating in
26 'big-endian' mode, it thinks a 32 bit value of 0x12345678
27 should be stored as 0x56781234. This is a real pain, when
28 you have linked lists which are shared by the 680x0 and the
32 Written 1993 by Donald Becker.
33 Copyright 1993 United States Government as represented by the Director,
34 National Security Agency. This software may only be used and distributed
35 according to the terms of the GNU General Public License as modified by SRC,
36 incorporated herein by reference.
38 The author may be reached as becker@scyld.com, or C/O
39 Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
43 #include <linux/config.h>
44 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/sched.h>
48 #include <linux/string.h>
49 #include <linux/ptrace.h>
50 #include <linux/errno.h>
51 #include <linux/ioport.h>
52 #include <linux/slab.h>
53 #include <linux/interrupt.h>
54 #include <linux/delay.h>
55 #include <linux/netdevice.h>
56 #include <linux/etherdevice.h>
57 #include <linux/skbuff.h>
58 #include <linux/init.h>
60 #include <asm/bitops.h>
63 #include <asm/pgtable.h>
64 #include <asm/pgalloc.h>
66 static char version[] __initdata =
67 "82596.c $Revision: 1.1.1.1 $\n";
72 #define DEB_INIT 0x0001
73 #define DEB_PROBE 0x0002
74 #define DEB_SERIOUS 0x0004
75 #define DEB_ERRORS 0x0008
76 #define DEB_MULTI 0x0010
77 #define DEB_TDR 0x0020
78 #define DEB_OPEN 0x0040
79 #define DEB_RESET 0x0080
80 #define DEB_ADDCMD 0x0100
81 #define DEB_STATUS 0x0200
82 #define DEB_STARTTX 0x0400
83 #define DEB_RXADDR 0x0800
84 #define DEB_TXADDR 0x1000
85 #define DEB_RXFRAME 0x2000
86 #define DEB_INTS 0x4000
87 #define DEB_STRUCT 0x8000
88 #define DEB_ANY 0xffff
91 #define DEB(x,y) if (i596_debug & (x)) y
94 #if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
95 #define ENABLE_MVME16x_NET
97 #if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
98 #define ENABLE_BVME6000_NET
100 #if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
101 #define ENABLE_APRICOT
104 #ifdef ENABLE_MVME16x_NET
105 #include <asm/mvme16xhw.h>
107 #ifdef ENABLE_BVME6000_NET
108 #include <asm/bvme6000hw.h>
112 * Define various macros for Channel Attention, word swapping etc., dependent
113 * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
117 #define WSWAPrfd(x) ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
118 #define WSWAPrbd(x) ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
119 #define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
120 #define WSWAPscb(x) ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
121 #define WSWAPcmd(x) ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
122 #define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
123 #define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
124 #define ISCP_BUSY 0x00010000
125 #define MACH_IS_APRICOT 0
127 #define WSWAPrfd(x) ((struct i596_rfd *)(x))
128 #define WSWAPrbd(x) ((struct i596_rbd *)(x))
129 #define WSWAPiscp(x) ((struct i596_iscp *)(x))
130 #define WSWAPscb(x) ((struct i596_scb *)(x))
131 #define WSWAPcmd(x) ((struct i596_cmd *)(x))
132 #define WSWAPtbd(x) ((struct i596_tbd *)(x))
133 #define WSWAPchar(x) ((char *)(x))
134 #define ISCP_BUSY 0x0001
135 #define MACH_IS_APRICOT 1
139 * The MPU_PORT command allows direct access to the 82596. With PORT access
140 * the following commands are available (p5-18). The 32-bit port command
141 * must be word-swapped with the most significant word written first.
142 * This only applies to VME boards.
144 #define PORT_RESET 0x00 /* reset 82596 */
145 #define PORT_SELFTEST 0x01 /* selftest */
146 #define PORT_ALTSCP 0x02 /* alternate SCB address */
147 #define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
149 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
151 MODULE_AUTHOR("Richard Hirst");
152 MODULE_DESCRIPTION("i82596 driver");
153 MODULE_LICENSE("GPL");
155 MODULE_PARM(i596_debug, "i");
156 MODULE_PARM_DESC(i596_debug, "i82596 debug mask");
159 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
160 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
162 static int rx_copybreak = 100;
164 #define PKT_BUF_SZ 1536
165 #define MAX_MC_CNT 64
167 #define I596_TOTAL_SIZE 17
169 #define I596_NULL ((void *)0xffffffff)
171 #define CMD_EOL 0x8000 /* The last command of the list, stop. */
172 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
173 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
175 #define CMD_FLEX 0x0008 /* Enable flexible memory model */
178 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
179 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
182 #define STAT_C 0x8000 /* Set to 0 after execution */
183 #define STAT_B 0x4000 /* Command being executed */
184 #define STAT_OK 0x2000 /* Command executed ok */
185 #define STAT_A 0x1000 /* Command aborted */
187 #define CUC_START 0x0100
188 #define CUC_RESUME 0x0200
189 #define CUC_SUSPEND 0x0300
190 #define CUC_ABORT 0x0400
191 #define RX_START 0x0010
192 #define RX_RESUME 0x0020
193 #define RX_SUSPEND 0x0030
194 #define RX_ABORT 0x0040
200 unsigned short porthi;
201 unsigned short portlo;
206 #define SIZE_MASK 0x3fff
211 struct i596_tbd *next;
215 /* The command structure has two 'next' pointers; v_next is the address of
216 * the next command as seen by the CPU, b_next is the address of the next
217 * command as seen by the 82596. The b_next pointer, as used by the 82596
218 * always references the status field of the next command, rather than the
219 * v_next field, because the 82596 is unaware of v_next. It may seem more
220 * logical to put v_next at the end of the structure, but we cannot do that
221 * because the 82596 expects other fields to be there, depending on command
226 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
227 unsigned short status;
228 unsigned short command;
229 struct i596_cmd *b_next; /* Address from i596 viewpoint */
234 struct i596_tbd *tbd;
237 struct sk_buff *skb; /* So we can free it after tx */
242 unsigned short status;
249 char mc_addrs[MAX_MC_CNT*6];
259 char i596_config[16];
265 struct i596_rfd *b_next; /* Address from i596 viewpoint */
266 struct i596_rbd *rbd;
267 unsigned short count;
269 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
270 struct i596_rfd *v_prev;
274 unsigned short count;
275 unsigned short zero1;
276 struct i596_rbd *b_next;
277 unsigned char *b_data; /* Address from i596 viewpoint */
279 unsigned short zero2;
281 struct i596_rbd *v_next;
282 struct i596_rbd *b_addr; /* This rbd addr from i596 view */
283 unsigned char *v_data; /* Address from CPUs viewpoint */
286 #define TX_RING_SIZE 64
287 #define RX_RING_SIZE 16
290 unsigned short status;
291 unsigned short command;
292 struct i596_cmd *cmd;
293 struct i596_rfd *rfd;
294 unsigned long crc_err;
295 unsigned long align_err;
296 unsigned long resource_err;
297 unsigned long over_err;
298 unsigned long rcvdt_err;
299 unsigned long short_err;
301 unsigned short t_off;
306 struct i596_scb *scb;
310 unsigned long sysbus;
312 struct i596_iscp *iscp;
315 struct i596_private {
316 volatile struct i596_scp scp;
317 volatile struct i596_iscp iscp;
318 volatile struct i596_scb scb;
319 struct sa_cmd sa_cmd;
320 struct cf_cmd cf_cmd;
321 struct tdr_cmd tdr_cmd;
322 struct mc_cmd mc_cmd;
324 int last_restart __attribute__((aligned(4)));
325 struct i596_rfd *rfd_head;
326 struct i596_rbd *rbd_head;
327 struct i596_cmd *cmd_tail;
328 struct i596_cmd *cmd_head;
330 unsigned long last_cmd;
331 struct net_device_stats stats;
332 struct i596_rfd rfds[RX_RING_SIZE];
333 struct i596_rbd rbds[RX_RING_SIZE];
334 struct tx_cmd tx_cmds[TX_RING_SIZE];
335 struct i596_tbd tbds[TX_RING_SIZE];
340 static char init_setup[] =
342 0x8E, /* length, prefetch on */
343 0xC8, /* fifo to 8, monitor off */
345 0xc0, /* don't save bad frames */
347 0x80, /* don't save bad frames */
349 0x2E, /* No source address insertion, 8 byte preamble */
350 0x00, /* priority and backoff defaults */
351 0x60, /* interframe spacing */
352 0x00, /* slot time LSB */
353 0xf2, /* slot time and retries */
354 0x00, /* promiscuous mode */
355 0x00, /* collision detect */
356 0x40, /* minimum frame length */
359 0x7f /* *multi IA */ };
361 static int i596_open(struct net_device *dev);
362 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
363 static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
364 static int i596_close(struct net_device *dev);
365 static struct net_device_stats *i596_get_stats(struct net_device *dev);
366 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
367 static void i596_tx_timeout (struct net_device *dev);
368 static void print_eth(unsigned char *buf, char *str);
369 static void set_multicast_list(struct net_device *dev);
371 static int rx_ring_size = RX_RING_SIZE;
372 static int ticks_limit = 25;
373 static int max_cmd_backlog = TX_RING_SIZE-1;
376 static inline void CA(struct net_device *dev)
378 #ifdef ENABLE_MVME16x_NET
379 if (MACH_IS_MVME16x) {
380 ((struct i596_reg *) dev->base_addr)->ca = 1;
383 #ifdef ENABLE_BVME6000_NET
384 if (MACH_IS_BVME6000) {
387 i = *(volatile u32 *) (dev->base_addr);
390 #ifdef ENABLE_APRICOT
391 if (MACH_IS_APRICOT) {
392 outw(0, (short) (dev->base_addr) + 4);
398 static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
400 #ifdef ENABLE_MVME16x_NET
401 if (MACH_IS_MVME16x) {
402 struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
403 p->porthi = ((c) | (u32) (x)) & 0xffff;
404 p->portlo = ((c) | (u32) (x)) >> 16;
407 #ifdef ENABLE_BVME6000_NET
408 if (MACH_IS_BVME6000) {
409 u32 v = (u32) (c) | (u32) (x);
410 v = ((u32) (v) << 16) | ((u32) (v) >> 16);
411 *(volatile u32 *) dev->base_addr = v;
413 *(volatile u32 *) dev->base_addr = v;
419 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
421 while (--delcnt && lp->iscp.stat)
424 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
425 dev->name, str, lp->scb.status, lp->scb.command);
433 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
435 while (--delcnt && lp->scb.command)
438 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
439 dev->name, str, lp->scb.status, lp->scb.command);
447 static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
449 volatile struct i596_cmd *c = cmd;
451 while (--delcnt && c->command)
454 printk(KERN_ERR "%s: %s.\n", dev->name, str);
462 static void i596_display_data(struct net_device *dev)
464 struct i596_private *lp = (struct i596_private *) dev->priv;
465 struct i596_cmd *cmd;
466 struct i596_rfd *rfd;
467 struct i596_rbd *rbd;
469 printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
470 &lp->scp, lp->scp.sysbus, lp->scp.iscp);
471 printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
472 &lp->iscp, lp->iscp.stat, lp->iscp.scb);
473 printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
474 " .cmd = %p, .rfd = %p\n",
475 &lp->scb, lp->scb.status, lp->scb.command,
476 lp->scb.cmd, lp->scb.rfd);
477 printk(KERN_ERR " errors: crc %lx, align %lx, resource %lx,"
478 " over %lx, rcvdt %lx, short %lx\n",
479 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
480 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
482 while (cmd != I596_NULL) {
483 printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
484 cmd, cmd->status, cmd->command, cmd->b_next);
488 printk(KERN_ERR "rfd_head = %p\n", rfd);
490 printk(KERN_ERR " %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
492 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
495 } while (rfd != lp->rfd_head);
497 printk(KERN_ERR "rbd_head = %p\n", rbd);
499 printk(KERN_ERR " %p .count %04x, b_next %p, b_data %p, size %04x\n",
500 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
502 } while (rbd != lp->rbd_head);
506 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
507 static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
509 struct net_device *dev = dev_id;
510 #ifdef ENABLE_MVME16x_NET
511 if (MACH_IS_MVME16x) {
512 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
518 #ifdef ENABLE_BVME6000_NET
519 if (MACH_IS_BVME6000) {
520 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
526 printk(KERN_ERR "%s: Error interrupt\n", dev->name);
527 i596_display_data(dev);
531 static inline void init_rx_bufs(struct net_device *dev)
533 struct i596_private *lp = (struct i596_private *)dev->priv;
535 struct i596_rfd *rfd;
536 struct i596_rbd *rbd;
538 /* First build the Receive Buffer Descriptor List */
540 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
541 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
544 panic("82596: alloc_skb() failed");
547 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
548 rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
550 rbd->v_data = skb->tail;
551 rbd->b_data = WSWAPchar(virt_to_bus(skb->tail));
552 rbd->size = PKT_BUF_SZ;
554 cache_clear(virt_to_phys(skb->tail), PKT_BUF_SZ);
557 lp->rbd_head = lp->rbds;
558 rbd = lp->rbds + rx_ring_size - 1;
559 rbd->v_next = lp->rbds;
560 rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));
562 /* Now build the Receive Frame Descriptor List */
564 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
565 rfd->rbd = I596_NULL;
568 rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
571 lp->rfd_head = lp->rfds;
572 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
574 rfd->rbd = lp->rbd_head;
575 rfd->v_prev = lp->rfds + rx_ring_size - 1;
576 rfd = lp->rfds + rx_ring_size - 1;
577 rfd->v_next = lp->rfds;
578 rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
579 rfd->cmd = CMD_EOL|CMD_FLEX;
582 static inline void remove_rx_bufs(struct net_device *dev)
584 struct i596_private *lp = (struct i596_private *)dev->priv;
585 struct i596_rbd *rbd;
588 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
589 if (rbd->skb == NULL)
591 dev_kfree_skb(rbd->skb);
596 static void rebuild_rx_bufs(struct net_device *dev)
598 struct i596_private *lp = (struct i596_private *) dev->priv;
601 /* Ensure rx frame/buffer descriptors are tidy */
603 for (i = 0; i < rx_ring_size; i++) {
604 lp->rfds[i].rbd = I596_NULL;
605 lp->rfds[i].cmd = CMD_FLEX;
607 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
608 lp->rfd_head = lp->rfds;
609 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
610 lp->rbd_head = lp->rbds;
611 lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
615 static int init_i596_mem(struct net_device *dev)
617 struct i596_private *lp = (struct i596_private *) dev->priv;
618 #if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET)
619 short ioaddr = dev->base_addr;
623 MPU_PORT(dev, PORT_RESET, 0);
625 udelay(100); /* Wait 100us - seems to help */
627 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
628 #ifdef ENABLE_MVME16x_NET
629 if (MACH_IS_MVME16x) {
630 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
632 /* Disable all ints for now */
635 /* Following disables snooping. Snooping is not required
636 * as we make appropriate use of non-cached pages for
637 * shared data, and cache_push/cache_clear.
642 #ifdef ENABLE_BVME6000_NET
643 if (MACH_IS_BVME6000) {
644 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
650 /* change the scp address */
652 MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus(&lp->scp));
654 #elif defined(ENABLE_APRICOT)
657 u32 scp = virt_to_bus(&lp->scp);
659 /* change the scp address */
662 outb(4, ioaddr + 0xf);
663 outw(scp | 2, ioaddr);
664 outw(scp >> 16, ioaddr);
668 lp->last_cmd = jiffies;
670 #ifdef ENABLE_MVME16x_NET
672 lp->scp.sysbus = 0x00000054;
674 #ifdef ENABLE_BVME6000_NET
675 if (MACH_IS_BVME6000)
676 lp->scp.sysbus = 0x0000004c;
678 #ifdef ENABLE_APRICOT
680 lp->scp.sysbus = 0x00440000;
683 lp->scp.iscp = WSWAPiscp(virt_to_bus(&(lp->iscp)));
684 lp->iscp.scb = WSWAPscb(virt_to_bus(&(lp->scb)));
685 lp->iscp.stat = ISCP_BUSY;
688 lp->cmd_head = lp->scb.cmd = I596_NULL;
690 #ifdef ENABLE_BVME6000_NET
691 if (MACH_IS_BVME6000) {
692 lp->scb.t_on = 7 * 25;
693 lp->scb.t_off = 1 * 25;
697 DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
699 #if defined(ENABLE_APRICOT)
700 (void) inb(ioaddr + 0x10);
701 outb(4, ioaddr + 0xf);
705 if (wait_istat(dev,lp,1000,"initialization timed out"))
707 DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));
709 /* Ensure rx frame/buffer descriptors are tidy */
710 rebuild_rx_bufs(dev);
713 #ifdef ENABLE_MVME16x_NET
714 if (MACH_IS_MVME16x) {
715 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
717 /* Enable ints, etc. now */
718 pcc2[0x2a] = 0x55; /* Edge sensitive */
722 #ifdef ENABLE_BVME6000_NET
723 if (MACH_IS_BVME6000) {
724 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
731 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
732 memcpy(lp->cf_cmd.i596_config, init_setup, 14);
733 lp->cf_cmd.cmd.command = CmdConfigure;
734 i596_add_cmd(dev, &lp->cf_cmd.cmd);
736 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
737 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
738 lp->sa_cmd.cmd.command = CmdSASetup;
739 i596_add_cmd(dev, &lp->sa_cmd.cmd);
741 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
742 lp->tdr_cmd.cmd.command = CmdTDR;
743 i596_add_cmd(dev, &lp->tdr_cmd.cmd);
745 spin_lock_irqsave (&lp->lock, flags);
747 if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
748 spin_unlock_irqrestore (&lp->lock, flags);
751 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
752 lp->scb.command = RX_START;
755 spin_unlock_irqrestore (&lp->lock, flags);
757 if (wait_cmd(dev,lp,1000,"RX_START not processed"))
759 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
763 printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
764 MPU_PORT(dev, PORT_RESET, 0);
768 static inline int i596_rx(struct net_device *dev)
770 struct i596_private *lp = (struct i596_private *)dev->priv;
771 struct i596_rfd *rfd;
772 struct i596_rbd *rbd;
775 DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
776 lp->rfd_head, lp->rbd_head));
778 rfd = lp->rfd_head; /* Ref next frame to check */
780 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
781 if (rfd->rbd == I596_NULL)
783 else if (rfd->rbd == lp->rbd_head->b_addr)
786 printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
790 DEB(DEB_RXFRAME, printk(KERN_DEBUG " rfd %p, rfd.rbd %p, rfd.stat %04x\n",
791 rfd, rfd->rbd, rfd->stat));
793 if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
795 int pkt_len = rbd->count & 0x3fff;
796 struct sk_buff *skb = rbd->skb;
799 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
802 /* Check if the packet is long enough to just accept
803 * without copying to a properly sized skbuff.
806 if (pkt_len > rx_copybreak) {
807 struct sk_buff *newskb;
809 /* Get fresh skbuff to replace filled one. */
810 newskb = dev_alloc_skb(PKT_BUF_SZ);
811 if (newskb == NULL) {
812 skb = NULL; /* drop pkt */
815 /* Pass up the skb already on the Rx ring. */
816 skb_put(skb, pkt_len);
820 rbd->v_data = newskb->tail;
821 rbd->b_data = WSWAPchar(virt_to_bus(newskb->tail));
823 cache_clear(virt_to_phys(newskb->tail), PKT_BUF_SZ);
827 skb = dev_alloc_skb(pkt_len + 2);
830 /* XXX tulip.c can defer packets here!! */
831 printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
832 lp->stats.rx_dropped++;
837 /* 16 byte align the data fields */
839 memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
841 skb->protocol=eth_type_trans(skb,dev);
844 cache_clear(virt_to_phys(rbd->skb->tail),
848 dev->last_rx = jiffies;
849 lp->stats.rx_packets++;
850 lp->stats.rx_bytes+=pkt_len;
854 DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
855 dev->name, rfd->stat));
856 lp->stats.rx_errors++;
857 if ((rfd->stat) & 0x0001)
858 lp->stats.collisions++;
859 if ((rfd->stat) & 0x0080)
860 lp->stats.rx_length_errors++;
861 if ((rfd->stat) & 0x0100)
862 lp->stats.rx_over_errors++;
863 if ((rfd->stat) & 0x0200)
864 lp->stats.rx_fifo_errors++;
865 if ((rfd->stat) & 0x0400)
866 lp->stats.rx_frame_errors++;
867 if ((rfd->stat) & 0x0800)
868 lp->stats.rx_crc_errors++;
869 if ((rfd->stat) & 0x1000)
870 lp->stats.rx_length_errors++;
873 /* Clear the buffer descriptor count and EOF + F flags */
875 if (rbd != I596_NULL && (rbd->count & 0x4000)) {
877 lp->rbd_head = rbd->v_next;
880 /* Tidy the frame descriptor, marking it as end of list */
882 rfd->rbd = I596_NULL;
884 rfd->cmd = CMD_EOL|CMD_FLEX;
887 /* Remove end-of-list from old end descriptor */
889 rfd->v_prev->cmd = CMD_FLEX;
891 /* Update record of next frame descriptor to process */
893 lp->scb.rfd = rfd->b_next;
894 lp->rfd_head = rfd->v_next;
898 DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));
904 static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
906 struct i596_cmd *ptr;
908 while (lp->cmd_head != I596_NULL) {
910 lp->cmd_head = ptr->v_next;
913 switch ((ptr->command) & 0x7) {
916 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
917 struct sk_buff *skb = tx_cmd->skb;
921 lp->stats.tx_errors++;
922 lp->stats.tx_aborted_errors++;
924 ptr->v_next = ptr->b_next = I596_NULL;
925 tx_cmd->cmd.command = 0; /* Mark as free */
929 ptr->v_next = ptr->b_next = I596_NULL;
933 wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
934 lp->scb.cmd = I596_NULL;
937 static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr)
941 DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));
943 spin_lock_irqsave (&lp->lock, flags);
945 wait_cmd(dev,lp,100,"i596_reset timed out");
947 netif_stop_queue(dev);
949 lp->scb.command = CUC_ABORT | RX_ABORT;
952 /* wait for shutdown */
953 wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
954 spin_unlock_irqrestore (&lp->lock, flags);
956 i596_cleanup_cmd(dev,lp);
959 netif_start_queue(dev);
963 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
965 struct i596_private *lp = (struct i596_private *) dev->priv;
966 int ioaddr = dev->base_addr;
969 DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));
972 cmd->command |= (CMD_EOL | CMD_INTR);
973 cmd->v_next = cmd->b_next = I596_NULL;
975 spin_lock_irqsave (&lp->lock, flags);
977 if (lp->cmd_head != I596_NULL) {
978 lp->cmd_tail->v_next = cmd;
979 lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
982 wait_cmd(dev,lp,100,"i596_add_cmd timed out");
983 lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
984 lp->scb.command = CUC_START;
990 spin_unlock_irqrestore (&lp->lock, flags);
992 if (lp->cmd_backlog > max_cmd_backlog) {
993 unsigned long tickssofar = jiffies - lp->last_cmd;
995 if (tickssofar < ticks_limit)
998 printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);
1000 i596_reset(dev, lp, ioaddr);
1004 static int i596_open(struct net_device *dev)
1008 DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));
1010 if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
1011 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
1014 #ifdef ENABLE_MVME16x_NET
1015 if (MACH_IS_MVME16x) {
1016 if (request_irq(0x56, &i596_error, 0, "i82596_error", dev))
1022 netif_start_queue(dev);
1026 /* Initialize the 82596 memory */
1027 if (init_i596_mem(dev)) {
1029 free_irq(dev->irq, dev);
1035 static void i596_tx_timeout (struct net_device *dev)
1037 struct i596_private *lp = (struct i596_private *) dev->priv;
1038 int ioaddr = dev->base_addr;
1040 /* Transmitter timeout, serious problems. */
1041 DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
1044 lp->stats.tx_errors++;
1046 /* Try to restart the adaptor */
1047 if (lp->last_restart == lp->stats.tx_packets) {
1048 DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
1049 /* Shutdown and restart */
1050 i596_reset (dev, lp, ioaddr);
1052 /* Issue a channel attention signal */
1053 DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
1054 lp->scb.command = CUC_START | RX_START;
1056 lp->last_restart = lp->stats.tx_packets;
1059 dev->trans_start = jiffies;
1060 netif_wake_queue (dev);
1064 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1066 struct i596_private *lp = (struct i596_private *) dev->priv;
1067 struct tx_cmd *tx_cmd;
1068 struct i596_tbd *tbd;
1069 short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
1070 dev->trans_start = jiffies;
1072 DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%x) called\n", dev->name,
1073 skb->len, (unsigned int)skb->data));
1075 netif_stop_queue(dev);
1077 tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
1078 tbd = lp->tbds + lp->next_tx_cmd;
1080 if (tx_cmd->cmd.command) {
1081 printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
1083 lp->stats.tx_dropped++;
1087 if (++lp->next_tx_cmd == TX_RING_SIZE)
1088 lp->next_tx_cmd = 0;
1089 tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
1090 tbd->next = I596_NULL;
1092 tx_cmd->cmd.command = CMD_FLEX | CmdTx;
1098 tbd->size = EOF | length;
1100 tbd->data = WSWAPchar(virt_to_bus(skb->data));
1103 cache_push(virt_to_phys(skb->data), length);
1105 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
1106 i596_add_cmd(dev, &tx_cmd->cmd);
1108 lp->stats.tx_packets++;
1109 lp->stats.tx_bytes += length;
1112 netif_start_queue(dev);
1117 static void print_eth(unsigned char *add, char *str)
1121 printk(KERN_DEBUG "i596 0x%p, ", add);
1122 for (i = 0; i < 6; i++)
1123 printk(" %02X", add[i + 6]);
1125 for (i = 0; i < 6; i++)
1126 printk(" %02X", add[i]);
1127 printk(" %02X%02X, %s\n", add[12], add[13], str);
1130 int __init i82596_probe(struct net_device *dev)
1133 struct i596_private *lp;
1140 #ifdef ENABLE_MVME16x_NET
1141 if (MACH_IS_MVME16x) {
1142 if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
1143 printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
1146 memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */
1147 dev->base_addr = MVME_I596_BASE;
1148 dev->irq = (unsigned) MVME16x_IRQ_I596;
1151 #ifdef ENABLE_BVME6000_NET
1152 if (MACH_IS_BVME6000) {
1153 volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
1154 unsigned char msr = rtc[3];
1158 for (i = 0; i < 6; i++)
1159 eth_addr[i] = rtc[i * 4 + 7]; /* Stored in RTC RAM at offset 1 */
1161 dev->base_addr = BVME_I596_BASE;
1162 dev->irq = (unsigned) BVME_IRQ_I596;
1165 #ifdef ENABLE_APRICOT
1170 /* this is easy the ethernet interface can only be at 0x300 */
1171 /* first check nothing is already registered here */
1173 if (!request_region(ioaddr, I596_TOTAL_SIZE, dev->name)) {
1174 printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
1178 for (i = 0; i < 8; i++) {
1179 eth_addr[i] = inb(ioaddr + 8 + i);
1180 checksum += eth_addr[i];
1183 /* checksum is a multiple of 0x100, got this wrong first time
1184 some machines have 0x100, some 0x200. The DOS driver doesn't
1185 even bother with the checksum.
1186 Some other boards trip the checksum.. but then appear as
1187 ether address 0. Trap these - AC */
1189 if ((checksum % 0x100) ||
1190 (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
1191 release_region(ioaddr, I596_TOTAL_SIZE);
1195 dev->base_addr = ioaddr;
1199 dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
1200 if (!dev->mem_start) {
1201 #ifdef ENABLE_APRICOT
1202 release_region(dev->base_addr, I596_TOTAL_SIZE);
1208 DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1210 for (i = 0; i < 6; i++)
1211 DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
1213 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
1215 DEB(DEB_PROBE,printk(KERN_INFO "%s", version));
1217 /* The 82596-specific entries in the device structure. */
1218 dev->open = i596_open;
1219 dev->stop = i596_close;
1220 dev->hard_start_xmit = i596_start_xmit;
1221 dev->get_stats = i596_get_stats;
1222 dev->set_multicast_list = set_multicast_list;
1223 dev->tx_timeout = i596_tx_timeout;
1224 dev->watchdog_timeo = TX_TIMEOUT;
1226 dev->priv = (void *)(dev->mem_start);
1228 lp = (struct i596_private *) dev->priv;
1229 DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
1230 dev->name, (unsigned long)lp,
1231 sizeof(struct i596_private), (unsigned long)&lp->scb));
1232 memset((void *) lp, 0, sizeof(struct i596_private));
1235 cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
1236 cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
1237 kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
1239 lp->scb.command = 0;
1240 lp->scb.cmd = I596_NULL;
1241 lp->scb.rfd = I596_NULL;
1242 lp->lock = SPIN_LOCK_UNLOCKED;
1247 static void i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1249 struct net_device *dev = dev_id;
1250 struct i596_private *lp;
1252 unsigned short status, ack_cmd = 0;
1254 #ifdef ENABLE_BVME6000_NET
1255 if (MACH_IS_BVME6000) {
1256 if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
1257 i596_error(irq, dev_id, regs);
1263 printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
1267 ioaddr = dev->base_addr;
1268 lp = (struct i596_private *) dev->priv;
1270 spin_lock (&lp->lock);
1272 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1273 status = lp->scb.status;
1275 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1276 dev->name, irq, status));
1278 ack_cmd = status & 0xf000;
1280 if ((status & 0x8000) || (status & 0x2000)) {
1281 struct i596_cmd *ptr;
1283 if ((status & 0x8000))
1284 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
1285 if ((status & 0x2000))
1286 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
1288 while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
1291 DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
1292 lp->cmd_head->status, lp->cmd_head->command));
1293 lp->cmd_head = ptr->v_next;
1296 switch ((ptr->command) & 0x7) {
1299 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1300 struct sk_buff *skb = tx_cmd->skb;
1302 if ((ptr->status) & STAT_OK) {
1303 DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
1305 lp->stats.tx_errors++;
1306 if ((ptr->status) & 0x0020)
1307 lp->stats.collisions++;
1308 if (!((ptr->status) & 0x0040))
1309 lp->stats.tx_heartbeat_errors++;
1310 if ((ptr->status) & 0x0400)
1311 lp->stats.tx_carrier_errors++;
1312 if ((ptr->status) & 0x0800)
1313 lp->stats.collisions++;
1314 if ((ptr->status) & 0x1000)
1315 lp->stats.tx_aborted_errors++;
1318 dev_kfree_skb_irq(skb);
1320 tx_cmd->cmd.command = 0; /* Mark free */
1325 unsigned short status = ((struct tdr_cmd *)ptr)->status;
1327 if (status & 0x8000) {
1328 DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
1330 if (status & 0x4000)
1331 printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
1332 if (status & 0x2000)
1333 printk(KERN_ERR "%s: Termination problem.\n", dev->name);
1334 if (status & 0x1000)
1335 printk(KERN_ERR "%s: Short circuit.\n", dev->name);
1337 DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
1342 case CmdMulticastList:
1343 /* Zap command so set_multicast_list() knows it is free */
1347 ptr->v_next = ptr->b_next = I596_NULL;
1348 lp->last_cmd = jiffies;
1352 while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
1353 ptr->command &= 0x1fff;
1357 if ((lp->cmd_head != I596_NULL))
1358 ack_cmd |= CUC_START;
1359 lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
1361 if ((status & 0x1000) || (status & 0x4000)) {
1362 if ((status & 0x4000))
1363 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
1365 /* Only RX_START if stopped - RGH 07-07-96 */
1366 if (status & 0x1000) {
1367 if (netif_running(dev)) {
1368 DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
1369 ack_cmd |= RX_START;
1370 lp->stats.rx_errors++;
1371 lp->stats.rx_fifo_errors++;
1372 rebuild_rx_bufs(dev);
1376 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1377 lp->scb.command = ack_cmd;
1379 #ifdef ENABLE_MVME16x_NET
1380 if (MACH_IS_MVME16x) {
1381 /* Ack the interrupt */
1383 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
1388 #ifdef ENABLE_BVME6000_NET
1389 if (MACH_IS_BVME6000) {
1390 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
1396 #ifdef ENABLE_APRICOT
1397 (void) inb(ioaddr + 0x10);
1398 outb(4, ioaddr + 0xf);
1402 DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
1404 spin_unlock (&lp->lock);
/* i596_close() - shut the interface down.
 *
 * Stops the transmit queue, aborts the chip's command and receive units
 * (CUC_ABORT | RX_ABORT via the SCB), dumps driver state when debugging,
 * cleans up any queued commands, disables board-level interrupt sources,
 * then releases the IRQ and the receive buffers.
 *
 * NOTE(review): several interior lines are not visible in this view of
 * the file (the save_flags()/cli() that pairs with restore_flags(), the
 * SCB command issue (CA) between the two wait_cmd() calls, closing
 * braces and the final "return 0;") - confirm against the full source.
 */
1408 static int i596_close(struct net_device *dev)
1410 struct i596_private *lp = (struct i596_private *) dev->priv;
1411 unsigned long flags;
/* Stop the upper layer from handing us more packets. */
1413 netif_stop_queue(dev);
1415 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1416 dev->name, lp->scb.status));
/* Wait for the SCB to be idle, then ask the 82596 to abort both the
 * command unit and the receive unit. */
1421 wait_cmd(dev,lp,100,"close1 timed out");
1422 lp->scb.command = CUC_ABORT | RX_ABORT;
/* Wait for the abort to be accepted before touching shared state. */
1425 wait_cmd(dev,lp,100,"close2 timed out");
/* Matching save_flags()/cli() is in elided lines above - TODO confirm. */
1426 restore_flags(flags);
1427 DEB(DEB_STRUCT,i596_display_data(dev));
/* Free any commands still sitting on the driver's command queue. */
1428 i596_cleanup_cmd(dev,lp);
1430 #ifdef ENABLE_MVME16x_NET
1431 if (MACH_IS_MVME16x) {
/* PCC2 chip on the MVME16x carries the ethernet interrupt control. */
1432 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
1434 /* Disable all ints */
1437 pcc2[0x2b] = 0x40; /* Set snooping bits now! */
1440 #ifdef ENABLE_BVME6000_NET
1441 if (MACH_IS_BVME6000) {
/* BVME6000 ethernet interrupt enable/ack register. */
1442 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;
/* Give back the IRQ line and the receive sk_buffs allocated at open(). */
1448 free_irq(dev->irq, dev);
1449 remove_rx_bufs(dev);
/* i596_get_stats() - return the interface statistics kept in the
 * driver's private data (updated from the interrupt handler).
 * The "return &lp->stats;" line is elided from this view. */
1455 static struct net_device_stats *
1456 i596_get_stats(struct net_device *dev)
1458 struct i596_private *lp = (struct i596_private *) dev->priv;
1464 * Set or clear the multicast filter for this adaptor.
/* set_multicast_list() - update the chip's RX filtering to match
 * dev->flags (IFF_PROMISC / IFF_ALLMULTI) and dev->mc_list.
 *
 * Toggling promiscuous or all-multicast mode edits the saved 82596
 * configure block and requeues a CmdConfigure; multicast addresses are
 * sent down as a CmdMulticastList of 6-byte entries.  Runs from
 * process context; i596_add_cmd() handles queueing to the chip.
 */
1467 static void set_multicast_list(struct net_device *dev)
1469 struct i596_private *lp = (struct i596_private *) dev->priv;
1470 int config = 0, cnt;
1472 DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1473 dev->name, dev->mc_count,
1474 dev->flags & IFF_PROMISC ? "ON" : "OFF",
1475 dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
/* Make sure any previous configure command has completed before we
 * modify the shared config block. */
1477 if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
/* Config byte 8 bit 0x01 = promiscuous mode: set/clear to track
 * IFF_PROMISC (only when it actually changed). */
1480 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1481 lp->cf_cmd.i596_config[8] |= 0x01;
1484 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1485 lp->cf_cmd.i596_config[8] &= ~0x01;
/* Config byte 11 bit 0x20 has inverted sense (cleared = accept all
 * multicast, per the visible logic): clear it for IFF_ALLMULTI, set it
 * otherwise.  NOTE(review): presumed "multicast disable" bit from the
 * 82596 configure block - confirm against the Intel manual. */
1488 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1489 lp->cf_cmd.i596_config[11] &= ~0x20;
1492 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1493 lp->cf_cmd.i596_config[11] |= 0x20;
/* Something changed: queue a reconfigure to the chip. */
1497 lp->cf_cmd.cmd.command = CmdConfigure;
1498 i596_add_cmd(dev, &lp->cf_cmd.cmd);
/* Cap the multicast list at what the shared mc_cmd block can hold. */
1501 cnt = dev->mc_count;
1502 if (cnt > MAX_MC_CNT)
1505 printk(KERN_ERR "%s: Only %d multicast addresses supported",
/* Build and queue a CmdMulticastList: cnt addresses, 6 bytes each. */
1509 if (dev->mc_count > 0) {
1510 struct dev_mc_list *dmi;
/* Wait for any in-flight multicast-list command to finish first. */
1514 if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
1517 cmd->cmd.command = CmdMulticastList;
1518 cmd->mc_cnt = dev->mc_count * 6;
1520 for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
1521 memcpy(cp, dmi->dmi_addr, 6);
1523 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
1524 dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
1526 i596_add_cmd(dev, &cmd->cmd);
/* Single static device instance; "init:" is the old GNU-style designated
 * initializer (pre-C99), pointing probe at i82596_probe. */
1531 static struct net_device dev_82596 = { init: i82596_probe };
1533 #ifdef ENABLE_APRICOT
/* Apricot ISA defaults, overridable as module parameters. */
1534 static int io = 0x300;
1535 static int irq = 10;
1536 MODULE_PARM(irq, "i");
1537 MODULE_PARM_DESC(irq, "Apricot IRQ number");
/* NOTE(review): in this view MODULE_PARM(debug) appears before the
 * declaration of 'debug' below - presumably an artifact of elided
 * lines; confirm ordering against the full source. */
1540 MODULE_PARM(debug, "i");
1541 MODULE_PARM_DESC(debug, "i82596 debug mask");
/* -1 = keep the driver's compiled-in default debug mask. */
1542 static int debug = -1;
/* init_module() - module entry: push the io/irq module parameters into
 * the static device (Apricot only) and register it, which triggers
 * i82596_probe via the init hook.  Error-return lines are elided from
 * this view. */
1544 int init_module(void)
1546 #ifdef ENABLE_APRICOT
1547 dev_82596.base_addr = io;
1548 dev_82596.irq = irq;
/* register_netdev() runs the probe; nonzero means no hardware found. */
1552 if (register_netdev(&dev_82596) != 0)
/* cleanup_module() - module exit: unregister the device, restore the
 * cache mode of the shared-memory page that was made non-cached at
 * probe time, free that page, and (Apricot) release the I/O region so
 * the driver can be reloaded. */
1557 void cleanup_module(void)
1559 unregister_netdev(&dev_82596);
1561 /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
1562 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
/* Undo the non-cached mapping of the 4K shared SCB/ring page. */
1565 kernel_set_cachemode((void *)(dev_82596.mem_start), 4096,
1566 IOMAP_FULL_CACHING);
1568 free_page ((u32)(dev_82596.mem_start));
/* Clear the stale pointer so nothing touches the freed page. */
1569 dev_82596.priv = NULL;
1570 #ifdef ENABLE_APRICOT
1571 /* If we don't do this, we can't re-insmod it later. */
1572 release_region(dev_82596.base_addr, I596_TOTAL_SIZE);
1580 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 82596.c"