/*
 * INET         802.1Q VLAN
 *              Ethernet-type device handling.
 *
 * Authors:     Ben Greear <greearb@candelatech.com>
 *              Please send support related email to: vlan@scry.wanfear.com
 *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:       Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
 *                - reset skb->pkt_type on incoming packets when MAC was changed
 *                - see that changed MAC is saddr for outgoing packets
 *              Oct 20, 2001:  Ard van Breeman:
 *                - Fix MC-list, finally.
 *                - Flush MC-list on VLAN destroy.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/init.h>
#include <asm/uaccess.h> /* for copy_from_user */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/datalink.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/brlock.h>

#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
#include <net/ip.h>

/*
 *      Rebuild the Ethernet MAC header. This is called after an ARP
 *      (or in future other address resolution) has completed on this
 *      sk_buff. We now let ARP fill in the other fields.
 *
 *      This routine CANNOT use cached dst->neigh!
 *      Really, it is used only when dst->neigh is wrong.
 *
 * TODO:  This needs a checkup, I'm ignorant here. --BLG
 */
int vlan_dev_rebuild_header(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

        switch (veth->h_vlan_encapsulated_proto) {
#ifdef CONFIG_INET
        case __constant_htons(ETH_P_IP):

                /* TODO:  Confirm this will work with VLAN headers... */
                return arp_find(veth->h_dest, skb);
#endif
        default:
                printk(VLAN_DBG
                       "%s: unable to resolve type %X addresses.\n",
                       dev->name, (int)veth->h_vlan_encapsulated_proto);

                memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
                break;
        }

        return 0;
}

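/* Helper for the re-order-header behaviour (bit 0 of the vlan_dev_info flags):
 * when that flag is set, rewrite a received frame so that it looks like a
 * plain Ethernet frame again.  The 12 bytes of destination + source MAC are
 * moved up over the 4-byte 802.1Q tag and skb->mac.raw is advanced to match,
 * so upper layers (and packet sniffers) never see the tag.
 */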
static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
        if (VLAN_DEV_INFO(skb->dev)->flags & 1) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (skb) {
                        /* Lifted from Gleb's VLAN code... */
                        memmove(skb->data - ETH_HLEN,
                                skb->data - VLAN_ETH_HLEN, 12);
                        skb->mac.raw += VLAN_HLEN;
                }
        }

        return skb;
}

/*
 *      Determine the packet's protocol ID. The rule here is that we
 *      assume 802.3 if the type field is short enough to be a length.
 *      This is normal practice and works for any 'now in use' protocol.
 *
 *  Also, at this point we assume that we ARE dealing exclusively with
 *  VLAN packets, or packets that should be made into VLAN packets based
 *  on a default VLAN ID.
 *
 *  NOTE:  Should be similar to ethernet/eth.c.
 *
 *  SANITY NOTE:  This method is called when a packet is moving up the stack
 *                towards userland.  To get here, it would have already passed
 *                through the ethernet/eth.c eth_type_trans() method.
 *  SANITY NOTE 2: We are referencing the VLAN_HDR fields, which MAY be
 *                 stored UNALIGNED in the memory.  RISC systems don't like
 *                 such cases very much...
 *  SANITY NOTE 2a:  According to Dave Miller & Alexey, it will always be aligned,
 *                 so there doesn't need to be any of the unaligned stuff.  It has
 *                 been commented out now...  --Ben
 *
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                  struct packet_type *ptype)
{
        unsigned char *rawp = NULL;
        struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
        unsigned short vid;
        struct net_device_stats *stats;
        unsigned short vlan_TCI;
        unsigned short proto;

        /* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
        vlan_TCI = ntohs(vhdr->h_vlan_TCI);

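        /* The 16-bit TCI is laid out as: user priority (3 high bits),
         * CFI (1 bit), VLAN ID (12 low bits).  Mask off everything but
         * the VID so we can look up the VLAN device for this tag.
         */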
        vid = (vlan_TCI & VLAN_VID_MASK);

#ifdef VLAN_DEBUG
        printk(VLAN_DBG "%s: skb: %p vlan_id: %hx\n",
                __FUNCTION__, skb, vid);
#endif

        /* Ok, we will find the correct VLAN device, strip the header,
         * and then go on as usual.
         */

        /* We have 12 bits of vlan ID.
         *
         * We must not drop the vlan_group_lock until we hold a
         * reference to the device (netif_rx does that) or we
         * fail.
         */

        spin_lock_bh(&vlan_group_lock);
        skb->dev = __find_vlan_dev(dev, vid);
        if (!skb->dev) {
                spin_unlock_bh(&vlan_group_lock);

#ifdef VLAN_DEBUG
                printk(VLAN_DBG "%s: ERROR: No net_device for VID: %i on dev: %s [%i]\n",
                        __FUNCTION__, (unsigned int)(vid), dev->name, dev->ifindex);
#endif
                kfree_skb(skb);
                return -1;
        }

        skb->dev->last_rx = jiffies;

        /* Bump the rx counters for the VLAN device. */
        stats = vlan_dev_get_stats(skb->dev);
        stats->rx_packets++;
        stats->rx_bytes += skb->len;

        skb_pull(skb, VLAN_HLEN); /* take off the VLAN header (4 bytes currently) */

        /* Ok, let's check to make sure the device (dev) we
         * came in on is what this VLAN is attached to.
         */

        if (dev != VLAN_DEV_INFO(skb->dev)->real_dev) {
                spin_unlock_bh(&vlan_group_lock);

#ifdef VLAN_DEBUG
                printk(VLAN_DBG "%s: dropping skb: %p because it came in on the wrong device, dev: %s  real_dev: %s, skb_dev: %s\n",
                        __FUNCTION__, skb, dev->name,
                        VLAN_DEV_INFO(skb->dev)->real_dev->name,
                        skb->dev->name);
#endif
                kfree_skb(skb);
                stats->rx_errors++;
                return -1;
        }

        /*
         * Deal with ingress priority mapping.
         */
        skb->priority = vlan_get_ingress_priority(skb->dev, ntohs(vhdr->h_vlan_TCI));

#ifdef VLAN_DEBUG
        printk(VLAN_DBG "%s: priority: %lu  for TCI: %hu (hbo)\n",
                __FUNCTION__, (unsigned long)(skb->priority),
                ntohs(vhdr->h_vlan_TCI));
#endif

        /* The ethernet driver already did the pkt_type calculations
         * for us...
         */
        switch (skb->pkt_type) {
        case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
                // stats->broadcast ++; // no such counter :-(
                break;

        case PACKET_MULTICAST:
                stats->multicast++;
                break;

        case PACKET_OTHERHOST:
                /* Our lower layer thinks this is not local, let's make sure.
                 * This allows the VLAN to have a different MAC than the underlying
                 * device, and still route correctly.
                 */
                if (memcmp(skb->mac.ethernet->h_dest, skb->dev->dev_addr, ETH_ALEN) == 0) {
                        /* It is for our (changed) MAC address! */
                        skb->pkt_type = PACKET_HOST;
                }
                break;
        default:
                break;
        }

        /* It was a VLAN packet, so grab the encapsulated protocol, which the
         * layer three protocols care about.
         */
        /* proto = get_unaligned(&vhdr->h_vlan_encapsulated_proto); */
        proto = vhdr->h_vlan_encapsulated_proto;

        skb->protocol = proto;
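        /* 1536 (0x0600) is the smallest value that is a valid EtherType;
         * anything below that is an 802.3 length field instead, so it is
         * handled by the LLC/IPX special cases further down.
         */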
        if (ntohs(proto) >= 1536) {
                /* place it back on the queue to be handled by
                 * true layer 3 protocols.
                 */

                /* See if we are configured to re-write the VLAN header
                 * to make it look like ethernet...
                 */
                skb = vlan_check_reorder_header(skb);

                /* Can be null if skb-clone fails when re-ordering */
                if (skb) {
                        netif_rx(skb);
                } else {
                        /* TODO:  Add a more specific counter here. */
                        stats->rx_errors++;
                }
                spin_unlock_bh(&vlan_group_lock);
                return 0;
        }

        rawp = skb->data;

        /*
         * This is a magic hack to spot IPX packets. Older Novell breaks
         * the protocol design and runs IPX over 802.3 without an 802.2 LLC
         * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
         * won't work for fault tolerant netware but does for the rest.
         */
        if (*(unsigned short *)rawp == 0xFFFF) {
                skb->protocol = __constant_htons(ETH_P_802_3);
                /* place it back on the queue to be handled by true layer 3 protocols.
                 */

                /* See if we are configured to re-write the VLAN header
                 * to make it look like ethernet...
                 */
                skb = vlan_check_reorder_header(skb);

                /* Can be null if skb-clone fails when re-ordering */
                if (skb) {
                        netif_rx(skb);
                } else {
                        /* TODO:  Add a more specific counter here. */
                        stats->rx_errors++;
                }
                spin_unlock_bh(&vlan_group_lock);
                return 0;
        }

        /*
         *      Real 802.2 LLC
         */
        skb->protocol = __constant_htons(ETH_P_802_2);
        /* place it back on the queue to be handled by upper layer protocols.
         */

        /* See if we are configured to re-write the VLAN header
         * to make it look like ethernet...
         */
        skb = vlan_check_reorder_header(skb);

        /* Can be null if skb-clone fails when re-ordering */
        if (skb) {
                netif_rx(skb);
        } else {
                /* TODO:  Add a more specific counter here. */
                stats->rx_errors++;
        }
        spin_unlock_bh(&vlan_group_lock);
        return 0;
}

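/* Look up the egress priority map for this device: the table is hashed on
 * the low four bits of skb->priority, and each bucket is a chain of
 * skb-priority -> vlan_qos mappings.  The returned vlan_qos value is kept
 * pre-shifted (bits 13-15) so it can simply be OR'd into the TCI.
 */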
static inline unsigned short vlan_dev_get_egress_qos_mask(struct net_device *dev,
                                                          struct sk_buff *skb)
{
        struct vlan_priority_tci_mapping *mp =
                VLAN_DEV_INFO(dev)->egress_priority_map[(skb->priority & 0xF)];

        while (mp) {
                if (mp->priority == skb->priority) {
                        return mp->vlan_qos; /* This should already be shifted to mask
                                              * correctly with the VLAN's TCI
                                              */
                }
                mp = mp->next;
        }
        return 0;
}

/*
 *      Create the VLAN header for an arbitrary protocol layer
 *
 *      saddr=NULL      means use device source address
 *      daddr=NULL      means leave destination address (eg unresolved arp)
 *
 *  This is called when the SKB is moving down the stack towards the
 *  physical devices.
 */
int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                         unsigned short type, void *daddr, void *saddr,
                         unsigned len)
{
        struct vlan_hdr *vhdr;
        unsigned short veth_TCI = 0;
        int rc = 0;
        int build_vlan_header = 0;
        struct net_device *vdev = dev; /* save this for the bottom of the method */

#ifdef VLAN_DEBUG
        printk(VLAN_DBG "%s: skb: %p type: %hx len: %x vlan_id: %hx, daddr: %p\n",
                __FUNCTION__, skb, type, len, VLAN_DEV_INFO(dev)->vlan_id, daddr);
#endif

        /* Build the VLAN header only if the re_order_header flag is NOT set.  This
         * fixes some programs that get confused when they see a VLAN device
         * sending a frame that is VLAN encoded (the consensus is that the VLAN
         * device should look completely like an Ethernet device when the
         * REORDER_HEADER flag is set).  The drawback to this is some extra
         * header shuffling in the hard_start_xmit.  Users can turn off this
         * REORDER behaviour with the vconfig tool.
         */
        build_vlan_header = ((VLAN_DEV_INFO(dev)->flags & 1) == 0);

        if (build_vlan_header) {
                vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);

                /* build the four bytes that make this a VLAN header. */

                /* Now, construct the second two bytes. This field looks something
                 * like:
                 * usr_priority: 3 bits  (high bits)
                 * CFI           1 bit
                 * VLAN ID       12 bits (low bits)
                 *
                 */
                veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
                veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
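                /* For example (illustrative values only): vlan_id 5 with an
                 * egress qos mask of 0x6000 (802.1p priority 3) yields a TCI
                 * of 0x6005 before the htons() below.
                 */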

                vhdr->h_vlan_TCI = htons(veth_TCI);

                /*
                 *  Set the protocol type.
                 *  For a packet of type ETH_P_802_3 we put the length in here instead.
                 *  It is up to the 802.2 layer to carry protocol information.
                 */

                if (type != ETH_P_802_3) {
                        vhdr->h_vlan_encapsulated_proto = htons(type);
                } else {
                        vhdr->h_vlan_encapsulated_proto = htons(len);
                }
        }

        /* Before delegating work to the lower layer, enter our MAC-address */
        if (saddr == NULL)
                saddr = dev->dev_addr;

        dev = VLAN_DEV_INFO(dev)->real_dev;

        /* MPLS can send us skbuffs w/out enough space.  This check will grow the
         * skb if it doesn't have enough headroom.  Not a beautiful solution, so
         * I'll tick a counter so that users can know it's happening...  If they
         * care...
         */

        /* NOTE:  This may still break if the underlying device is not the final
         * device (and thus there are more headers to add...)  It should work for
         * good-ole-ethernet though.
         */
        if (skb_headroom(skb) < dev->hard_header_len) {
                struct sk_buff *sk_tmp = skb;
                skb = skb_realloc_headroom(sk_tmp, dev->hard_header_len);
                kfree_skb(sk_tmp);
                if (skb == NULL) {
                        struct net_device_stats *stats = vlan_dev_get_stats(vdev);
                        stats->tx_dropped++;
                        return -ENOMEM;
                }
                VLAN_DEV_INFO(vdev)->cnt_inc_headroom_on_tx++;
#ifdef VLAN_DEBUG
                printk(VLAN_DBG "%s: %s: had to grow skb.\n", __FUNCTION__, vdev->name);
#endif
        }

        if (build_vlan_header) {
                /* Now make the underlying real hard header */
                rc = dev->hard_header(skb, dev, ETH_P_8021Q, daddr, saddr, len + VLAN_HLEN);

                if (rc > 0) {
                        rc += VLAN_HLEN;
                } else if (rc < 0) {
                        rc -= VLAN_HLEN;
                }
        } else {
                /* If here, then we'll just make a normal looking ethernet frame,
                 * but the hard_start_xmit method will insert the tag (it has to
                 * be able to do this for bridged and other skbs that don't come
                 * down the protocol stack in an orderly manner).
                 */
                rc = dev->hard_header(skb, dev, type, daddr, saddr, len);
        }

        return rc;
}

int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net_device_stats *stats = vlan_dev_get_stats(dev);
        struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

        /* Handle non-VLAN frames if they are sent to us, for example by DHCP.
         *
         * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
         * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
         */

        if (veth->h_vlan_proto != __constant_htons(ETH_P_8021Q)) {
                unsigned short veth_TCI;

                /* This is not a VLAN frame...but we can fix that! */
                VLAN_DEV_INFO(dev)->cnt_encap_on_xmit++;

#ifdef VLAN_DEBUG
                printk(VLAN_DBG "%s: proto to encap: 0x%hx (hbo)\n",
                        __FUNCTION__, htons(veth->h_vlan_proto));
#endif

                if (skb_headroom(skb) < VLAN_HLEN) {
                        struct sk_buff *sk_tmp = skb;
                        skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN);
                        kfree_skb(sk_tmp);
                        if (skb == NULL) {
                                stats->tx_dropped++;
                                return 0;
                        }
                        VLAN_DEV_INFO(dev)->cnt_inc_headroom_on_tx++;
                } else {
                        if (!(skb = skb_unshare(skb, GFP_ATOMIC))) {
                                printk(KERN_ERR "vlan: failed to unshare skbuff\n");
                                stats->tx_dropped++;
                                return 0;
                        }
                }
                veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

                /* Move the mac addresses to the beginning of the new header. */
                memmove(skb->data, skb->data + VLAN_HLEN, 12);

                /* first, the ethernet type */
                /* put_unaligned(__constant_htons(ETH_P_8021Q), &veth->h_vlan_proto); */
                veth->h_vlan_proto = __constant_htons(ETH_P_8021Q);

                /* Now, construct the second two bytes. This field looks something
                 * like:
                 * usr_priority: 3 bits  (high bits)
                 * CFI           1 bit
                 * VLAN ID       12 bits (low bits)
                 */
                veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
                veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);

                veth->h_vlan_TCI = htons(veth_TCI);
        }

        skb->dev = VLAN_DEV_INFO(dev)->real_dev;

#ifdef VLAN_DEBUG
        printk(VLAN_DBG "%s: about to send skb: %p to dev: %s\n",
                __FUNCTION__, skb, skb->dev->name);
        printk(VLAN_DBG "  %2hx.%2hx.%2hx.%2hx.%2hx.%2hx %2hx.%2hx.%2hx.%2hx.%2hx.%2hx %4hx %4hx %4hx\n",
               veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
               veth->h_source[0], veth->h_source[1], veth->h_source[2], veth->h_source[3], veth->h_source[4], veth->h_source[5],
               veth->h_vlan_proto, veth->h_vlan_TCI, veth->h_vlan_encapsulated_proto);
#endif

        stats->tx_packets++; /* for statistics only */
        stats->tx_bytes += skb->len;

        dev_queue_xmit(skb);

        return 0;
}

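/* Hardware-accelerated transmit path: instead of inserting the 802.1Q tag
 * into the frame here, stash the tag in the skb's control block (the tx
 * "cookie").  A driver that advertises hardware VLAN tag insertion is
 * expected to pick it up via VLAN_TX_SKB_CB() and let the NIC add the tag.
 */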
int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net_device_stats *stats = vlan_dev_get_stats(dev);
        struct vlan_skb_tx_cookie *cookie;

        stats->tx_packets++;
        stats->tx_bytes += skb->len;

        skb->dev = VLAN_DEV_INFO(dev)->real_dev;
        cookie = VLAN_TX_SKB_CB(skb);
        cookie->magic = VLAN_TX_COOKIE_MAGIC;
        cookie->vlan_tag = (VLAN_DEV_INFO(dev)->vlan_id |
                            vlan_dev_get_egress_qos_mask(dev, skb));

        dev_queue_xmit(skb);

        return 0;
}

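/* A VLAN device's MTU may not exceed that of the underlying (real) device,
 * since every frame we send ultimately has to fit through it with the
 * 802.1Q tag added.  Note that an MTU equal to the real device's is allowed.
 */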
int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
        /* TODO: gotta make sure the underlying layer can handle it,
         * maybe an IFF_VLAN_CAPABLE flag for devices?
         */
        if (VLAN_DEV_INFO(dev)->real_dev->mtu < new_mtu)
                return -ERANGE;

        dev->mtu = new_mtu;

        return new_mtu;
}

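/* Map an incoming 802.1p priority (vlan_prio, 0-7, taken from the top three
 * TCI bits on receive) to the skb->priority value that vlan_skb_recv() will
 * assign via vlan_get_ingress_priority().
 */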
int vlan_dev_set_ingress_priority(char *dev_name, __u32 skb_prio, short vlan_prio)
{
        struct net_device *dev = dev_get_by_name(dev_name);

        if (dev) {
                if (dev->priv_flags & IFF_802_1Q_VLAN) {
                        /* update the mapping for this 802.1p priority */
                        VLAN_DEV_INFO(dev)->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
                        dev_put(dev);
                        return 0;
                }

                dev_put(dev);
        }
        return -EINVAL;
}

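/* Map an outgoing skb->priority value to a 3-bit 802.1p priority.  The
 * priority is stored pre-shifted into bits 13-15 (the vlan_qos value) so
 * that the transmit paths can OR it straight into the TCI.
 */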
int vlan_dev_set_egress_priority(char *dev_name, __u32 skb_prio, short vlan_prio)
{
        struct net_device *dev = dev_get_by_name(dev_name);
        struct vlan_priority_tci_mapping *mp = NULL;
        struct vlan_priority_tci_mapping *np;

        if (dev) {
                if (dev->priv_flags & IFF_802_1Q_VLAN) {
                        /* See if a priority mapping exists.. */
                        mp = VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF];
                        while (mp) {
                                if (mp->priority == skb_prio) {
                                        mp->vlan_qos = ((vlan_prio << 13) & 0xE000);
                                        dev_put(dev);
                                        return 0;
                                }
                                mp = mp->next;
                        }

                        /* Create a new mapping then. */
                        mp = VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF];
                        np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
                        if (np) {
                                np->next = mp;
                                np->priority = skb_prio;
                                np->vlan_qos = ((vlan_prio << 13) & 0xE000);
                                VLAN_DEV_INFO(dev)->egress_priority_map[skb_prio & 0xF] = np;
                                dev_put(dev);
                                return 0;
                        } else {
                                dev_put(dev);
                                return -ENOBUFS;
                        }
                }
                dev_put(dev);
        }
        return -EINVAL;
}

/* Flags are defined in struct vlan_dev_info, in include/linux/if_vlan.h. */
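/* Only one flag is currently understood: bit 0, which enables the
 * re-order-header behaviour used by vlan_check_reorder_header() and
 * vlan_dev_hard_header() above.
 */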
int vlan_dev_set_vlan_flag(char *dev_name, __u32 flag, short flag_val)
{
        struct net_device *dev = dev_get_by_name(dev_name);

        if (dev) {
                if (dev->priv_flags & IFF_802_1Q_VLAN) {
                        /* verify flag is supported */
                        if (flag == 1) {
                                if (flag_val) {
                                        VLAN_DEV_INFO(dev)->flags |= 1;
                                } else {
                                        VLAN_DEV_INFO(dev)->flags &= ~1;
                                }
                                dev_put(dev);
                                return 0;
                        } else {
                                printk(KERN_ERR "%s: flag %i is not valid.\n",
                                        __FUNCTION__, (int)(flag));
                                dev_put(dev);
                                return -EINVAL;
                        }
                } else {
                        printk(KERN_ERR
                               "%s: %s is not a vlan device, priv_flags: %hX.\n",
                               __FUNCTION__, dev->name, dev->priv_flags);
                        dev_put(dev);
                }
        } else {
                printk(KERN_ERR "%s: Could not find device: %s\n",
                        __FUNCTION__, dev_name);
        }

        return -EINVAL;
}

int vlan_dev_set_mac_address(struct net_device *dev, void *addr_struct_p)
{
        struct sockaddr *addr = (struct sockaddr *)(addr_struct_p);
        int i;

        if (netif_running(dev))
                return -EBUSY;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        printk("%s: Setting MAC address to ", dev->name);
        for (i = 0; i < 6; i++)
                printk(" %2.2x", dev->dev_addr[i]);
        printk(".\n");

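        /* If the VLAN device's MAC now differs from the real device's, the
         * hardware will not pick up frames sent to the new address on its
         * own, so force the underlying device into promiscuous mode.
         */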
        if (memcmp(VLAN_DEV_INFO(dev)->real_dev->dev_addr,
                   dev->dev_addr,
                   dev->addr_len) != 0) {
                if (!(VLAN_DEV_INFO(dev)->real_dev->flags & IFF_PROMISC)) {
                        int flgs = VLAN_DEV_INFO(dev)->real_dev->flags;

                        /* Increment our in-use promiscuity counter */
                        dev_set_promiscuity(VLAN_DEV_INFO(dev)->real_dev, 1);

                        /* Make PROMISC visible to the user. */
                        flgs |= IFF_PROMISC;
                        printk("VLAN (%s):  Setting underlying device (%s) to promiscuous mode.\n",
                               dev->name, VLAN_DEV_INFO(dev)->real_dev->name);
                        dev_change_flags(VLAN_DEV_INFO(dev)->real_dev, flgs);
                }
        } else {
                printk("VLAN (%s):  Underlying device (%s) has same MAC, not checking promiscuous mode.\n",
                       dev->name, VLAN_DEV_INFO(dev)->real_dev->name);
        }

        return 0;
}

static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
                                  struct dev_mc_list *dmi2)
{
        return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) &&
                (memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0));
}

/** dmi is a single entry in a dev_mc_list, a single node.  mc_list is
 *  an entire list, and we'll iterate through it.
 */
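/* Returns 1 if dmi should be (re-)added on the other side, i.e. the address
 * is not present in mc_list at all or its reference count there is lower
 * than dmi's; returns 0 otherwise.
 */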
static int vlan_should_add_mc(struct dev_mc_list *dmi, struct dev_mc_list *mc_list)
{
        struct dev_mc_list *idmi;

        for (idmi = mc_list; idmi != NULL; ) {
                if (vlan_dmi_equals(dmi, idmi)) {
                        if (dmi->dmi_users > idmi->dmi_users)
                                return 1;
                        else
                                return 0;
                } else {
                        idmi = idmi->next;
                }
        }

        return 1;
}

static inline void vlan_destroy_mc_list(struct dev_mc_list *mc_list)
{
        struct dev_mc_list *dmi = mc_list;
        struct dev_mc_list *next;

        while (dmi) {
                next = dmi->next;
                kfree(dmi);
                dmi = next;
        }
}

static void vlan_copy_mc_list(struct dev_mc_list *mc_list, struct vlan_dev_info *vlan_info)
{
        struct dev_mc_list *dmi, *new_dmi;

        vlan_destroy_mc_list(vlan_info->old_mc_list);
        vlan_info->old_mc_list = NULL;

        for (dmi = mc_list; dmi != NULL; dmi = dmi->next) {
                new_dmi = kmalloc(sizeof(*new_dmi), GFP_ATOMIC);
                if (new_dmi == NULL) {
                        printk(KERN_ERR "vlan: cannot allocate memory. "
                               "Multicast may not work properly from now on.\n");
                        return;
                }

                /* Copy the whole structure, then set up the new 'next' pointer */
                *new_dmi = *dmi;
                new_dmi->next = vlan_info->old_mc_list;
                vlan_info->old_mc_list = new_dmi;
        }
}

static void vlan_flush_mc_list(struct net_device *dev)
{
        struct dev_mc_list *dmi = dev->mc_list;

        while (dmi) {
                dev_mc_delete(dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
                printk(KERN_DEBUG "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from vlan interface\n",
                       dev->name,
                       dmi->dmi_addr[0],
                       dmi->dmi_addr[1],
                       dmi->dmi_addr[2],
                       dmi->dmi_addr[3],
                       dmi->dmi_addr[4],
                       dmi->dmi_addr[5]);
                dmi = dev->mc_list;
        }

        /* dev->mc_list is NULL by the time we get here. */
        vlan_destroy_mc_list(VLAN_DEV_INFO(dev)->old_mc_list);
        VLAN_DEV_INFO(dev)->old_mc_list = NULL;
}

int vlan_dev_open(struct net_device *dev)
{
        if (!(VLAN_DEV_INFO(dev)->real_dev->flags & IFF_UP))
                return -ENETDOWN;

        return 0;
}

int vlan_dev_stop(struct net_device *dev)
{
        vlan_flush_mc_list(dev);
        return 0;
}

int vlan_dev_init(struct net_device *dev)
{
        /* TODO:  figure this out, maybe do nothing?? */
        return 0;
}

void vlan_dev_destruct(struct net_device *dev)
{
        if (dev) {
                vlan_flush_mc_list(dev);
                if (dev->priv) {
                        if (VLAN_DEV_INFO(dev)->dent)
                                BUG();

                        kfree(dev->priv);
                        dev->priv = NULL;
                }
        }
}

/** Taken from Gleb + Lennert's VLAN code, and modified... */
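/* Synchronise the master device with this VLAN device's state: propagate any
 * change in the promiscuity and allmulti counts, add multicast addresses that
 * are new (or more referenced) since the last snapshot, drop references for
 * ones that went away, and finally save the current list as the new
 * old_mc_list snapshot.
 */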
void vlan_dev_set_multicast_list(struct net_device *vlan_dev)
{
        struct dev_mc_list *dmi;
        struct net_device *real_dev;
        int inc;

        if (vlan_dev && (vlan_dev->priv_flags & IFF_802_1Q_VLAN)) {
                /* Then it's a real vlan device, as far as we can tell. */
                real_dev = VLAN_DEV_INFO(vlan_dev)->real_dev;

                /* compare the current promiscuity to the last promiscuity we had */
                inc = vlan_dev->promiscuity - VLAN_DEV_INFO(vlan_dev)->old_promiscuity;
                if (inc) {
                        printk(KERN_INFO "%s: dev_set_promiscuity(master, %d)\n",
                               vlan_dev->name, inc);
                        dev_set_promiscuity(real_dev, inc); /* found in dev.c */
                        VLAN_DEV_INFO(vlan_dev)->old_promiscuity = vlan_dev->promiscuity;
                }

                inc = vlan_dev->allmulti - VLAN_DEV_INFO(vlan_dev)->old_allmulti;
                if (inc) {
                        printk(KERN_INFO "%s: dev_set_allmulti(master, %d)\n",
                               vlan_dev->name, inc);
                        dev_set_allmulti(real_dev, inc); /* dev.c */
                        VLAN_DEV_INFO(vlan_dev)->old_allmulti = vlan_dev->allmulti;
                }

                /* looking for addresses to add to the master's list */
                for (dmi = vlan_dev->mc_list; dmi != NULL; dmi = dmi->next) {
                        if (vlan_should_add_mc(dmi, VLAN_DEV_INFO(vlan_dev)->old_mc_list)) {
                                dev_mc_add(real_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
                                printk(KERN_DEBUG "%s: add %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address to master interface\n",
                                       vlan_dev->name,
                                       dmi->dmi_addr[0],
                                       dmi->dmi_addr[1],
                                       dmi->dmi_addr[2],
                                       dmi->dmi_addr[3],
                                       dmi->dmi_addr[4],
                                       dmi->dmi_addr[5]);
                        }
                }

                /* looking for addresses to delete from the master's list */
                for (dmi = VLAN_DEV_INFO(vlan_dev)->old_mc_list; dmi != NULL; dmi = dmi->next) {
                        if (vlan_should_add_mc(dmi, vlan_dev->mc_list)) {
                                /* If this old address would have to be (re-)added relative
                                 * to the current list, it is gone (or less referenced) on
                                 * the VLAN device, so drop a reference to it on the
                                 * underlying device.
                                 */
                                dev_mc_delete(real_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
                                printk(KERN_DEBUG "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from master interface\n",
                                       vlan_dev->name,
                                       dmi->dmi_addr[0],
                                       dmi->dmi_addr[1],
                                       dmi->dmi_addr[2],
                                       dmi->dmi_addr[3],
                                       dmi->dmi_addr[4],
                                       dmi->dmi_addr[5]);
                        }
                }

                /* save the multicast list */
                vlan_copy_mc_list(vlan_dev->mc_list, VLAN_DEV_INFO(vlan_dev));
        }
}