linux-2.4.21-pre4: net/ipv6/reassembly.c
/*
 *      IPv6 fragment reassembly
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      $Id: reassembly.c,v 1.1.1.1 2005/04/11 02:51:13 jack Exp $
 *
 *      Based on: net/ipv4/ip_fragment.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      Fixes:
 *      Andi Kleen              Make it work with multiple hosts.
 *                              More RFC compliance.
 *
 *      Horst von Brand         Add missing #include <linux/string.h>
 *      Alexey Kuznetsov        SMP races, threading, cleanup.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>

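/*
 * Reassembly memory watermarks: once the memory held by all fragment
 * queues exceeds high_thresh, ip6_evictor() frees the oldest queues
 * until usage drops back below low_thresh.  Each queue also expires
 * after sysctl_ip6frag_time jiffies (IPV6_FRAG_TIMEOUT by default).
 */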
int sysctl_ip6frag_high_thresh = 256*1024;
int sysctl_ip6frag_low_thresh = 192*1024;

int sysctl_ip6frag_time = IPV6_FRAG_TIMEOUT;

struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset;
};

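/*
 * Per-fragment state is stashed in the skb control block (skb->cb),
 * after the inet6 parameter area; FRAG6_CB() gives typed access to it.
 */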
#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb*)((skb)->cb))


/*
 *      Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
        struct frag_queue       *next;

        __u32                   id;             /* fragment id          */
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        spinlock_t              lock;
        atomic_t                refcnt;
        struct timer_list       timer;          /* expire timer         */
        struct sk_buff          *fragments;
        int                     len;
        int                     meat;
        int                     iif;
        struct timeval          stamp;
        unsigned int            csum;
        __u8                    last_in;        /* has first/last segment arrived? */
#define COMPLETE                4
#define FIRST_IN                2
#define LAST_IN                 1
        __u16                   nhoffset;
        struct frag_queue       **pprev;
};

/* Hash table. */

#define IP6Q_HASHSZ     64

static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ];
static rwlock_t ip6_frag_lock = RW_LOCK_UNLOCKED;
int ip6_frag_nqueues = 0;

static __inline__ void __fq_unlink(struct frag_queue *fq)
{
        if(fq->next)
                fq->next->pprev = fq->pprev;
        *fq->pprev = fq->next;
        ip6_frag_nqueues--;
}

static __inline__ void fq_unlink(struct frag_queue *fq)
{
        write_lock(&ip6_frag_lock);
        __fq_unlink(fq);
        write_unlock(&ip6_frag_lock);
}

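/*
 * Hash on the fragment id and the low 32 bits of both addresses, then
 * fold the result down to the table size.  IP6Q_HASHSZ must be a power
 * of two for the final mask to work.
 */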
static __inline__ unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
                                          struct in6_addr *daddr)
{
        unsigned int h = saddr->s6_addr32[3] ^ daddr->s6_addr32[3] ^ id;

        h ^= (h>>16);
        h ^= (h>>8);
        return h & (IP6Q_HASHSZ - 1);
}


atomic_t ip6_frag_mem = ATOMIC_INIT(0);

/* Memory Tracking Functions. */
extern __inline__ void frag_kfree_skb(struct sk_buff *skb)
{
        atomic_sub(skb->truesize, &ip6_frag_mem);
        kfree_skb(skb);
}

extern __inline__ void frag_free_queue(struct frag_queue *fq)
{
        atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem);
        kfree(fq);
}

extern __inline__ struct frag_queue *frag_alloc_queue(void)
{
        struct frag_queue *fq = kmalloc(sizeof(struct frag_queue), GFP_ATOMIC);

        if(!fq)
                return NULL;
        atomic_add(sizeof(struct frag_queue), &ip6_frag_mem);
        return fq;
}

/* Destruction primitives. */

/* Complete destruction of fq. */
static void ip6_frag_destroy(struct frag_queue *fq)
{
        struct sk_buff *fp;

        BUG_TRAP(fq->last_in&COMPLETE);
        BUG_TRAP(del_timer(&fq->timer) == 0);

        /* Release all fragment data. */
        fp = fq->fragments;
        while (fp) {
                struct sk_buff *xp = fp->next;

                frag_kfree_skb(fp);
                fp = xp;
        }

        frag_free_queue(fq);
}

static __inline__ void fq_put(struct frag_queue *fq)
{
        if (atomic_dec_and_test(&fq->refcnt))
                ip6_frag_destroy(fq);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);

        if (!(fq->last_in & COMPLETE)) {
                fq_unlink(fq);
                atomic_dec(&fq->refcnt);
                fq->last_in |= COMPLETE;
        }
}

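/*
 * Called when ip6_frag_mem exceeds the high watermark: scan the hash
 * buckets and kill the oldest queue in each (the tail of the chain,
 * since new queues are inserted at the head) until usage falls below
 * the low watermark or no further progress can be made.
 */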
static void ip6_evictor(void)
{
        int i, progress;

        do {
                if (atomic_read(&ip6_frag_mem) <= sysctl_ip6frag_low_thresh)
                        return;
                progress = 0;
                for (i = 0; i < IP6Q_HASHSZ; i++) {
                        struct frag_queue *fq;
                        if (ip6_frag_hash[i] == NULL)
                                continue;

                        read_lock(&ip6_frag_lock);
                        if ((fq = ip6_frag_hash[i]) != NULL) {
                                /* find the oldest queue for this hash bucket */
                                while (fq->next)
                                        fq = fq->next;
                                atomic_inc(&fq->refcnt);
                                read_unlock(&ip6_frag_lock);

                                spin_lock(&fq->lock);
                                if (!(fq->last_in&COMPLETE))
                                        fq_kill(fq);
                                spin_unlock(&fq->lock);

                                fq_put(fq);
                                IP6_INC_STATS_BH(Ip6ReasmFails);
                                progress = 1;
                                continue;
                        }
                        read_unlock(&ip6_frag_lock);
                }
        } while (progress);
}

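/*
 * Timer callback: the queue timed out before all fragments arrived.
 * Per RFC 2460, an ICMPv6 Time Exceeded (fragment reassembly time
 * exceeded) error is sent only if the first fragment was received.
 */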
static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq = (struct frag_queue *) data;

        spin_lock(&fq->lock);

        if (fq->last_in & COMPLETE)
                goto out;

        fq_kill(fq);

        IP6_INC_STATS_BH(Ip6ReasmTimeout);
        IP6_INC_STATS_BH(Ip6ReasmFails);

        /* Send error only if the first segment arrived. */
        if (fq->last_in&FIRST_IN && fq->fragments) {
                struct net_device *dev = dev_get_by_index(fq->iif);

                /*
                 * But send it via the device on which the LAST segment
                 * arrived.  And do not use a fq->dev pointer directly;
                 * the device might have already disappeared.
                 */
                if (dev) {
                        fq->fragments->dev = dev;
                        icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0,
                                    dev);
                        dev_put(dev);
                }
        }
out:
        spin_unlock(&fq->lock);
        fq_put(fq);
}

/* Creation primitives. */

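/*
 * Insert a freshly allocated queue into the hash table.  On SMP another
 * CPU may have created a queue for the same (id, saddr, daddr) between
 * our lookup and this insert, so recheck under the write lock and, if
 * one is found, drop the new queue and reuse the existing one.
 */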
static struct frag_queue *ip6_frag_intern(unsigned int hash,
                                          struct frag_queue *fq_in)
{
        struct frag_queue *fq;

        write_lock(&ip6_frag_lock);
#ifdef CONFIG_SMP
        for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
                if (fq->id == fq_in->id &&
                    !ipv6_addr_cmp(&fq_in->saddr, &fq->saddr) &&
                    !ipv6_addr_cmp(&fq_in->daddr, &fq->daddr)) {
                        atomic_inc(&fq->refcnt);
                        write_unlock(&ip6_frag_lock);
                        fq_in->last_in |= COMPLETE;
                        fq_put(fq_in);
                        return fq;
                }
        }
#endif
        fq = fq_in;

        if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
                atomic_inc(&fq->refcnt);

        atomic_inc(&fq->refcnt);
        if((fq->next = ip6_frag_hash[hash]) != NULL)
                fq->next->pprev = &fq->next;
        ip6_frag_hash[hash] = fq;
        fq->pprev = &ip6_frag_hash[hash];
        ip6_frag_nqueues++;
        write_unlock(&ip6_frag_lock);
        return fq;
}


static struct frag_queue *
ip6_frag_create(unsigned int hash, u32 id, struct in6_addr *src, struct in6_addr *dst)
{
        struct frag_queue *fq;

        if ((fq = frag_alloc_queue()) == NULL)
                goto oom;

        memset(fq, 0, sizeof(struct frag_queue));

        fq->id = id;
        ipv6_addr_copy(&fq->saddr, src);
        ipv6_addr_copy(&fq->daddr, dst);

        /* init_timer has been done by the memset */
        fq->timer.function = ip6_frag_expire;
        fq->timer.data = (long) fq;
        fq->lock = SPIN_LOCK_UNLOCKED;
        atomic_set(&fq->refcnt, 1);

        return ip6_frag_intern(hash, fq);

oom:
        IP6_INC_STATS_BH(Ip6ReasmFails);
        return NULL;
}

static __inline__ struct frag_queue *
fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
{
        struct frag_queue *fq;
        unsigned int hash = ip6qhashfn(id, src, dst);

        read_lock(&ip6_frag_lock);
        for(fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
                if (fq->id == id &&
                    !ipv6_addr_cmp(src, &fq->saddr) &&
                    !ipv6_addr_cmp(dst, &fq->daddr)) {
                        atomic_inc(&fq->refcnt);
                        read_unlock(&ip6_frag_lock);
                        return fq;
                }
        }
        read_unlock(&ip6_frag_lock);

        return ip6_frag_create(hash, id, src, dst);
}

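/*
 * Add one fragment to a queue: validate its offset and length against
 * what has already arrived, trim any overlap with neighbouring
 * fragments, and link the skb into the offset-sorted fragment list.
 */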
static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                           struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        int offset, end;

        if (fq->last_in & COMPLETE)
                goto err;

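        /* The fragment offset lives in the upper 13 bits of frag_off,
         * in 8-octet units, so masking off the low three bits of the
         * host-order value yields the byte offset directly.  E.g. a
         * frag_off of 0x0019 means offset 24 bytes with the M (more
         * fragments) bit set.  'end' is the offset one past this
         * fragment's last byte: the payload length minus the extension
         * headers that precede the fragmentable part.
         */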
        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
                        ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));

        if ((unsigned int)end >= 65536) {
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw);
                return;
        }

        if (skb->ip_summed == CHECKSUM_HW)
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0));

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(0x0001))) {
                /* If we already have some bits beyond end
                 * or a different end, the segment is corrupted.
                 */
                if (end < fq->len ||
                    ((fq->last_in & LAST_IN) && end != fq->len))
                        goto err;
                fq->last_in |= LAST_IN;
                fq->len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
                 * Required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return;
                }
                if (end > fq->len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->last_in & LAST_IN)
                                goto err;
                        fq->len = end;
                }
        }

        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;
        if (end-offset < skb->len) {
                if (pskb_trim(skb, end - offset))
                        goto err;
                if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                        skb->ip_summed = CHECKSUM_NONE;
        }

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for(next = fq->fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        /* Look for overlap with succeeding segments.
         * If we can merge fragments, do it.
         */
        while (next && FRAG6_CB(next)->offset < end) {
                int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG6_CB(next)->offset += i;    /* next fragment */
                        fq->meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;
                        /* Old fragment is completely covered by the
                         * new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                fq->fragments = next;

                        fq->meat -= free_it->len;
                        frag_kfree_skb(free_it);
                }
        }

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                fq->fragments = skb;

        if (skb->dev)
                fq->iif = skb->dev->ifindex;
        skb->dev = NULL;
        fq->stamp = skb->stamp;
        fq->meat += skb->len;
        atomic_add(skb->truesize, &ip6_frag_mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->last_in |= FIRST_IN;
        }
        return;

err:
        kfree_skb(skb);
}

/*
 *      Check if this packet is complete.
 *      Returns -1 on any failure, and the offset of the nexthdr field
 *      in the reassembled frame otherwise.
 *
 *      It is called with the fq locked, and the caller must check that
 *      the queue is eligible for reassembly, i.e. it is not COMPLETE,
 *      the last and the first frames have arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
                          struct net_device *dev)
{
        struct sk_buff *fp, *head = fq->fragments;
        int    remove_fraghdr = 0;
        int    payload_len;
        int    nhoff;

        fq_kill(fq);

        BUG_TRAP(head != NULL);
        BUG_TRAP(FRAG6_CB(head)->offset == 0);

        /* Unfragmented part is taken from the first segment. */
        payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len;
        nhoff = head->h.raw - head->nh.raw;

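        /* payload_len is a 16-bit field.  Removing the 8-byte fragment
         * header may bring an otherwise oversized datagram back under
         * the limit; if even that is not enough, give up.
         */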
        if (payload_len > 65535) {
                payload_len -= 8;
                if (payload_len > 65535)
                        goto out_oversize;
                remove_fraghdr = 1;
        }

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
        if (skb_shinfo(head)->frag_list) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_shinfo(head)->frag_list = NULL;
                for (i=0; i<skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &ip6_frag_mem);
        }

        /* Normally we do not remove frag header from datagram, but
         * we have to do this and to relocate header, when payload
         * is > 65535-8. */
        if (remove_fraghdr) {
                nhoff = fq->nhoffset;
                head->nh.raw[nhoff] = head->h.raw[0];
                memmove(head->head+8, head->head, (head->data-head->head)-8);
                head->mac.raw += 8;
                head->nh.raw += 8;
        } else {
                ((struct frag_hdr*)head->h.raw)->frag_off = 0;
        }

        skb_shinfo(head)->frag_list = head->next;
        head->h.raw = head->data;
        skb_push(head, head->data - head->nh.raw);
        atomic_sub(head->truesize, &ip6_frag_mem);

        for (fp=head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_HW)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &ip6_frag_mem);
        }

        head->next = NULL;
        head->dev = dev;
        head->stamp = fq->stamp;
        head->nh.ipv6h->payload_len = htons(payload_len);

        *skb_in = head;

        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_HW)
                head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);

        IP6_INC_STATS_BH(Ip6ReasmOKs);
        fq->fragments = NULL;
        return nhoff;

out_oversize:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
        goto out_fail;
out_oom:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        IP6_INC_STATS_BH(Ip6ReasmFails);
        return -1;
}

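/*
 * Main reassembly entry point, reached when a Fragment header is the
 * next header to process.  Returns the offset of the nexthdr field to
 * continue parsing from; -1 means the skb was consumed (queued for
 * reassembly, or dropped on error).  On successful reassembly *skbp
 * is replaced with the rebuilt datagram.
 */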
int ipv6_reassembly(struct sk_buff **skbp, int nhoff)
{
        struct sk_buff *skb = *skbp;
        struct net_device *dev = skb->dev;
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr;

        hdr = skb->nh.ipv6h;

        IP6_INC_STATS_BH(Ip6ReasmReqds);

        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len==0) {
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
                return -1;
        }
        if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) {
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw);
                return -1;
        }

        hdr = skb->nh.ipv6h;
        fhdr = (struct frag_hdr *)skb->h.raw;

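        /* 0xFFF9 covers the 13-bit fragment offset and the M bit: if
         * all are zero this is a lone "fragment" carrying the whole
         * datagram, so just skip the header.
         */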
        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
                skb->h.raw += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(Ip6ReasmOKs);

                return (u8*)fhdr - skb->nh.raw;
        }

        if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
                ip6_evictor();

        if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr)) != NULL) {
                int ret = -1;

                spin_lock(&fq->lock);

                ip6_frag_queue(fq, skb, fhdr, nhoff);

                if (fq->last_in == (FIRST_IN|LAST_IN) &&
                    fq->meat == fq->len)
                        ret = ip6_frag_reasm(fq, skbp, dev);

                spin_unlock(&fq->lock);
                fq_put(fq);
                return ret;
        }

        IP6_INC_STATS_BH(Ip6ReasmFails);
        kfree_skb(skb);
        return -1;
}