/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque	<roque@di.fc.ul.pt>
 *
 *	$Id: reassembly.c,v 1.1.1.1 2005/04/11 02:51:13 jack Exp $
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen	Make it work with multiple hosts.
 *			More RFC compliance.
 *
 *	Horst von Brand	Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
/* Reassembly memory accounting: the evictor starts reclaiming queues
 * above high_thresh and stops once usage drops below low_thresh.
 */
int sysctl_ip6frag_high_thresh = 256*1024;
int sysctl_ip6frag_low_thresh = 192*1024;

int sysctl_ip6frag_time = IPV6_FRAG_TIMEOUT;
struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset; /* offset of this fragment within the datagram */
};

#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb*)((skb)->cb))
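/*
 * A minimal sketch of how the control-buffer cast is used (field names
 * match the struct above; the surrounding logic is illustrative):
 *
 *	FRAG6_CB(skb)->offset = offset;
 *	for (next = fq->fragments; next; next = next->next)
 *		if (FRAG6_CB(next)->offset >= offset)
 *			break;
 *
 * skb->cb is per-skb scratch space that each protocol layer may reuse
 * while it owns the skb, so the fragment offset rides along without
 * enlarging struct sk_buff.
 */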
/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
        struct frag_queue       *next;

        __u32                   id;             /* fragment id          */
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        spinlock_t              lock;
        atomic_t                refcnt;
        struct timer_list       timer;          /* expire timer         */
        struct sk_buff          *fragments;
        int                     len;            /* total datagram length */
        int                     meat;           /* bytes received so far */
        int                     iif;
        struct timeval          stamp;
        unsigned int            csum;
        __u8                    last_in;        /* has first/last segment arrived? */
#define COMPLETE                4
#define FIRST_IN                2
#define LAST_IN                 1

        __u16                   nhoffset;
        struct frag_queue       **pprev;
};
/* Hash table. */

#define IP6Q_HASHSZ     64

static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ];
static rwlock_t ip6_frag_lock = RW_LOCK_UNLOCKED;
int ip6_frag_nqueues = 0;
static __inline__ void __fq_unlink(struct frag_queue *fq)
{
        if (fq->next)
                fq->next->pprev = fq->pprev;
        *fq->pprev = fq->next;
        ip6_frag_nqueues--;
}

static __inline__ void fq_unlink(struct frag_queue *fq)
{
        write_lock(&ip6_frag_lock);
        __fq_unlink(fq);
        write_unlock(&ip6_frag_lock);
}
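/*
 * The hash chains are singly linked with a back-pointer to the previous
 * link's "next" slot (pprev), so unlink needs no list walk.  A hedged
 * sketch of the matching insertion, mirroring what ip6_frag_intern()
 * does further down:
 *
 *	if ((fq->next = ip6_frag_hash[hash]) != NULL)
 *		fq->next->pprev = &fq->next;
 *	ip6_frag_hash[hash] = fq;
 *	fq->pprev = &ip6_frag_hash[hash];
 */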
static __inline__ unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
                                          struct in6_addr *daddr)
{
        unsigned int h = saddr->s6_addr32[3] ^ daddr->s6_addr32[3] ^ id;

        h ^= (h >> 16);
        h ^= (h >> 8);
        return h & (IP6Q_HASHSZ - 1);
}
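/*
 * Only the low 32 bits of each address feed the hash; the two folds
 * above smear them into the 6 bits that index the table.  A worked
 * example with illustrative values: saddr ending ...::1, daddr ending
 * ...::2, id 0x12345678:
 *
 *	h = 0x1 ^ 0x2 ^ 0x12345678	=  0x1234567b
 *	h ^= h >> 16			-> 0x1234444f
 *	h ^= h >> 8			-> 0x1226700b
 *	h & 63				-> bucket 11
 */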
atomic_t ip6_frag_mem = ATOMIC_INIT(0);

/* Memory Tracking Functions. */
extern __inline__ void frag_kfree_skb(struct sk_buff *skb)
{
        atomic_sub(skb->truesize, &ip6_frag_mem);
        kfree_skb(skb);
}

extern __inline__ void frag_free_queue(struct frag_queue *fq)
{
        atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem);
        kfree(fq);
}

extern __inline__ struct frag_queue *frag_alloc_queue(void)
{
        struct frag_queue *fq = kmalloc(sizeof(struct frag_queue), GFP_ATOMIC);

        if (fq == NULL)
                return NULL;
        atomic_add(sizeof(struct frag_queue), &ip6_frag_mem);
        return fq;
}
/* Destruction primitives. */

/* Complete destruction of fq. */
static void ip6_frag_destroy(struct frag_queue *fq)
{
        struct sk_buff *fp;

        BUG_TRAP(fq->last_in & COMPLETE);
        BUG_TRAP(del_timer(&fq->timer) == 0);

        /* Release all fragment data. */
        fp = fq->fragments;
        while (fp) {
                struct sk_buff *xp = fp->next;

                frag_kfree_skb(fp);
                fp = xp;
        }

        frag_free_queue(fq);
}

static __inline__ void fq_put(struct frag_queue *fq)
{
        if (atomic_dec_and_test(&fq->refcnt))
                ip6_frag_destroy(fq);
}
/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);

        if (!(fq->last_in & COMPLETE)) {
                fq_unlink(fq);
                atomic_dec(&fq->refcnt);
                fq->last_in |= COMPLETE;
        }
}
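/*
 * Reference-count lifecycle, summarized from the code above (a hedged
 * reading, not from the original comments): one reference is held by
 * the hash table, one by the pending timer, and one transiently by each
 * path that looked the queue up.  fq_kill() drops the timer and hash
 * references; the final fq_put() then frees the queue.
 */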
static void ip6_evictor(void)
{
        int i, progress;

        do {
                if (atomic_read(&ip6_frag_mem) <= sysctl_ip6frag_low_thresh)
                        return;
                progress = 0;
                for (i = 0; i < IP6Q_HASHSZ; i++) {
                        struct frag_queue *fq;
                        if (ip6_frag_hash[i] == NULL)
                                continue;

                        read_lock(&ip6_frag_lock);
                        if ((fq = ip6_frag_hash[i]) != NULL) {
                                /* find the oldest queue for this hash bucket */
                                while (fq->next)
                                        fq = fq->next;
                                atomic_inc(&fq->refcnt);
                                read_unlock(&ip6_frag_lock);

                                spin_lock(&fq->lock);
                                if (!(fq->last_in & COMPLETE))
                                        fq_kill(fq);
                                spin_unlock(&fq->lock);

                                fq_put(fq);
                                IP6_INC_STATS_BH(Ip6ReasmFails);
                                progress = 1;
                                continue;
                        }
                        read_unlock(&ip6_frag_lock);
                }
        } while (progress);
}
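/*
 * Eviction policy in brief: each pass reclaims at most one queue per
 * bucket, the one at the tail of the chain (the oldest, since new
 * queues are inserted at the head), and passes repeat until memory use
 * falls below sysctl_ip6frag_low_thresh.  The do/while framing is a
 * reconstruction, consistent with the `progress` idiom used by the
 * IPv4 counterpart in net/ipv4/ip_fragment.c.
 */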
static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq = (struct frag_queue *) data;

        spin_lock(&fq->lock);

        if (fq->last_in & COMPLETE)
                goto out;

        fq_kill(fq);

        IP6_INC_STATS_BH(Ip6ReasmTimeout);
        IP6_INC_STATS_BH(Ip6ReasmFails);

        /* Send error only if the first segment arrived. */
        if ((fq->last_in & FIRST_IN) && fq->fragments) {
                struct net_device *dev = dev_get_by_index(fq->iif);

                /*
                 * Use the device on which the LAST segment arrived as
                 * the source device, and do not use a device pointer
                 * saved in fq directly: the device might have
                 * disappeared already.
                 */
                if (dev) {
                        fq->fragments->dev = dev;
                        icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED,
                                    ICMPV6_EXC_FRAGTIME, 0, dev);
                        dev_put(dev);
                }
        }
out:
        spin_unlock(&fq->lock);
        fq_put(fq);
}
/* Creation primitives. */

static struct frag_queue *ip6_frag_intern(unsigned int hash,
                                          struct frag_queue *fq_in)
{
        struct frag_queue *fq;

        write_lock(&ip6_frag_lock);
#ifdef CONFIG_SMP
        /* Another CPU may have created the same queue while we were
         * allocating ours; prefer the one already in the table.
         */
        for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
                if (fq->id == fq_in->id &&
                    !ipv6_addr_cmp(&fq_in->saddr, &fq->saddr) &&
                    !ipv6_addr_cmp(&fq_in->daddr, &fq->daddr)) {
                        atomic_inc(&fq->refcnt);
                        write_unlock(&ip6_frag_lock);
                        fq_in->last_in |= COMPLETE;
                        fq_put(fq_in);
                        return fq;
                }
        }
#endif
        fq = fq_in;

        if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
                atomic_inc(&fq->refcnt);

        atomic_inc(&fq->refcnt);
        if ((fq->next = ip6_frag_hash[hash]) != NULL)
                fq->next->pprev = &fq->next;
        ip6_frag_hash[hash] = fq;
        fq->pprev = &ip6_frag_hash[hash];
        ip6_frag_nqueues++;
        write_unlock(&ip6_frag_lock);
        return fq;
}
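/*
 * Why the duplicate check matters: fq_find() drops ip6_frag_lock before
 * calling ip6_frag_create(), so two CPUs can race to intern a queue for
 * the same (id, saddr, daddr).  A hedged sketch of the losing path: the
 * second CPU finds the winner's queue already in the table, marks its
 * own fq_in COMPLETE (so fq_put() destroys it without touching the
 * hash), and returns the shared queue instead.
 */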
static struct frag_queue *
ip6_frag_create(unsigned int hash, u32 id, struct in6_addr *src, struct in6_addr *dst)
{
        struct frag_queue *fq;

        if ((fq = frag_alloc_queue()) == NULL)
                goto oom;

        memset(fq, 0, sizeof(struct frag_queue));

        fq->id = id;
        ipv6_addr_copy(&fq->saddr, src);
        ipv6_addr_copy(&fq->daddr, dst);

        /* init_timer has been done by the memset */
        fq->timer.function = ip6_frag_expire;
        fq->timer.data = (long) fq;
        fq->lock = SPIN_LOCK_UNLOCKED;
        atomic_set(&fq->refcnt, 1);

        return ip6_frag_intern(hash, fq);

oom:
        IP6_INC_STATS_BH(Ip6ReasmFails);
        return NULL;
}
static __inline__ struct frag_queue *
fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
{
        struct frag_queue *fq;
        unsigned int hash = ip6qhashfn(id, src, dst);

        read_lock(&ip6_frag_lock);
        for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
                if (fq->id == id &&
                    !ipv6_addr_cmp(src, &fq->saddr) &&
                    !ipv6_addr_cmp(dst, &fq->daddr)) {
                        atomic_inc(&fq->refcnt);
                        read_unlock(&ip6_frag_lock);
                        return fq;
                }
        }
        read_unlock(&ip6_frag_lock);

        return ip6_frag_create(hash, id, src, dst);
}
static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                           struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        int offset, end;

        if (fq->last_in & COMPLETE)
                goto err;

        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
                        ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));

        if ((unsigned int)end >= 65536) {
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  (u8 *)&fhdr->frag_off - skb->nh.raw);
                return;
        }

        if (skb->ip_summed == CHECKSUM_HW)
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(skb->nh.raw,
                                                  (u8 *)(fhdr + 1) - skb->nh.raw, 0));
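        /*
         * Worked example (illustrative numbers): suppose the fragment
         * header directly follows the fixed IPv6 header, payload_len is
         * 1456, and frag_off is 0x0559 in host order.  Then:
         *	offset = 0x0559 & ~0x7 = 0x0558 = 1368 bytes
         *	M flag = 0x0559 & 0x0001 = 1 (more fragments follow)
         *	end    = 1368 + (1456 - 8) = 2816
         * i.e. this fragment carries datagram bytes [1368, 2816).
         */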
        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(0x0001))) {
                /* If we already have some bits beyond end
                 * or have a different end, the segment is corrupted.
                 */
                if (end < fq->len ||
                    ((fq->last_in & LAST_IN) && end != fq->len))
                        goto err;
                fq->last_in |= LAST_IN;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
                 * Required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC 2460 says to always send a parameter
                         * problem in this case. -DaveM
                         */
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return;
                }
                if (end > fq->len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->last_in & LAST_IN)
                                goto err;
                        fq->len = end;
                }
        }

        if (end == offset)
                goto err;
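        /*
         * The 8-byte rule above comes from RFC 2460 (Fragment header):
         * every fragment except the last must carry a multiple of
         * 8 bytes, because the offset field counts 8-octet units.  For
         * example, a non-final fragment ending at byte 2817 (so
         * end & 0x7 == 1) could never be continued by any following
         * fragment's offset, so the sender is broken and we report it.
         */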
        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;
        if (end - offset < skb->len) {
                if (pskb_trim(skb, end - offset))
                        goto err;
                if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                        skb->ip_summed = CHECKSUM_NONE;
        }
        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for (next = fq->fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with the
         * preceding fragment and, if needed, align things so that any
         * overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }
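        /*
         * Overlap example (illustrative): if prev covers [0, 1448) and
         * this fragment claims [1440, 2888), then i = 1448 - 1440 = 8,
         * so 8 bytes are pulled off the head of skb and offset becomes
         * 1448; the chain stays strictly ordered and non-overlapping on
         * the left side.
         */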
        /* Look for overlap with succeeding segments.
         * If we can merge fragments, do it.
         */
        while (next && FRAG6_CB(next)->offset < end) {
                int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG6_CB(next)->offset += i;    /* next fragment */
                        fq->meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* The old fragment is completely overridden by
                         * the new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                fq->fragments = next;

                        fq->meat -= free_it->len;
                        frag_kfree_skb(free_it);
                }
        }
        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                fq->fragments = skb;

        if (skb->dev)
                fq->iif = skb->dev->ifindex;
        skb->dev = NULL;
        fq->stamp = skb->stamp;
        fq->meat += skb->len;
        atomic_add(skb->truesize, &ip6_frag_mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->last_in |= FIRST_IN;
        }
        return;

err:
        kfree_skb(skb);
}
/*
 *	Check if this packet is complete.
 *	Returns -1 on failure for any reason, and the offset of the
 *	current nexthdr field in the reassembled frame on success.
 *
 *	It is called with the fq locked, and the caller must check that
 *	the queue is eligible for reassembly, i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
                          struct net_device *dev)
{
        struct sk_buff *fp, *head = fq->fragments;
        int remove_fraghdr = 0;
        int payload_len;
        int nhoff;

        fq_kill(fq);

        BUG_TRAP(head != NULL);
        BUG_TRAP(FRAG6_CB(head)->offset == 0);

        /* Unfragmented part is taken from the first segment. */
        payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len;
        nhoff = head->h.raw - head->nh.raw;

        if (payload_len > 65535) {
                payload_len -= 8;
                if (payload_len > 65535)
                        goto out_oversize;
                remove_fraghdr = 1;
        }
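        /*
         * Why the retry after subtracting 8: payload_len still includes
         * the 8-byte fragment header at this point.  If dropping that
         * header brings the total back within the 16-bit payload_len
         * field, the datagram can be salvaged by stripping the header
         * (remove_fraghdr, handled further down); otherwise it is
         * genuinely oversized and must be discarded.
         */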
        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
        if (skb_shinfo(head)->frag_list) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_shinfo(head)->frag_list = NULL;
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &ip6_frag_mem);
        }
        /* Normally we do not remove the fragment header from the
         * datagram, but we have to do so, and to relocate the preceding
         * headers, when the payload would otherwise exceed 65535.
         */
        if (remove_fraghdr) {
                nhoff = fq->nhoffset;
                head->nh.raw[nhoff] = head->h.raw[0];
                memmove(head->head + 8, head->head,
                        (head->data - head->head) - 8);
                head->nh.raw += 8;
                head->h.raw += 8;
        } else {
                ((struct frag_hdr *)head->h.raw)->frag_off = 0;
        }

        skb_shinfo(head)->frag_list = head->next;
        head->h.raw = head->data;
        skb_push(head, head->data - head->nh.raw);
        atomic_sub(head->truesize, &ip6_frag_mem);
        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_HW)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &ip6_frag_mem);
        }
        head->next = NULL;
        head->dev = dev;
        head->stamp = fq->stamp;
        head->nh.ipv6h->payload_len = htons(payload_len);

        *skb_in = head;

        /* Yes, and fold the redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_HW)
                head->csum = csum_partial(head->nh.raw,
                                          head->h.raw - head->nh.raw,
                                          head->csum);

        IP6_INC_STATS_BH(Ip6ReasmOKs);
        fq->fragments = NULL;
        return nhoff;

out_oversize:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
        goto out_fail;
out_oom:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        IP6_INC_STATS_BH(Ip6ReasmFails);
        return -1;
}
int ipv6_reassembly(struct sk_buff **skbp, int nhoff)
{
        struct sk_buff *skb = *skbp;
        struct net_device *dev = skb->dev;
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr;

        hdr = skb->nh.ipv6h;

        IP6_INC_STATS_BH(Ip6ReasmReqds);

        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len == 0) {
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw - skb->nh.raw);
                return -1;
        }
        if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + sizeof(struct frag_hdr))) {
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw - skb->nh.raw);
                return -1;
        }

        hdr = skb->nh.ipv6h;
        fhdr = (struct frag_hdr *)skb->h.raw;

        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
                skb->h.raw += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(Ip6ReasmOKs);

                return (u8 *)fhdr - skb->nh.raw;
        }
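        /*
         * About the htons(0xFFF9) test: in frag_off the top 13 bits
         * (mask 0xFFF8) are the fragment offset and bit 0x0001 is the
         * M ("more fragments") flag; 0x0002 and 0x0004 are reserved.
         * 0xFFF9 == 0xFFF8 | 0x0001, so the test is true only for an
         * "atomic" fragment, offset 0 with no fragments to follow,
         * which can simply be unwrapped in place above.
         */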
        if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
                ip6_evictor();

        if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr)) != NULL) {
                int ret = -1;

                spin_lock(&fq->lock);

                ip6_frag_queue(fq, skb, fhdr, nhoff);

                if (fq->last_in == (FIRST_IN|LAST_IN) &&
                    fq->meat == fq->len)
                        ret = ip6_frag_reasm(fq, skbp, dev);

                spin_unlock(&fq->lock);
                fq_put(fq);
                return ret;
        }

        kfree_skb(skb);
        IP6_INC_STATS_BH(Ip6ReasmFails);
        return -1;
}
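/*
 * Caller contract (a hedged summary, not from the original comments):
 * the extension-header parser calls ipv6_reassembly() when it meets a
 * NEXTHDR_FRAGMENT header, passing the offset of the previous nexthdr
 * byte in nhoff.  The completeness test above is the whole story:
 * reassembly fires only once both FIRST_IN and LAST_IN are set and the
 * byte count received (fq->meat) equals the datagram length learned
 * from the final fragment (fq->len); until then the call returns -1
 * and the skb is owned by the queue.
 */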