/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
static kmem_cache_t *skbuff_head_cache;

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */
/**
 *	skb_over_panic	- private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
        printk(KERN_INFO "skput:over: %p:%d put:%d dev:%s\n",
               here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}
/**
 *	skb_under_panic	- private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */
void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
        printk(KERN_INFO "skput:under: %p:%d put:%d dev:%s\n",
               here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}
/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 */

/**
 *	alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of @size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
{
        struct sk_buff *skb;
        u8 *data;

        /* Get the HEAD */
        skb = kmem_cache_alloc(skbuff_head_cache,
                               gfp_mask & ~__GFP_DMA);
        if (!skb)
                goto out;

        /* Get the DATA. Size must match skb_add_mtu(). */
        size = SKB_DATA_ALIGN(size);
        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
        if (!data)
                goto nodata;

        memset(skb, 0, offsetof(struct sk_buff, truesize));
        skb->truesize = size + sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb->tail = data;
        skb->end  = data + size;

        atomic_set(&(skb_shinfo(skb)->dataref), 1);
        skb_shinfo(skb)->nr_frags = 0;
        skb_shinfo(skb)->tso_size = 0;
        skb_shinfo(skb)->tso_segs = 0;
        skb_shinfo(skb)->frag_list = NULL;
out:
        return skb;
nodata:
        kmem_cache_free(skbuff_head_cache, skb);
        skb = NULL;
        goto out;
}
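
/*
 * Illustrative usage (a sketch, not part of the original file): a typical
 * receive path pairs alloc_skb() with skb_reserve() and skb_put(). The
 * names pkt_len and rx_buf below are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(pkt_len + 2, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, 2);	(headroom so the IP header lands aligned)
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 */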
static void skb_drop_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list = skb_shinfo(skb)->frag_list;

        skb_shinfo(skb)->frag_list = NULL;

        do {
                struct sk_buff *this = list;

                list = list->next;
                kfree_skb(this);
        } while (list);
}
static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
                skb_get(list);
}
void skb_release_data(struct sk_buff *skb)
{
        if (!skb->cloned ||
            atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
                if (skb_shinfo(skb)->nr_frags) {
                        int i;
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                                put_page(skb_shinfo(skb)->frags[i].page);
                }

                if (skb_shinfo(skb)->frag_list)
                        skb_drop_fraglist(skb);

#if defined(CONFIG_MIPS_BRCM)
                /*
                 * If skb->retfreeq_data_prealloc is 1, the data buffer was
                 * pre-allocated by our network driver, so return it to the
                 * driver rather than freeing it. (songw)
                 */
                if (skb->retfreeq_cb && skb->retfreeq_data_prealloc == 1) {
                        (*skb->retfreeq_cb)(skb->retfreeq_context, skb->head,
                                            FREE_DATA);
                        skb->retfreeq_data_prealloc = 0;
                } else
#endif
                        kfree(skb->head);
        }
}
/*
 *	Free an skbuff by memory without cleaning the state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
        skb_release_data(skb);
#if defined(CONFIG_MIPS_BRCM)
        /*
         * If skb->retfreeq_skb_prealloc is 1, the skb was pre-allocated
         * by our network driver, so hand it back instead of freeing it
         * to the slab cache. (songw)
         */
        if (skb->retfreeq_cb && skb->retfreeq_skb_prealloc == 1)
                (*skb->retfreeq_cb)(skb->retfreeq_context, skb, FREE_SKB);
        else
#endif
                kmem_cache_free(skbuff_head_cache, skb);
}
/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */
void __kfree_skb(struct sk_buff *skb)
{
        if (skb->list) {
                printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
                       "on a list (from %p).\n", NET_CALLER(skb));
                BUG();
        }

        dst_release(skb->dst);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
#endif
        if (skb->destructor) {
                if (in_irq())
                        printk(KERN_WARNING "Warning: kfree_skb on "
                               "hard IRQ %p\n", NET_CALLER(skb));
                skb->destructor(skb);
        }
#ifdef CONFIG_NETFILTER
        nf_conntrack_put(skb->nfct);
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
#endif
#endif

        kfree_skbmem(skb);
}
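
/*
 * Sketch of the intended calling convention (not from the original file):
 * callers drop references with kfree_skb(); __kfree_skb() only runs once
 * the last reference is gone.
 *
 *	struct sk_buff *extra = skb_get(skb);	(users becomes 2)
 *	kfree_skb(extra);			(users back to 1, nothing freed)
 *	kfree_skb(skb);				(users hits 0, __kfree_skb runs)
 */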
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
        struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);

        if (!n)
                return NULL;

#define C(x) n->x = skb->x

        n->next = n->prev = NULL;
        n->list = NULL;
        n->sk = NULL;
        C(stamp); C(dev); C(real_dev);
        C(h); C(nh); C(mac);
        C(dst);
        dst_clone(skb->dst);
        C(sp);
#ifdef CONFIG_INET
        secpath_get(skb->sp);
#endif
        memcpy(n->cb, skb->cb, sizeof(skb->cb));
        C(len); C(data_len); C(csum);
        C(local_df);
        n->cloned = 1;
        C(pkt_type); C(ip_summed);
        C(priority); C(protocol); C(security);
        n->destructor = NULL;
#ifdef CONFIG_NETFILTER
        C(nfmark); C(nfcache); C(nfct);
        nf_conntrack_get(skb->nfct);
#ifdef CONFIG_NETFILTER_DEBUG
        C(nf_debug);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        C(nf_bridge);
        nf_bridge_get(skb->nf_bridge);
#endif
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_HIPPI)
        C(private);
#endif
#ifdef CONFIG_NET_SCHED
        C(tc_index);
#ifdef CONFIG_NET_CLS_ACT
        n->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
        n->tc_verd = CLR_TC_OK2MUNGE(skb->tc_verd);
        n->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
#endif
#endif
#if defined(CONFIG_MIPS_BRCM)
        C(rcvfrom);
        C(retfreeq_cb);
        C(retfreeq_context);
        n->retfreeq_skb_prealloc = 0;
        C(retfreeq_data_prealloc);
        memcpy(n->extif, skb->extif, sizeof(skb->extif));
#endif
        C(truesize);
        atomic_set(&n->users, 1);
        C(head); C(data); C(tail); C(end);

        atomic_inc(&(skb_shinfo(skb)->dataref));
        skb->cloned = 1;

        return n;
}
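
/*
 * Hedged example (not in the original): a clone is the cheap way to get a
 * private struct sk_buff while sharing the payload, e.g. when handing the
 * same packet to a second consumer. deliver_to_tap() is hypothetical.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *	if (clone)
 *		deliver_to_tap(clone);	(data is shared: treat as read-only)
 */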
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
        /*
         *	Shift between the two data areas in bytes
         */
        unsigned long offset = new->data - old->data;

        new->list	= NULL;
        new->sk		= NULL;
        new->dev	= old->dev;
        new->real_dev	= old->real_dev;
        new->priority	= old->priority;
        new->protocol	= old->protocol;
        new->dst	= dst_clone(old->dst);
#ifdef CONFIG_INET
        new->sp		= secpath_get(old->sp);
#endif
        new->h.raw	= old->h.raw + offset;
        new->nh.raw	= old->nh.raw + offset;
        new->mac.raw	= old->mac.raw + offset;
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->local_df	= old->local_df;
        new->pkt_type	= old->pkt_type;
        new->stamp	= old->stamp;
        new->destructor = NULL;
        new->security	= old->security;
#ifdef CONFIG_NETFILTER
        new->nfmark	= old->nfmark;
        new->nfcache	= old->nfcache;
        new->nfct	= old->nfct;
        nf_conntrack_get(old->nfct);
#ifdef CONFIG_NETFILTER_DEBUG
        new->nf_debug	= old->nf_debug;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        new->nf_bridge	= old->nf_bridge;
        nf_bridge_get(old->nf_bridge);
#endif
#endif
#ifdef CONFIG_NET_SCHED
#ifdef CONFIG_NET_CLS_ACT
        new->tc_verd	= old->tc_verd;
#endif
        new->tc_index	= old->tc_index;
#endif
#if defined(CONFIG_MIPS_BRCM)
        new->rcvfrom	= old->rcvfrom;
        new->retfreeq_cb		= NULL;
        new->retfreeq_context		= NULL;
        new->retfreeq_skb_prealloc	= 0;
        new->retfreeq_data_prealloc	= 0;
#endif
        atomic_set(&new->users, 1);
}
/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so the &sk_buff becomes completely private and the
 *	caller may modify all of the data in the returned buffer. This also
 *	means the function is not recommended when only the header is going
 *	to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
{
        int headerlen = skb->data - skb->head;
        /*
         *	Allocate the copy buffer
         */
        struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
                                      gfp_mask);
        if (!n)
                return NULL;

        /* Set the data pointer */
        skb_reserve(n, headerlen);
        /* Set the tail pointer and length */
        skb_put(n, skb->len);
        n->csum	     = skb->csum;
        n->ip_summed = skb->ip_summed;

        if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
                BUG();

        copy_skb_header(n, skb);
        return n;
}
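
/*
 * Hedged example (not in the original): after skb_copy() the whole buffer
 * is private and linear, so even the payload may be rewritten.
 *
 *	struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);
 *	if (priv) {
 *		priv->nh.iph->ttl--;		(safe: data is fully private)
 *		ip_send_check(priv->nh.iph);
 *	}
 */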
/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in the header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only the header of an &sk_buff and needs
 *	a private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */
struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
{
        /*
         *	Allocate the copy buffer
         */
        struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);

        if (!n)
                goto out;

        /* Set the data pointer */
        skb_reserve(n, skb->data - skb->head);
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
        memcpy(n->data, skb->data, n->len);
        n->csum	     = skb->csum;
        n->ip_summed = skb->ip_summed;

        n->data_len  = skb->data_len;
        n->len	     = skb->len;

        if (skb_shinfo(skb)->nr_frags) {
                int i;

                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
                        get_page(skb_shinfo(n)->frags[i].page);
                }
                skb_shinfo(n)->nr_frags = i;
        }
        skb_shinfo(n)->tso_size = skb_shinfo(skb)->tso_size;
        skb_shinfo(n)->tso_segs = skb_shinfo(skb)->tso_segs;

        if (skb_shinfo(skb)->frag_list) {
                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
                skb_clone_fraglist(n);
        }

        copy_skb_header(n, skb);
out:
        return n;
}
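
/*
 * Hedged example (not in the original): pskb_copy() suffices when only
 * header fields are rewritten, since the paged fragments stay shared.
 *
 *	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);
 *	if (n)
 *		n->nh.iph->tos = 0;	(header private, frags still shared)
 */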
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success or a negative error
 *	code if expansion failed; in the latter case the &sk_buff is not
 *	changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
{
        int i;
        u8 *data;
        int size = nhead + (skb->end - skb->head) + ntail;
        long off;

        if (skb_shared(skb))
                BUG();

        size = SKB_DATA_ALIGN(size);

        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
        if (!data)
                goto nodata;

        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void. */
        memcpy(data + nhead, skb->head, skb->tail - skb->head);
        memcpy(data + size, skb->end, sizeof(struct skb_shared_info));

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                get_page(skb_shinfo(skb)->frags[i].page);

        if (skb_shinfo(skb)->frag_list)
                skb_clone_fraglist(skb);

        skb_release_data(skb);

        off = (data + nhead) - skb->head;

        skb->head     = data;
        skb->end      = data + size;
        skb->data    += off;
        skb->tail    += off;
        skb->mac.raw += off;
        skb->h.raw   += off;
        skb->nh.raw  += off;
        skb->cloned   = 0;
#if defined(CONFIG_MIPS_BRCM)
        /* The data buffer of this skb is no longer pre-allocated, even if
         * the skb itself is pre-allocated. (songw)
         */
        skb->retfreeq_data_prealloc = 0;
#endif
        atomic_set(&skb_shinfo(skb)->dataref, 1);
        return 0;

nodata:
        return -ENOMEM;
}
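
/*
 * Hedged example (not in the original): after a successful expansion every
 * cached pointer into the old header is stale and must be recomputed.
 *
 *	if (pskb_expand_head(skb, 16, 0, GFP_ATOMIC))
 *		return -ENOMEM;
 *	iph = skb->nh.iph;	(reload: skb->head and friends have moved)
 */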
/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
        struct sk_buff *skb2;
        int delta = headroom - skb_headroom(skb);

        if (delta <= 0)
                skb2 = pskb_copy(skb, GFP_ATOMIC);
        else {
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
                                             GFP_ATOMIC)) {
                        kfree_skb(skb2);
                        skb2 = NULL;
                }
        }
        return skb2;
}
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 *
 *	BUG ALERT: ip_summed is not copied. Why does this work? Is it used
 *	only by netfilter in the cases when checksum is recalculated? --ANK
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                int newheadroom, int newtailroom, int gfp_mask)
{
        /*
         *	Allocate the copy buffer
         */
        struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
                                      gfp_mask);
        int head_copy_len, head_copy_off;

        if (!n)
                return NULL;

        skb_reserve(n, newheadroom);

        /* Set the tail pointer and length */
        skb_put(n, skb->len);

        head_copy_len = skb_headroom(skb);
        head_copy_off = 0;
        if (newheadroom <= head_copy_len)
                head_copy_len = newheadroom;
        else
                head_copy_off = newheadroom - head_copy_len;

        /* Copy the linear header and data. */
        if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
                          skb->len + head_copy_len))
                BUG();

        copy_skb_header(n, skb);
        skb_shinfo(n)->tso_size = skb_shinfo(skb)->tso_size;
        skb_shinfo(n)->tso_segs = skb_shinfo(skb)->tso_segs;

        return n;
}
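
/*
 * Hedged example (not in the original): grow headroom while copying, then
 * push a new header. struct my_hdr and hdr are hypothetical.
 *
 *	struct sk_buff *big = skb_copy_expand(skb, sizeof(struct my_hdr),
 *					      0, GFP_ATOMIC);
 *	if (big)
 *		memcpy(skb_push(big, sizeof(struct my_hdr)),
 *		       &hdr, sizeof(hdr));
 */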
/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return NULL in out of memory cases.
 */

struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
{
        struct sk_buff *nskb;

        /* If the skbuff is non-linear, tailroom is always zero.. */
        if (skb_tailroom(skb) >= pad) {
                memset(skb->data + skb->len, 0, pad);
                return skb;
        }

        nskb = skb_copy_expand(skb, skb_headroom(skb),
                               skb_tailroom(skb) + pad, GFP_ATOMIC);
        kfree_skb(skb);
        if (nskb)
                memset(nskb->data + nskb->len, 0, pad);
        return nskb;
}
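
/*
 * Hedged example (not in the original): drivers typically pad runt frames
 * up to the minimum Ethernet length before transmission. Note that on the
 * reallocation path the original skb has already been freed.
 *
 *	if (skb->len < ETH_ZLEN) {
 *		skb = skb_pad(skb, ETH_ZLEN - skb->len);
 *		if (!skb)
 *			return 0;	(original buffer is gone)
 *	}
 */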
/* Trims skb to length len. It can change skb pointers, if "realloc" is 1.
 * If realloc==0 and trimming is impossible without change of data,
 * it is BUG().
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
{
        int offset = skb_headlen(skb);
        int nfrags = skb_shinfo(skb)->nr_frags;
        int i;

        for (i = 0; i < nfrags; i++) {
                int end = offset + skb_shinfo(skb)->frags[i].size;
                if (end > len) {
                        if (skb_cloned(skb)) {
                                if (!realloc)
                                        BUG();
                                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                                        return -ENOMEM;
                        }
                        if (len <= offset) {
                                put_page(skb_shinfo(skb)->frags[i].page);
                                skb_shinfo(skb)->nr_frags--;
                        } else {
                                skb_shinfo(skb)->frags[i].size = len - offset;
                        }
                }
                offset = end;
        }

        if (offset < len) {
                skb->data_len -= skb->len - len;
                skb->len       = len;
        } else {
                if (len <= skb_headlen(skb)) {
                        skb->len      = len;
                        skb->data_len = 0;
                        skb->tail     = skb->data + len;
                        if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
                                skb_drop_fraglist(skb);
                } else {
                        skb->data_len -= skb->len - len;
                        skb->len       = len;
                }
        }

        return 0;
}
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff: it expands
 *	the header, moving its tail forward and copying the necessary data
 *	from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
        /* If skb has not enough free space at tail, get new one
         * plus 128 bytes for future expansions. If we have enough
         * room at tail, reallocate without expansion only if skb is cloned.
         */
        int i, k, eat = (skb->tail + delta) - skb->end;

        if (eat > 0 || skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
                                     GFP_ATOMIC))
                        return NULL;
        }

        if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
                BUG();

        /* Optimization: no fragments, no reasons to preestimate
         * size of pulled pages. Superb.
         */
        if (!skb_shinfo(skb)->frag_list)
                goto pull_pages;

        /* Estimate size of pulled pages. */
        eat = delta;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size >= eat)
                        goto pull_pages;
                eat -= skb_shinfo(skb)->frags[i].size;
        }

        /* If we need to update the frag list, we are in trouble.
         * Certainly, it is possible to add an offset to skb data,
         * but taking into account that pulling is expected to
         * be a very rare operation, it is worth fighting against
         * further bloating of the skb head and crucifying ourselves
         * here instead. Pure masochism, indeed. 8)8)
         */
        if (eat) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
                struct sk_buff *clone = NULL;
                struct sk_buff *insp = NULL;

                do {
                        if (!list)
                                BUG();

                        if (list->len <= eat) {
                                /* Eaten as whole. */
                                eat -= list->len;
                                list = list->next;
                                insp = list;
                        } else {
                                /* Eaten partially. */

                                if (skb_shared(list)) {
                                        /* Sucks! We need to fork list. :-( */
                                        clone = skb_clone(list, GFP_ATOMIC);
                                        if (!clone)
                                                return NULL;
                                        insp = list->next;
                                        list = clone;
                                } else {
                                        /* This may be pulled without
                                         * problems. */
                                        insp = list;
                                }
                                if (!pskb_pull(list, eat)) {
                                        if (clone)
                                                kfree_skb(clone);
                                        return NULL;
                                }
                                break;
                        }
                } while (eat);

                /* Free pulled out fragments. */
                while ((list = skb_shinfo(skb)->frag_list) != insp) {
                        skb_shinfo(skb)->frag_list = list->next;
                        kfree_skb(list);
                }
                /* And insert new clone at head. */
                if (clone) {
                        clone->next = list;
                        skb_shinfo(skb)->frag_list = clone;
                }
        }
        /* Success! Now we may commit changes to skb data. */

pull_pages:
        eat = delta;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size <= eat) {
                        put_page(skb_shinfo(skb)->frags[i].page);
                        eat -= skb_shinfo(skb)->frags[i].size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
                        if (eat) {
                                skb_shinfo(skb)->frags[k].page_offset += eat;
                                skb_shinfo(skb)->frags[k].size -= eat;
                                eat = 0;
                        }
                        k++;
                }
        }
        skb_shinfo(skb)->nr_frags = k;

        skb->tail     += delta;
        skb->data_len -= delta;

        return skb->tail;
}
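
/*
 * Hedged example (not in the original): callers rarely use this directly;
 * pskb_may_pull() wraps it to make sure a protocol header is linear.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 *		goto drop;		(hypothetical error label)
 *	th = skb->h.th;			(reload pointers after the pull)
 */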
/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
        int i, copy;
        int start = skb_headlen(skb);

        if (offset > (int)skb->len - len)
                goto fault;

        /* Copy header. */
        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
                memcpy(to, skb->data + offset, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to     += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        u8 *vaddr;

                        if (copy > len)
                                copy = len;

                        vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
                        memcpy(to,
                               vaddr + skb_shinfo(skb)->frags[i].page_offset +
                               offset - start, copy);
                        kunmap_skb_frag(vaddr);

                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to     += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                if (skb_copy_bits(list, offset - start,
                                                  to, copy))
                                        goto fault;
                                if ((len -= copy) == 0)
                                        return 0;
                                offset += copy;
                                to     += copy;
                        }
                        start = end;
                }
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
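
/*
 * Hedged example (not in the original): copy the first bytes of a possibly
 * non-linear buffer into a stack scratch area.
 *
 *	u8 hdr[8];
 *	if (skb_copy_bits(skb, 0, hdr, sizeof(hdr)))
 *		goto drop;	(fails if skb->len is shorter than 8 bytes)
 */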
/* Keep iterating until skb_iter_next returns false. */
void skb_iter_first(const struct sk_buff *skb, struct skb_iter *i)
{
        i->len = skb_headlen(skb);
        i->data = (unsigned char *)skb->data;
        i->nextfrag = 0;
        i->fraglist = NULL;
}

int skb_iter_next(const struct sk_buff *skb, struct skb_iter *i)
{
        /* Unmap previous, if not head fragment. */
        if (i->nextfrag)
                kunmap_skb_frag(i->data);

        if (i->fraglist) {
        fraglist:
                /* We're iterating through fraglist. */
                if (i->nextfrag < skb_shinfo(i->fraglist)->nr_frags) {
                        i->data = kmap_skb_frag(&skb_shinfo(i->fraglist)
                                                ->frags[i->nextfrag]);
                        i->len = skb_shinfo(i->fraglist)
                                 ->frags[i->nextfrag].size;
                        i->nextfrag++;
                        return 1;
                }
                /* Fragments with fragments? Too hard! */
                BUG_ON(skb_shinfo(i->fraglist)->frag_list);
                i->fraglist = i->fraglist->next;
                if (!i->fraglist)
                        goto end;

                i->len = skb_headlen(i->fraglist);
                i->data = i->fraglist->data;
                i->nextfrag = 0;
                return 1;
        }

        if (i->nextfrag < skb_shinfo(skb)->nr_frags) {
                i->data = kmap_skb_frag(&skb_shinfo(skb)->frags[i->nextfrag]);
                i->len = skb_shinfo(skb)->frags[i->nextfrag].size;
                i->nextfrag++;
                return 1;
        }

        i->fraglist = skb_shinfo(skb)->frag_list;
        if (i->fraglist)
                goto fraglist;

end:
        /* Bug trap for callers */
        i->data = NULL;
        return 0;
}

void skb_iter_abort(const struct sk_buff *skb, struct skb_iter *i)
{
        /* Unmap previous, if not head fragment. */
        if (i->data && i->nextfrag)
                kunmap_skb_frag(i->data);
        /* Bug trap for callers */
        i->data = NULL;
}
/* Checksum skb data. */

unsigned int skb_checksum(const struct sk_buff *skb, int offset,
                          int len, unsigned int csum)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        int pos = 0;

        /* Checksum header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                csum = csum_partial(skb->data + offset, copy, csum);
                if ((len -= copy) == 0)
                        return csum;
                offset += copy;
                pos	= copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        unsigned int csum2;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;
                        vaddr = kmap_skb_frag(frag);
                        csum2 = csum_partial(vaddr + frag->page_offset +
                                             offset - start, copy, 0);
                        kunmap_skb_frag(vaddr);
                        csum = csum_block_add(csum, csum2, pos);
                        if (!(len -= copy))
                                return csum;
                        offset += copy;
                        pos    += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                unsigned int csum2;

                                if (copy > len)
                                        copy = len;
                                csum2 = skb_checksum(list, offset - start,
                                                     copy, 0);
                                csum = csum_block_add(csum, csum2, pos);
                                if ((len -= copy) == 0)
                                        return csum;
                                offset += copy;
                                pos    += copy;
                        }
                        start = end;
                }
        }
        if (len)
                BUG();

        return csum;
}
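
/*
 * Hedged example (not in the original): checksum an entire buffer and fold
 * the 32-bit running sum down to the 16-bit ones-complement form.
 *
 *	unsigned int csum = skb_checksum(skb, 0, skb->len, 0);
 *	unsigned short folded = csum_fold(csum);
 */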
/* Both of above in one bottle. */

unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                                    u8 *to, int len, unsigned int csum)
{
        int start = skb_headlen(skb);
        int i, copy = start - offset;
        int pos = 0;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                csum = csum_partial_copy_nocheck(skb->data + offset, to,
                                                 copy, csum);
                if ((len -= copy) == 0)
                        return csum;
                offset += copy;
                to     += copy;
                pos	= copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                BUG_TRAP(start <= offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        unsigned int csum2;
                        u8 *vaddr;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        if (copy > len)
                                copy = len;
                        vaddr = kmap_skb_frag(frag);
                        csum2 = csum_partial_copy_nocheck(vaddr +
                                                          frag->page_offset +
                                                          offset - start, to,
                                                          copy, 0);
                        kunmap_skb_frag(vaddr);
                        csum = csum_block_add(csum, csum2, pos);
                        if (!(len -= copy))
                                return csum;
                        offset += copy;
                        to     += copy;
                        pos    += copy;
                }
                start = end;
        }

        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;

                for (; list; list = list->next) {
                        unsigned int csum2;
                        int end;

                        BUG_TRAP(start <= offset + len);

                        end = start + list->len;
                        if ((copy = end - offset) > 0) {
                                if (copy > len)
                                        copy = len;
                                csum2 = skb_copy_and_csum_bits(list,
                                                               offset - start,
                                                               to, copy, 0);
                                csum = csum_block_add(csum, csum2, pos);
                                if ((len -= copy) == 0)
                                        return csum;
                                offset += copy;
                                to     += copy;
                                pos    += copy;
                        }
                        start = end;
                }
        }
        if (len)
                BUG();
        return csum;
}
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
        unsigned int csum;
        long csstart;

        if (skb->ip_summed == CHECKSUM_HW)
                csstart = skb->h.raw - skb->data;
        else
                csstart = skb_headlen(skb);

        if (csstart > skb_headlen(skb))
                BUG();

        memcpy(to, skb->data, csstart);

        csum = 0;
        if (csstart != skb->len)
                csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
                                              skb->len - csstart, 0);

        if (skb->ip_summed == CHECKSUM_HW) {
                long csstuff = csstart + skb->csum;

                *((unsigned short *)(to + csstuff)) = csum_fold(csum);
        }
}
/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
        unsigned long flags;
        struct sk_buff *result;

        spin_lock_irqsave(&list->lock, flags);
        result = __skb_dequeue(list);
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
}
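
/*
 * Hedged example (not in the original): a typical consumer drains a queue
 * until it is empty. queue and process_one() are hypothetical.
 *
 *	struct sk_buff *skb;
 *	while ((skb = skb_dequeue(&queue)) != NULL)
 *		process_one(skb);
 */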
/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
        unsigned long flags;
        struct sk_buff *result;

        spin_lock_irqsave(&list->lock, flags);
        result = __skb_dequeue_tail(list);
        spin_unlock_irqrestore(&list->lock, flags);
        return result;
}
/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(list)) != NULL)
                kfree_skb(skb);
}
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_queue_head(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
}
/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
}
/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	Works even without knowing the list it is sitting on, which can be
 *	handy at times. It also means that THE LIST MUST EXIST when you
 *	unlink. Thus a list must have its contents unlinked before it is
 *	destroyed.
 */
void skb_unlink(struct sk_buff *skb)
{
        struct sk_buff_head *list = skb->list;

        if (list) {
                unsigned long flags;

                spin_lock_irqsave(&list->lock, flags);
                if (skb->list == list)
                        __skb_unlink(skb, skb->list);
                spin_unlock_irqrestore(&list->lock, flags);
        }
}
/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */

void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&old->list->lock, flags);
        __skb_append(old, newsk);
        spin_unlock_irqrestore(&old->list->lock, flags);
}
/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls. A buffer cannot be placed on two lists at the same time.
 */

void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
        unsigned long flags;

        spin_lock_irqsave(&old->list->lock, flags);
        __skb_insert(newsk, old->prev, old, old->list);
        spin_unlock_irqrestore(&old->list->lock, flags);
}
#if 0
/*
 *	Tune the memory allocator for a new MTU size.
 */
void skb_add_mtu(int mtu)
{
        /* Must match allocation in alloc_skb */
        mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);

        kmem_add_cache_size(mtu);
}
#endif
static inline void skb_split_inside_header(struct sk_buff *skb,
                                           struct sk_buff *skb1,
                                           const u32 len, const int pos)
{
        int i;

        memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);

        /* And move data appendix as is. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

        skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
        skb_shinfo(skb)->nr_frags  = 0;
        skb1->data_len		   = skb->data_len;
        skb1->len		  += skb1->data_len;
        skb->data_len		   = 0;
        skb->len		   = len;
        skb->tail		   = skb->data + len;
}
static inline void skb_split_no_header(struct sk_buff *skb,
                                       struct sk_buff *skb1,
                                       const u32 len, int pos)
{
        int i, k = 0;
        const int nfrags = skb_shinfo(skb)->nr_frags;

        skb_shinfo(skb)->nr_frags = 0;
        skb1->len		  = skb1->data_len = skb->len - len;
        skb->len		  = len;
        skb->data_len		  = len - pos;

        for (i = 0; i < nfrags; i++) {
                int size = skb_shinfo(skb)->frags[i].size;

                if (pos + size > len) {
                        skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

                        if (pos < len) {
                                /* Split frag.
                                 * We have two variants in this case:
                                 * 1. Move all the frag to the second
                                 *    part, if it is possible. F.e.
                                 *    this approach is mandatory for TUX,
                                 *    where splitting is expensive.
                                 * 2. Split accurately. This is what we do.
                                 */
                                get_page(skb_shinfo(skb)->frags[i].page);
                                skb_shinfo(skb1)->frags[0].page_offset += len - pos;
                                skb_shinfo(skb1)->frags[0].size -= len - pos;
                                skb_shinfo(skb)->frags[i].size	= len - pos;
                                skb_shinfo(skb)->nr_frags++;
                        }
                        k++;
                } else
                        skb_shinfo(skb)->nr_frags++;
                pos += size;
        }
        skb_shinfo(skb1)->nr_frags = k;
}
/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
        int pos = skb_headlen(skb);

        if (len < pos)	/* Split line is inside header. */
                skb_split_inside_header(skb, skb1, len, pos);
        else		/* Second chunk has no header, nothing to copy. */
                skb_split_no_header(skb, skb1, len, pos);
}
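
/*
 * Hedged example (not in the original): TCP-style segmentation can keep the
 * first mss bytes in skb and move the remainder to a fresh buffer; the
 * allocation size below is sized so the inside-header case has tailroom.
 *
 *	struct sk_buff *rest = alloc_skb(skb->len - mss, GFP_ATOMIC);
 *	if (rest)
 *		skb_split(skb, rest, mss);	(skb keeps bytes [0, mss))
 */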
void __init skb_init(void)
{
        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
                                              sizeof(struct sk_buff),
                                              0,
                                              SLAB_HWCACHE_ALIGN,
                                              NULL, NULL);
        if (!skbuff_head_cache)
                panic("cannot create skbuff cache");
}
EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_clone_fraglist);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
EXPORT_SYMBOL(skb_iter_first);
EXPORT_SYMBOL(skb_iter_next);
EXPORT_SYMBOL(skb_iter_abort);