/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>

#include <asm/uaccess.h>
#include <asm/system.h>
int sysctl_hot_list_len = 128;

static kmem_cache_t *skbuff_head_cache;

static union {
	struct sk_buff_head	list;
	char			pad[SMP_CACHE_BYTES];
} skb_head_pool[NR_CPUS];
/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk("skput:over: %p:%d put:%d dev:%s\n",
	       here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	Out of line support code for skb_push(). Not user callable.
 */
void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk("skput:under: %p:%d put:%d dev:%s\n",
	       here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
static __inline__ struct sk_buff *skb_head_from_pool(void)
{
	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;

	if (skb_queue_len(list)) {
		struct sk_buff *skb;
		unsigned long flags;

		local_irq_save(flags);
		skb = __skb_dequeue(list);
		local_irq_restore(flags);
		return skb;
	}
	return NULL;
}

static __inline__ void skb_head_to_pool(struct sk_buff *skb)
{
	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;

	if (skb_queue_len(list) < sysctl_hot_list_len) {
		unsigned long flags;

		local_irq_save(flags);
		__skb_queue_head(list, skb);
		local_irq_restore(flags);
		return;
	}
	kmem_cache_free(skbuff_head_cache, skb);
}
/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	memory leaks.
 */

/**
 *	alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of @size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
{
	struct sk_buff *skb;
	u8 *data;

	if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
		static int count = 0;
		if (++count < 5) {
			printk(KERN_ERR "alloc_skb called nonatomically "
			       "from interrupt %p\n", NET_CALLER(size));
			BUG();
		}
		gfp_mask &= ~__GFP_WAIT;
	}

	/* Get the HEAD */
	skb = skb_head_from_pool();
	if (skb == NULL) {
		skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
		if (skb == NULL)
			goto nohead;
	}

	/* Get the DATA. Size must match skb_add_mtu(). */
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (data == NULL)
		goto nodata;

	/* XXX: does not include slab overhead */
	skb->truesize = size + sizeof(struct sk_buff);

	/* Load the data pointers. */
	skb->head = skb->data = skb->tail = data;
	skb->end = data + size;

	/* Set up other state */
	skb->len = 0;
	skb->cloned = 0;
	skb->data_len = 0;

	atomic_set(&skb->users, 1);
	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->frag_list = NULL;
	return skb;

nodata:
	skb_head_to_pool(skb);
nohead:
	return NULL;
}
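
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller allocates with extra space, reserves headroom for lower-layer
 * headers, then fills the linear area with skb_put(). The helper name
 * and the 16-byte headroom are hypothetical.
 */
static struct sk_buff *example_build_skb(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + 16, GFP_ATOMIC);

	if (skb == NULL)
		return NULL;
	skb_reserve(skb, 16);				/* headroom for headers */
	memcpy(skb_put(skb, len), payload, len);	/* consume tailroom */
	return skb;
}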
/*
 *	Slab constructor for an skb head.
 */
static inline void skb_headerinit(void *p, kmem_cache_t *cache,
				  unsigned long flags)
{
	struct sk_buff *skb = p;

	skb->next = skb->prev = NULL;
	skb->list = NULL;
	skb->sk = NULL;
	skb->stamp.tv_sec = 0;	/* No idea about time */
	skb->dev = NULL;
	skb->real_dev = NULL;
	skb->dst = NULL;
	memset(skb->cb, 0, sizeof(skb->cb));
	skb->pkt_type = PACKET_HOST;	/* Default type */
	skb->ip_summed = 0;
	skb->priority = 0;
	skb->security = 0;	/* By default packets are insecure */
	skb->destructor = NULL;

#ifdef CONFIG_NETFILTER
	skb->nfmark = skb->nfcache = 0;
	skb->nfct = NULL;
#ifdef CONFIG_NETFILTER_DEBUG
	skb->nf_debug = 0;
#endif
#endif
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
}
static void skb_drop_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list = skb_shinfo(skb)->frag_list;

	skb_shinfo(skb)->frag_list = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}
static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}
/*
 *	Free the memory of an skbuff without cleaning its state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
	skb_release_data(skb);
	skb_head_to_pool(skb);
}
/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	if (skb->list) {
		printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
		       "on a list (from %p).\n", NET_CALLER(skb));
		BUG();
	}

	dst_release(skb->dst);
	if (skb->destructor) {
		if (in_irq()) {
			printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n",
			       NET_CALLER(skb));
		}
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#endif
	skb_headerinit(skb, NULL, 0);	/* clean state */
	kfree_skbmem(skb);
}
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL, otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
	struct sk_buff *n;

	n = skb_head_from_pool();
	if (!n) {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
	}

#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->list = NULL;
	n->sk = NULL;
	C(stamp); C(dev); C(real_dev);
	C(h); C(nh); C(mac);
	C(dst);
	dst_clone(n->dst);
	memcpy(n->cb, skb->cb, sizeof(skb->cb));
	C(len); C(data_len); C(csum);
	n->cloned = 1;
	C(pkt_type); C(ip_summed); C(priority);
	atomic_set(&n->users, 1);
	C(protocol); C(security); C(truesize);
	C(head); C(data); C(tail); C(end);
	n->destructor = NULL;
#ifdef CONFIG_NETFILTER
	C(nfmark); C(nfcache); C(nfct);
#ifdef CONFIG_NETFILTER_DEBUG
	C(nf_debug);
#endif
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_HIPPI)
	C(private);
#endif
#ifdef CONFIG_NET_SCHED
	C(tc_index);
#endif
	skb->cloned = 1;

	atomic_inc(&(skb_shinfo(skb)->dataref));
#ifdef CONFIG_NETFILTER
	nf_conntrack_get(skb->nfct);
#endif
	return n;
}
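
/*
 * Usage sketch (illustrative, hypothetical helper name): a clone shares
 * the data area with the original, so it is cheap, but the payload must
 * be treated as read-only through either buffer.
 */
static struct sk_buff *example_tap_packet(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	/* clone->data points into the same memory as skb->data;
	 * writing through it would corrupt the original packet. */
	return clone;
}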
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;

	new->list = NULL;
	new->sk = NULL;
	new->dev = old->dev;
	new->real_dev = old->real_dev;
	new->priority = old->priority;
	new->protocol = old->protocol;
	new->dst = dst_clone(old->dst);
	new->h.raw = old->h.raw + offset;
	new->nh.raw = old->nh.raw + offset;
	new->mac.raw = old->mac.raw + offset;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	atomic_set(&new->users, 1);
	new->pkt_type = old->pkt_type;
	new->stamp = old->stamp;
	new->destructor = NULL;
	new->security = old->security;
#ifdef CONFIG_NETFILTER
	new->nfmark = old->nfmark;
	new->nfcache = old->nfcache;
	new->nfct = old->nfct;
	nf_conntrack_get(new->nfct);
#ifdef CONFIG_NETFILTER_DEBUG
	new->nf_debug = old->nf_debug;
#endif
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#endif
}
/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff into a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended when only the header is
 *	going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
{
	struct sk_buff *n;
	int headerlen = skb->data - skb->head;

	/*
	 *	Allocate the copy buffer
	 */
	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
	if (n == NULL)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);
	n->csum = skb->csum;
	n->ip_summed = skb->ip_summed;

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
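
/*
 * Usage sketch (illustrative, hypothetical helper name): take a fully
 * private, linear copy before modifying payload bytes in place.
 */
static struct sk_buff *example_private_copy(struct sk_buff *skb)
{
	struct sk_buff *n = skb_copy(skb, GFP_ATOMIC);

	if (n != NULL)
		n->data[0] ^= 1;	/* safe: n shares nothing with skb */
	return n;
}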
/* Keep head the same: replace data */
int skb_linearize(struct sk_buff *skb, int gfp_mask)
{
	unsigned int size;
	u8 *data;
	long offset;
	int headerlen = skb->data - skb->head;
	int expand = (skb->tail + skb->data_len) - skb->end;

	if (skb_shared(skb))
		BUG();

	if (expand <= 0)
		expand = 0;

	size = (skb->end - skb->head + expand);
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (data == NULL)
		return -ENOMEM;

	/* Copy entire thing */
	if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
		BUG();

	/* Offset between the two in bytes */
	offset = data - skb->head;

	/* Free old data. */
	skb_release_data(skb);

	skb->head = data;
	skb->end  = data + size;

	/* Set up new pointers */
	skb->h.raw += offset;
	skb->nh.raw += offset;
	skb->mac.raw += offset;
	skb->tail += offset;
	skb->data += offset;

	/* Set up shinfo */
	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->frag_list = NULL;

	/* We are no longer a clone, even if we were. */
	skb->cloned = 0;

	skb->tail += skb->data_len;
	skb->data_len = 0;
	return 0;
}
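
/*
 * Usage sketch (illustrative, hypothetical helper name): code that wants
 * to walk the whole packet with plain pointers must first pull all
 * fragments into the linear area. Note skb_linearize() BUG()s on a
 * shared skb, so the caller must hold the only reference.
 */
static int example_make_linear(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC))
		return -ENOMEM;
	/* skb->data .. skb->tail now covers all skb->len bytes */
	return 0;
}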
/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in its header. Fragmented data remains shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
{
	struct sk_buff *n;

	/*
	 *	Allocate the copy buffer
	 */
	n = alloc_skb(skb->end - skb->head, gfp_mask);
	if (n == NULL)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	memcpy(n->data, skb->data, n->len);
	n->csum = skb->csum;
	n->ip_summed = skb->ip_summed;

	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);

out:
	return n;
}
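
/*
 * Usage sketch (illustrative, hypothetical helper name): rewrite a header
 * field while leaving the (possibly paged) payload shared with the
 * original buffer.
 */
static struct sk_buff *example_copy_for_header_edit(struct sk_buff *skb)
{
	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);

	/* The linear header of n is private and writable; the frags and
	 * frag_list are still shared read-only with skb. */
	return n;
}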
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are
 *	zero) the header of the skb. The &sk_buff itself is not changed
 *	and MUST have a reference count of 1. Returns zero on success or
 *	%-ENOMEM if expansion failed; in the latter case the &sk_buff is
 *	not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb->end - skb->head) + ntail;
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (data == NULL)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
	memcpy(data + size, skb->end, sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->end  = data + size;

	skb->data += off;
	skb->tail += off;
	skb->mac.raw += off;
	skb->h.raw += off;
	skb->nh.raw += off;
	skb->cloned = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
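
/*
 * Usage sketch (illustrative, hypothetical helper name): grow headroom in
 * place (the skb must not be shared), then treat every cached pointer
 * into the buffer as stale, since the data area may have moved.
 */
static int example_grow_headroom(struct sk_buff *skb, unsigned int needed)
{
	if (skb_headroom(skb) < needed &&
	    pskb_expand_head(skb, needed - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	/* skb->data, skb->nh.raw etc. have been relocated and are
	 * valid again only from this point on. */
	return 0;
}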
/* Make a private copy of the skb with a writable head and some headroom */

struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		return pskb_copy(skb, GFP_ATOMIC);

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (skb2 == NULL ||
	    !pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
		return skb2;

	kfree_skb(skb2);
	return NULL;
}
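
/*
 * Usage sketch (illustrative, hypothetical helper name): the common caller
 * pattern when a lower layer needs more headroom than the packet was
 * built with, e.g. before prepending a link-layer header.
 */
static struct sk_buff *example_prep_for_push(struct sk_buff *skb,
					     unsigned int hh_len)
{
	if (skb_headroom(skb) < hh_len || skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, hh_len);

		kfree_skb(skb);		/* drop our reference to the old skb */
		skb = skb2;		/* may be NULL on allocation failure */
	}
	return skb;
}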
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */

struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom,
				int newtailroom,
				int gfp_mask)
{
	struct sk_buff *n;

	/*
	 *	Allocate the copy buffer
	 */
	n = alloc_skb(newheadroom + skb->len + newtailroom,
		      gfp_mask);
	if (n == NULL)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	/* Copy the data only. */
	if (skb_copy_bits(skb, 0, n->data, skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
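
/*
 * Usage sketch (illustrative, hypothetical helper name): a private copy
 * with fresh room at both ends, e.g. to prepend a tunnel header and
 * append a trailer.
 */
static struct sk_buff *example_copy_with_room(struct sk_buff *skb,
					      int head, int tail)
{
	struct sk_buff *n = skb_copy_expand(skb, head, tail, GFP_ATOMIC);

	if (n != NULL)
		skb_push(n, head);	/* the new headroom is usable at once */
	return n;
}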
/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return %NULL in out-of-memory cases.
 */

struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *nskb;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return skb;
	}

	nskb = skb_copy_expand(skb, skb_headroom(skb),
			       skb_tailroom(skb) + pad, GFP_ATOMIC);
	kfree_skb(skb);
	if (nskb)
		memset(nskb->data + nskb->len, 0, pad);
	return nskb;
}
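
/*
 * Usage sketch (illustrative, hypothetical helper name): drivers pad short
 * Ethernet frames to the 60-byte minimum (ETH_ZLEN from <linux/if_ether.h>)
 * before handing them to hardware. Note that skb_pad() consumes the
 * original skb on the reallocation path.
 */
static struct sk_buff *example_pad_to_min(struct sk_buff *skb)
{
	if (skb->len < ETH_ZLEN)
		return skb_pad(skb, ETH_ZLEN - skb->len);
	return skb;
}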
/* Trims skb to length len. It can change skb pointers, if "realloc" is 1.
 * If realloc==0 and trimming is impossible without changing the data,
 * it is BUG().
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
{
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;

	for (i = 0; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;
		if (end > len) {
			if (skb_cloned(skb)) {
				if (!realloc)
					BUG();
				if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
					return -ENOMEM;
			}
			if (len <= offset) {
				put_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb)->nr_frags--;
			} else {
				skb_shinfo(skb)->frags[i].size = len - offset;
			}
		}
		offset = end;
	}

	if (offset < len) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		if (len <= skb_headlen(skb)) {
			skb->len = len;
			skb->data_len = 0;
			skb->tail = skb->data + len;
			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
				skb_drop_fraglist(skb);
		} else {
			skb->data_len -= skb->len - len;
			skb->len = len;
		}
	}

	return 0;
}
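
/*
 * Usage sketch (illustrative, hypothetical helper name): callers normally
 * reach this code through the pskb_trim() wrapper in <linux/skbuff.h>
 * rather than calling ___pskb_trim() directly, e.g. to chop a trailer
 * off a possibly-fragmented packet.
 */
static int example_drop_trailer(struct sk_buff *skb, unsigned int trailer)
{
	if (trailer >= skb->len)
		return -EINVAL;
	return pskb_trim(skb, skb->len - trailer);
}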
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff: it expands
 *	the header, moving its tail forward and copying the necessary data
 *	from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or the value of the new tail of the skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	int i, k, eat;

	/* If skb does not have enough free space at the tail, get a new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (skb_shinfo(skb)->frag_list == NULL)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but, taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			if (list == NULL)
				BUG();

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (clone == NULL)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (pskb_pull(list, eat) == NULL) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb->tail;
}
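
/*
 * Usage sketch (illustrative, hypothetical helper name): protocol code
 * rarely calls __pskb_pull_tail() directly; it uses pskb_may_pull() to
 * guarantee that the first bytes of a header sit in the linear area
 * before dereferencing them.
 */
static int example_check_header(struct sk_buff *skb, unsigned int hdrlen)
{
	if (!pskb_may_pull(skb, hdrlen))
		return -EINVAL;	/* packet shorter than hdrlen, or pull failed */
	/* skb->data now safely addresses hdrlen contiguous bytes */
	return 0;
}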
/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb->len - skb->data_len;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		memcpy(to, skb->data + offset, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to, vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start, to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
			}
			start = end;
		}
	}
	if (len == 0)
		return 0;

fault:
	return -EFAULT;
}
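
/*
 * Usage sketch (illustrative, hypothetical helper name): gather bytes from
 * a possibly-fragmented packet into a flat buffer without linearizing it.
 * Offset 0 is skb->data; negative offsets reach back into the headroom,
 * as skb_copy() does above with -headerlen.
 */
static int example_peek_bytes(const struct sk_buff *skb, void *buf, int len)
{
	return skb_copy_bits(skb, 0, buf, len);	/* 0 or -EFAULT */
}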
/* Checksum skb data. */

unsigned int skb_checksum(const struct sk_buff *skb, int offset,
			  int len, unsigned int csum)
{
	int i, copy;
	int start = skb->len - skb->data_len;
	int pos = 0;

	/* Checksum header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				unsigned int csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();
	return csum;
}
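
/*
 * Usage sketch (illustrative, hypothetical helper name): checksum the
 * whole packet and fold the 32-bit partial sum to 16 bits, as a transport
 * protocol would when no hardware checksum is available.
 */
static unsigned short example_sum16(const struct sk_buff *skb)
{
	unsigned int csum = skb_checksum(skb, 0, skb->len, 0);

	return csum_fold(csum);
}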
/* Both of the above in one bottle. */

unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
				    u8 *to, int len, unsigned int csum)
{
	int i, copy;
	int start = skb->len - skb->data_len;
	int pos = 0;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset +
							  offset - start, to, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list = list->next) {
			unsigned int csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list, offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();
	return csum;
}
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	unsigned int csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_HW)
		csstart = skb->h.raw - skb->data;
	else
		csstart = skb->len - skb->data_len;

	if (csstart > skb->len - skb->data_len)
		BUG();

	memcpy(to, skb->data, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_HW) {
		long csstuff = csstart + skb->csum;

		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
	}
}
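
/*
 * Usage sketch (illustrative, hypothetical helper and buffer): a driver
 * copying a packet into a PIO/DMA bounce buffer lets this helper finish
 * a deferred (CHECKSUM_HW) checksum on the way out.
 */
static void example_xmit_copy(const struct sk_buff *skb, u8 *hw_buf)
{
	/* hw_buf must provide room for skb->len bytes */
	skb_copy_and_csum_dev(skb, hw_buf);
}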
#if 0
/*
 *	Tune the memory allocator for a new MTU size.
 */
void skb_add_mtu(int mtu)
{
	/* Must match allocation in alloc_skb */
	mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);

	kmem_add_cache_size(mtu);
}
#endif
void __init skb_init(void)
{
	int i;

	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN,
					      skb_headerinit, NULL);
	if (!skbuff_head_cache)
		panic("cannot create skbuff cache");

	for (i = 0; i < NR_CPUS; i++)
		skb_queue_head_init(&skb_head_pool[i].list);
}