/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.1.1.1 2005/04/11 02:51:12 jack Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>

#include <asm/uaccess.h>
#include <asm/system.h>
int sysctl_hot_list_len = 128;

static kmem_cache_t *skbuff_head_cache;

static union {
	struct sk_buff_head	list;
	char			pad[SMP_CACHE_BYTES];
} skb_head_pool[NR_CPUS];
/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */
/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */

void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk("skput:over: %p:%d put:%d dev:%s",
		here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk("skput:under: %p:%d put:%d dev:%s",
		here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
static __inline__ struct sk_buff *skb_head_from_pool(void)
{
	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;

	if (skb_queue_len(list)) {
		struct sk_buff *skb;
		unsigned long flags;

		local_irq_save(flags);
		skb = __skb_dequeue(list);
		local_irq_restore(flags);
		return skb;
	}
	return NULL;
}
static __inline__ void skb_head_to_pool(struct sk_buff *skb)
{
	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;

	if (skb_queue_len(list) < sysctl_hot_list_len) {
		unsigned long flags;

		local_irq_save(flags);
		__skb_queue_head(list, skb);
		local_irq_restore(flags);

		return;
	}
	kmem_cache_free(skbuff_head_cache, skb);
}
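
/*
 * Illustrative sketch, not part of the original file: the NOTE at the top
 * of this file in practice. The unlocked __skb_ list primitives must run
 * with the queue lock held and local interrupts excluded, mirroring the
 * local_irq_save() dance in the pool helpers above; skb_queue_tail() is
 * the locked variant that does all of this internally. The queue and skb
 * arguments here are hypothetical.
 */
static void example_enqueue(struct sk_buff_head *queue, struct sk_buff *skb)
{
	unsigned long flags;

	/* Open-coded equivalent of skb_queue_tail(queue, skb). */
	spin_lock_irqsave(&queue->lock, flags);
	__skb_queue_tail(queue, skb);
	spin_unlock_irqrestore(&queue->lock, flags);
}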
/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 */
/**
 *	alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
{
	struct sk_buff *skb;
	u8 *data;

	if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
		static int count = 0;
		if (++count < 5) {
			printk(KERN_ERR "alloc_skb called nonatomically "
			       "from interrupt %p\n", NET_CALLER(size));
			BUG();
		}
		gfp_mask &= ~__GFP_WAIT;
	}

	/* Get the HEAD */
	skb = skb_head_from_pool();
	if (skb == NULL) {
		skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
		if (skb == NULL)
			goto nohead;
	}

	/* Get the DATA. Size must match skb_add_mtu(). */
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (data == NULL)
		goto nodata;

	/* XXX: does not include slab overhead */
	skb->truesize = size + sizeof(struct sk_buff);

	/* Load the data pointers. */
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end = data + size;

	/* Set up other state */
	skb->len = 0;
	skb->cloned = 0;
	skb->data_len = 0;

	atomic_set(&skb->users, 1);
	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->frag_list = NULL;
	return skb;

nodata:
	skb_head_to_pool(skb);
nohead:
	return NULL;
}
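
/*
 * Illustrative sketch, not part of the original file: typical alloc_skb()
 * use in a receive path. The device, length, and 2-byte pad below are
 * hypothetical, but the pattern is standard: allocate with GFP_ATOMIC
 * (receive handlers cannot sleep), reserve headroom so the IP header
 * lands on a 16-byte boundary, then skb_put() the frame length before
 * copying or DMAing the data in.
 */
static struct sk_buff *example_rx_skb(struct net_device *dev,
				      unsigned int pkt_len)
{
	struct sk_buff *skb = alloc_skb(pkt_len + 2, GFP_ATOMIC);

	if (skb == NULL)
		return NULL;		/* caller drops the packet */
	skb_reserve(skb, 2);		/* align the IP header */
	skb->dev = dev;
	skb_put(skb, pkt_len);		/* claim tail room for the frame */
	return skb;
}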
/*
 * Slab constructor for a skb head.
 */
static inline void skb_headerinit(void *p, kmem_cache_t *cache,
				  unsigned long flags)
{
	struct sk_buff *skb = p;

	skb->next = NULL;
	skb->prev = NULL;
	skb->list = NULL;
	skb->sk = NULL;
	skb->stamp.tv_sec=0;	/* No idea about time */
	skb->dev = NULL;
	skb->dst = NULL;
	memset(skb->cb, 0, sizeof(skb->cb));
	skb->pkt_type = PACKET_HOST;	/* Default type */
	skb->ip_summed = 0;
	skb->priority = 0;
	skb->security = 0;	/* By default packets are insecure */
	skb->destructor = NULL;

#ifdef CONFIG_NETFILTER
	skb->nfmark = skb->nfcache = 0;
	skb->nfct = NULL;
#ifdef CONFIG_NETFILTER_DEBUG
	skb->nf_debug = 0;
#endif
#endif
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
}
static void skb_drop_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list = skb_shinfo(skb)->frag_list;

	skb_shinfo(skb)->frag_list = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}
static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list=list->next)
		skb_get(list);
}
static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}
/*
 *	Free an skbuff by memory without cleaning the state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
	skb_release_data(skb);
	skb_head_to_pool(skb);
}
/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */
void __kfree_skb(struct sk_buff *skb)
{
	if (skb->list) {
		printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
		       "on a list (from %p).\n", NET_CALLER(skb));
		BUG();
	}

	dst_release(skb->dst);
	if (skb->destructor) {
		if (in_irq()) {
			printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n",
			       NET_CALLER(skb));
		}
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#endif
	skb_headerinit(skb, NULL, 0);	/* clean state */
	kfree_skbmem(skb);
}
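
/*
 * Illustrative sketch, not part of the original file: why callers use
 * kfree_skb() rather than __kfree_skb(). kfree_skb() (an inline in
 * <linux/skbuff.h>) calls __kfree_skb() only when skb->users drops to
 * zero, so a reference taken with skb_get() keeps the buffer alive.
 */
static void example_hold_and_release(struct sk_buff *skb)
{
	struct sk_buff *held = skb_get(skb);	/* users: 1 -> 2 */

	kfree_skb(skb);		/* users: 2 -> 1, buffer survives */
	kfree_skb(held);	/* users: 1 -> 0, __kfree_skb() runs */
}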
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
	struct sk_buff *n;

	n = skb_head_from_pool();
	if (!n) {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
	}

#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->list = NULL;
	n->sk = NULL;
	C(stamp);
	C(dev);
	C(h);
	C(nh);
	C(mac);
	C(dst);
	dst_clone(n->dst);
	memcpy(n->cb, skb->cb, sizeof(skb->cb));
	C(len);
	C(data_len);
	C(csum);
	n->cloned = 1;
	C(pkt_type);
	C(ip_summed);
	C(priority);
	atomic_set(&n->users, 1);
	C(protocol);
	C(security);
	C(truesize);
	C(head);
	C(data);
	C(tail);
	C(end);
	n->destructor = NULL;
#ifdef CONFIG_NETFILTER
	C(nfmark);
	C(nfcache);
	C(nfct);
#ifdef CONFIG_NETFILTER_DEBUG
	C(nf_debug);
#endif
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_HIPPI)
	C(private);
#endif
#ifdef CONFIG_NET_SCHED
	C(tc_index);
#endif

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;
#ifdef CONFIG_NETFILTER
	nf_conntrack_get(skb->nfct);
#endif
	return n;
}
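
/*
 * Illustrative sketch, not part of the original file: skb_clone() is the
 * cheap way to hand one payload to two consumers. Both buffers share the
 * data area (dataref is bumped and both are marked cloned), so neither
 * side may write to it; a consumer that must modify the data takes
 * skb_copy() instead. example_deliver() is a hypothetical consumer.
 */
static void example_tee(struct sk_buff *skb,
			void (*example_deliver)(struct sk_buff *))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (clone != NULL)
		example_deliver(clone);	/* shares data with skb */
	example_deliver(skb);
}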
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;

	new->list=NULL;
	new->sk=NULL;
	new->dev=old->dev;
	new->priority=old->priority;
	new->protocol=old->protocol;
	new->dst=dst_clone(old->dst);
	new->h.raw=old->h.raw+offset;
	new->nh.raw=old->nh.raw+offset;
	new->mac.raw=old->mac.raw+offset;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	atomic_set(&new->users, 1);
	new->pkt_type=old->pkt_type;
	new->stamp=old->stamp;
	new->destructor = NULL;
	new->security=old->security;
#ifdef CONFIG_NETFILTER
	new->nfmark=old->nfmark;
	new->nfcache=old->nfcache;
	new->nfct=old->nfct;
	nf_conntrack_get(new->nfct);
#ifdef CONFIG_NETFILTER_DEBUG
	new->nf_debug=old->nf_debug;
#endif
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#endif
}
/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff into a
 *	linear one, so the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer.
 *	This means the function is not recommended when only the header is
 *	going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
{
	struct sk_buff *n;
	int headerlen = skb->data-skb->head;

	/*
	 *	Allocate the copy buffer
	 */
	n=alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
	if(n==NULL)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n,headerlen);
	/* Set the tail pointer and length */
	skb_put(n,skb->len);
	n->csum = skb->csum;
	n->ip_summed = skb->ip_summed;

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen+skb->len))
		BUG();

	copy_skb_header(n, skb);

	return n;
}
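
/*
 * Illustrative sketch, not part of the original file: a writer must not
 * scribble on a buffer that may be cloned or shared, so it takes a fully
 * private linear copy first. The one-byte edit is a hypothetical
 * stand-in for any real data modification; it assumes skb->len > 0.
 */
static struct sk_buff *example_private_copy(struct sk_buff *skb)
{
	struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);

	kfree_skb(skb);			/* drop our reference to the original */
	if (priv != NULL)
		priv->data[0] ^= 1;	/* safe: the data is ours alone */
	return priv;
}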
/* Keep head the same: replace data */
int skb_linearize(struct sk_buff *skb, int gfp_mask)
{
	unsigned int size;
	u8 *data;
	long offset;
	int headerlen = skb->data - skb->head;
	int expand = (skb->tail+skb->data_len) - skb->end;

	if (skb_shared(skb))
		BUG();

	if (expand <= 0)
		expand = 0;

	size = (skb->end - skb->head + expand);
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (data == NULL)
		return -ENOMEM;

	/* Copy entire thing */
	if (skb_copy_bits(skb, -headerlen, data, headerlen+skb->len))
		BUG();

	/* Offset between the two in bytes */
	offset = data - skb->head;

	/* Free old data. */
	skb_release_data(skb);

	skb->head = data;
	skb->end  = data + size;

	/* Set up new pointers */
	skb->h.raw += offset;
	skb->nh.raw += offset;
	skb->mac.raw += offset;
	skb->tail += offset;
	skb->data += offset;

	/* Set up shinfo */
	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->frag_list = NULL;

	/* We are no longer a clone, even if we were. */
	skb->cloned = 0;

	skb->tail += skb->data_len;
	skb->data_len = 0;
	return 0;
}
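
/*
 * Illustrative sketch, not part of the original file: code that wants to
 * scan a whole packet through skb->data must flatten any paged fragments
 * first. After a successful skb_linearize(), skb_headlen(skb) equals
 * skb->len, so a plain memchr() over the linear area sees every byte.
 * The NUL-byte predicate is hypothetical.
 */
static int example_scan(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC))
		return -ENOMEM;		/* could not flatten; give up */

	return memchr(skb->data, 0, skb->len) != NULL;
}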
/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */
struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
{
	struct sk_buff *n;

	/*
	 *	Allocate the copy buffer
	 */
	n=alloc_skb(skb->end - skb->head, gfp_mask);
	if(n==NULL)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n,skb->data-skb->head);
	/* Set the tail pointer and length */
	skb_put(n,skb_headlen(skb));
	/* Copy the bytes */
	memcpy(n->data, skb->data, n->len);
	n->csum = skb->csum;
	n->ip_summed = skb->ip_summed;

	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);

	return n;
}
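
/*
 * Illustrative sketch, not part of the original file: rewriting only the
 * headers of a possibly-shared packet. pskb_copy() privatizes just the
 * linear header area while the paged data stays shared, which is exactly
 * what a header-mangling path wants. The edit of the first header byte
 * is hypothetical; it assumes a non-empty header area.
 */
static struct sk_buff *example_mangle_header(struct sk_buff *skb)
{
	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);

	kfree_skb(skb);
	if (n != NULL && skb_headlen(n) > 0)
		n->data[0] ^= 1;	/* header bytes are private now */
	return n;
}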
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success or %-ENOMEM if
 *	expansion failed; in the latter case, the &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb->end - skb->head) + ntail;
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (data == NULL)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
	memcpy(data+nhead, skb->head, skb->tail-skb->head);
	memcpy(data+size, skb->end, sizeof(struct skb_shared_info));

	for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data+nhead) - skb->head;

	skb->head = data;
	skb->end  = data+size;

	skb->data += off;
	skb->tail += off;
	skb->mac.raw += off;
	skb->h.raw += off;
	skb->nh.raw += off;
	skb->cloned = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
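
/*
 * Illustrative sketch, not part of the original file: prepending a header
 * when the buffer may lack headroom. pskb_expand_head() reallocates the
 * header block, so every cached pointer into it must be reloaded; only
 * skb->data is used here afterwards. Assumes the caller holds the only
 * reference (pskb_expand_head() BUGs on shared buffers); hdr_len is a
 * hypothetical header size.
 */
static int example_prepend(struct sk_buff *skb, int hdr_len)
{
	if (skb_headroom(skb) < hdr_len &&
	    pskb_expand_head(skb, hdr_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;

	memset(skb_push(skb, hdr_len), 0, hdr_len);	/* blank new header */
	return 0;
}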
/* Make private copy of skb with writable head and some headroom */

struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		return pskb_copy(skb, GFP_ATOMIC);

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (skb2 == NULL ||
	    !pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
		return skb2;

	kfree_skb(skb2);
	return NULL;
}
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom,
				int newtailroom,
				int gfp_mask)
{
	struct sk_buff *n;

	/*
	 *	Allocate the copy buffer
	 */
	n=alloc_skb(newheadroom + skb->len + newtailroom,
		    gfp_mask);
	if(n==NULL)
		return NULL;

	skb_reserve(n,newheadroom);

	/* Set the tail pointer and length */
	skb_put(n,skb->len);

	/* Copy the data only. */
	if (skb_copy_bits(skb, 0, n->data, skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
/* Trims skb to length len. It can change skb pointers, if "realloc" is 1.
 * If realloc==0 and trimming is impossible without change of data,
 * it returns -ENOMEM. Otherwise, skb is trimmed.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
{
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;

	for (i=0; i<nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;
		if (end > len) {
			if (skb_cloned(skb)) {
				if (!realloc)
					BUG();
				if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
					return -ENOMEM;
			}
			if (len <= offset) {
				put_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb)->nr_frags--;
			} else {
				skb_shinfo(skb)->frags[i].size = len-offset;
			}
		}
		offset = end;
	}

	if (offset < len) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		if (len <= skb_headlen(skb)) {
			skb->len = len;
			skb->data_len = 0;
			skb->tail = skb->data + len;
			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
				skb_drop_fraglist(skb);
		} else {
			skb->data_len -= skb->len - len;
			skb->len = len;
		}
	}

	return 0;
}
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff: it expands
 *	the header, moving its tail forward and copying the necessary data
 *	from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
{
	int i, k, eat;

	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	eat = (skb->tail+delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat>0 ? eat+128 : 0, GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (skb_shinfo(skb)->frag_list == NULL)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			if (list == NULL)
				BUG();

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (clone == NULL)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (pskb_pull(list, eat) == NULL) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb->tail;
}
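
/*
 * Illustrative sketch, not part of the original file: the usual caller of
 * __pskb_pull_tail() is pskb_may_pull() (an inline in <linux/skbuff.h>),
 * which invokes it only when the requested bytes are not already linear.
 * A protocol handler uses it like this before touching a fixed-size
 * header; the 8-byte length is hypothetical.
 */
static int example_parse(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 8))
		return -EINVAL;		/* packet shorter than the header */

	/* The first 8 bytes are now guaranteed to sit in the skb head. */
	return skb->data[0];
}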
/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb->len - skb->data_len;

	if (offset > (int)skb->len-len)
		goto fault;

	/* Copy header. */
	if ((copy = start-offset) > 0) {
		if (copy > len)
			copy = len;
		memcpy(to, skb->data + offset, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset+len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end-offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to, vaddr+skb_shinfo(skb)->frags[i].page_offset+
			       offset-start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
			int end;

			BUG_TRAP(start <= offset+len);

			end = start + list->len;
			if ((copy = end-offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset-start, to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
			}
			start = end;
		}
	}
	if (len == 0)
		return 0;

fault:
	return -EFAULT;
}
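
/*
 * Illustrative sketch, not part of the original file: skb_copy_bits()
 * gathers bytes no matter how they are split between the linear head,
 * the page frags, and the frag list. A negative offset reaches back into
 * the headroom, which is how skb_copy() earlier in this file picks up
 * the headers as well. buf and n are hypothetical caller state.
 */
static int example_peek(const struct sk_buff *skb, char *buf, int n)
{
	if (n > skb->len)
		return -EINVAL;
	return skb_copy_bits(skb, 0, buf, n);	/* 0 = start of packet data */
}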
/* Checksum skb data. */

unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum)
{
	int i, copy;
	int start = skb->len - skb->data_len;
	int pos = 0;

	/* Checksum header. */
	if ((copy = start-offset) > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data+offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset+len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end-offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset-start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
			int end;

			BUG_TRAP(start <= offset+len);

			end = start + list->len;
			if ((copy = end-offset) > 0) {
				unsigned int csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset-start, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (len == 0)
		return csum;

	BUG();
	return csum;
}
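
/*
 * Illustrative sketch, not part of the original file: folding the running
 * 32-bit sum that skb_checksum() returns into a final 16-bit Internet
 * checksum. A real UDP/TCP caller would seed the sum with its
 * pseudo-header checksum instead of the 0 used here.
 */
static unsigned short example_csum(const struct sk_buff *skb)
{
	unsigned int sum = skb_checksum(skb, 0, skb->len, 0);

	return csum_fold(sum);	/* one's-complement fold to 16 bits */
}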
/* Both of the above in one bottle. */

unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum)
{
	int i, copy;
	int start = skb->len - skb->data_len;
	int pos = 0;

	/* Copy header. */
	if ((copy = start-offset) > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data+offset, to, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset+len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end-offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset +
							  offset-start, to, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
			unsigned int csum2;
			int end;

			BUG_TRAP(start <= offset+len);

			end = start + list->len;
			if ((copy = end-offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list, offset-start, to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (len == 0)
		return csum;

	BUG();
	return csum;
}
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	unsigned int csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_HW)
		csstart = skb->h.raw - skb->data;
	else
		csstart = skb->len - skb->data_len;

	if (csstart > skb->len - skb->data_len)
		BUG();

	memcpy(to, skb->data, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to+csstart,
					      skb->len-csstart, 0);

	if (skb->ip_summed == CHECKSUM_HW) {
		long csstuff = csstart + skb->csum;

		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
	}
}
#if 0
/*
 *	Tune the memory allocator for a new MTU size.
 */
void skb_add_mtu(int mtu)
{
	/* Must match allocation in alloc_skb */
	mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);

	kmem_add_cache_size(mtu);
}
#endif
void __init skb_init(void)
{
	int i;

	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN,
					      skb_headerinit, NULL);
	if (!skbuff_head_cache)
		panic("cannot create skbuff cache");

	for (i=0; i<NR_CPUS; i++)
		skb_queue_head_init(&skb_head_pool[i].list);
}