/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */
#define SLAB_SKB		/* Slabified skbuffs */

#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2
#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
#define SKB_MAX_ORDER(X,ORDER)	(((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X),0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0,2))
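
/*
 * Worked example (illustrative, assuming SMP_CACHE_BYTES == 32):
 * SKB_DATA_ALIGN(100) rounds 100 up to the next cache-line multiple:
 *
 *	(100 + 31) & ~31  ==  131 & ~31  ==  128
 *
 * SKB_MAX_HEAD(X) is then the largest cache-aligned payload that still
 * fits in one page after X bytes of reserve and the trailing
 * struct skb_shared_info.
 */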
/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and would have verified the
 *		checksum. skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	HW: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use HW,
 *	    not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	HW: device is required to csum packet as seen by hard_start_xmit
 *	from skb->h.raw to the end and to record the checksum
 *	at skb->h.raw+skb->csum.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- the device is clever and is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	- the device is dumb and is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for an unknown reason. Though, see comment above
 *			  about CHECKSUM_UNNECESSARY. 8)
 *
 *	Any questions? No questions, good.		--ANK
 */
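
/*
 * A minimal receive-path sketch (hypothetical driver, for illustration
 * only): a device that verified the checksum in hardware would mark
 * the buffer CHECKSUM_UNNECESSARY; one that computed a raw sum over
 * the whole packet would use CHECKSUM_HW and fill in skb->csum; a
 * device with no checksum help leaves CHECKSUM_NONE. `hw_verified' is
 * an assumed device flag.
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	if (hw_verified)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;
 *	netif_rx(skb);
 */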
#ifdef __i386__
#define NET_CALLER(arg) (*(((void **)&arg) - 1))
#else
#define NET_CALLER(arg) __builtin_return_address(0)
#endif
#ifdef CONFIG_NETFILTER
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

struct nf_ct_info {
	struct nf_conntrack *master;
};
#endif
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};
struct sk_buff;

#define MAX_SKB_FRAGS 6

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct
{
	struct page *page;
	__u16 page_offset;
	__u16 size;
};
/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned int	nr_frags;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};
struct sk_buff {
	/* These two members must be first. */
	struct sk_buff	*next;			/* Next buffer in list			*/
	struct sk_buff	*prev;			/* Previous buffer in list		*/

	struct sk_buff_head *list;		/* List we are on			*/
	struct sock	*sk;			/* Socket we are owned by		*/
	struct timeval	stamp;			/* Time we arrived			*/
	struct net_device *dev;		/* Device we arrived on/are leaving by	*/

	/* Transport layer header */
	union
	{
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		unsigned char	*raw;
	} h;

	/* Network layer header */
	union
	{
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	/* Link layer header */
	union
	{
		struct ethhdr	*ethernet;
		unsigned char	*raw;
	} mac;

	struct dst_entry *dst;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char		cb[48];

	unsigned int	len;			/* Length of actual data		*/
	unsigned int	data_len;		/* Data bytes in the non-linear part	*/
	unsigned int	csum;			/* Checksum				*/
	unsigned char	__unused,		/* Dead field, may be reused		*/
			cloned,			/* head may be cloned (check refcnt to be sure) */
			pkt_type,		/* Packet class				*/
			ip_summed;		/* Driver fed us an IP checksum		*/
	__u32		priority;		/* Packet queueing priority		*/
	atomic_t	users;			/* User count - see datagram.c,tcp.c	*/
	unsigned short	protocol;		/* Packet protocol from driver		*/
	unsigned short	security;		/* Security level of packet		*/
	unsigned int	truesize;		/* Buffer size				*/

	unsigned char	*head;			/* Head of buffer			*/
	unsigned char	*data;			/* Data head pointer			*/
	unsigned char	*tail;			/* Tail pointer				*/
	unsigned char	*end;			/* End pointer				*/
	void		(*destructor)(struct sk_buff *); /* Destruct function	*/
#ifdef CONFIG_NETFILTER
	/* Can be used for communication between hooks. */
	unsigned long	nfmark;
	/* Cache info */
	__u32		nfcache;
	/* Associated connection, if any */
	struct nf_ct_info *nfct;
#ifdef CONFIG_NETFILTER_DEBUG
	unsigned int	nf_debug;
#endif
#endif /* CONFIG_NETFILTER */

#if defined(CONFIG_HIPPI)
	union {
		__u32	ifield;
	} private;
#endif

#ifdef CONFIG_NET_SCHED
	__u32		tc_index;		/* traffic control index		*/
	struct timeval	vsTime;			/* Virtual start time of this packet	*/
	struct timeval	vfTime;			/* Virtual finish time of this packet	*/
#endif
};
#define SK_WMEM_MAX	65535
#define SK_RMEM_MAX	65535

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>
extern void		__kfree_skb(struct sk_buff *skb);
extern struct sk_buff *	alloc_skb(unsigned int size, int priority);
extern void		kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *	skb_clone(struct sk_buff *skb, int priority);
extern struct sk_buff *	skb_copy(const struct sk_buff *skb, int priority);
extern struct sk_buff *	pskb_copy(struct sk_buff *skb, int gfp_mask);
extern int		pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
extern struct sk_buff *	skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
extern struct sk_buff *	skb_copy_expand(const struct sk_buff *skb,
					int newheadroom,
					int newtailroom,
					int priority);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	skb_over_panic(struct sk_buff *skb, int len, void *here);
extern void	skb_under_panic(struct sk_buff *skb, int len, void *here);

/* Internal */
#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))
/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */

static inline int skb_queue_empty(struct sk_buff_head *list)
{
	return (list->next == (struct sk_buff *) list);
}
/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */

static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}
/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */
/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */

static inline void kfree_skb(struct sk_buff *skb)
{
	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
		__kfree_skb(skb);
}
/* Use this if you didn't touch the skb state [for fast switching] */
static inline void kfree_skb_fast(struct sk_buff *skb)
{
	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
		kfree_skbmem(skb);
}
/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */

static inline int skb_cloned(struct sk_buff *skb)
{
	return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
}
/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */

static inline int skb_shared(struct sk_buff *skb)
{
	return (atomic_read(&skb->users) != 1);
}
/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be %GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */

static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
	if (skb_shared(skb)) {
		struct sk_buff *nskb;
		nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		return nskb;
	}
	return skb;
}
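
/*
 * Typical use (sketch, with a hypothetical handler name): a protocol
 * handler that may modify the buffer first drops any sharing, so it
 * ends up holding the only reference before writing.
 *
 *	static int my_rcv(struct sk_buff *skb)
 *	{
 *		skb = skb_share_check(skb, GFP_ATOMIC);
 *		if (skb == NULL)
 *			return 0;
 *		... modify skb freely; we hold the only reference ...
 *	}
 */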
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */
/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */

static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
	struct sk_buff *nskb;

	if (!skb_cloned(skb))
		return skb;
	nskb = skb_copy(skb, pri);
	kfree_skb(skb);	/* Free our shared copy */
	return nskb;
}
/**
 *	skb_peek - peek at the head of a list
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */

static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}
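
/*
 * Sketch of a safe peek: hold the queue lock so the buffer cannot be
 * dequeued (and freed) under us while we look at it. `queue' is a
 * hypothetical &sk_buff_head.
 *
 *	unsigned long flags;
 *	struct sk_buff *skb;
 *
 *	spin_lock_irqsave(&queue->lock, flags);
 *	skb = skb_peek(queue);
 *	if (skb)
 *		... inspect skb, do not unlink or free it here ...
 *	spin_unlock_irqrestore(&queue->lock, flags);
 */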
/**
 *	skb_peek_tail - peek at the tail of a list
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */

static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}
/**
 *	skb_queue_len - get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */

static inline __u32 skb_queue_len(struct sk_buff_head *list_)
{
	return list_->qlen;
}
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = (struct sk_buff *)list;
	list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */

static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	prev = (struct sk_buff *)list;
	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
}
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */

static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */

static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	newsk->list = list;
	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
}
/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */

static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */

static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result = next;
		next = next->next;
		list->qlen--;
		next->prev = prev;
		prev->next = next;
		result->next = NULL;
		result->prev = NULL;
		result->list = NULL;
	}
	return result;
}
/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
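
/*
 * The locked enqueue/dequeue pair gives a simple producer/consumer
 * queue. A sketch, where `rxq' and process() are hypothetical:
 *
 *	skb_queue_head_init(&rxq);		(once, at setup)
 *
 *	skb_queue_tail(&rxq, skb);		(producer, e.g. irq handler)
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		process(skb);			(consumer)
 */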
/*
 *	Insert a packet on a list.
 */

static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
	newsk->list = list;
	list->qlen++;
}
/**
 *	skb_insert - insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *
 *	Place a packet before a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */

static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_insert(newsk, old->prev, old, old->list);
	spin_unlock_irqrestore(&old->list->lock, flags);
}
/*
 *	Place a packet after a given packet in a list.
 */

static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	__skb_insert(newsk, old, old->next, old->list);
}
/**
 *	skb_append - append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */

static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_append(old, newsk);
	spin_unlock_irqrestore(&old->list->lock, flags);
}
/*
 * Remove an sk_buff from a list. _Must_ be called atomically, and with
 * the list known.
 */

static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = NULL;
	skb->prev = NULL;
	skb->list = NULL;
	next->prev = prev;
	prev->next = next;
}
/**
 *	skb_unlink - remove a buffer from a list
 *	@skb: buffer to remove
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	Works even without knowing the list it is sitting on, which can be
 *	handy at times. It also means that THE LIST MUST EXIST when you
 *	unlink. Thus a list must have its contents unlinked before it is
 *	destroyed.
 */

static inline void skb_unlink(struct sk_buff *skb)
{
	struct sk_buff_head *list = skb->list;

	if (list) {
		unsigned long flags;

		spin_lock_irqsave(&list->lock, flags);
		if (skb->list == list)
			__skb_unlink(skb, skb->list);
		spin_unlock_irqrestore(&list->lock, flags);
	}
}
/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */

static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */

static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}
#define SKB_PAGE_ASSERT(skb)	do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)
#define SKB_FRAG_ASSERT(skb)	do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)
#define SKB_LINEAR_ASSERT(skb)	do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)
/*
 *	Add data to an sk_buff
 */

static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	return tmp;
}
/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */

static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (skb->tail > skb->end) {
		skb_over_panic(skb, len, current_text_addr());
	}
	return tmp;
}
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	return skb->data;
}
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */

static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (skb->data < skb->head) {
		skb_under_panic(skb, len, current_text_addr());
	}
	return skb->data;
}
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	if (skb->len < skb->data_len)
		BUG();
	return skb->data += len;
}
/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */

static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb->len)
		return NULL;
	return __skb_pull(skb, len);
}
extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    __pskb_pull_tail(skb, len - skb_headlen(skb)) == NULL)
		return NULL;
	skb->len -= len;
	return skb->data += len;
}
static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb->len)
		return NULL;
	return __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (len <= skb_headlen(skb))
		return 1;
	if (len > skb->len)
		return 0;
	return (__pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL);
}
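
/*
 * Header parsing sketch: before touching bytes that may live in the
 * paged part, make sure they are linear. For instance, to read a full
 * IP header:
 *
 *	struct iphdr *iph;
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = skb->nh.iph;
 *
 * After a successful pskb_may_pull() the first sizeof(struct iphdr)
 * bytes are guaranteed to sit in the linear area at skb->data.
 */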
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */

static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}
/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an &sk_buff.
 */

static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}
/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */

static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
{
	skb->data += len;
	skb->tail += len;
}
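
/*
 * Classic construction sketch: reserve room for lower-layer headers,
 * copy in the payload with skb_put(), then let each layer prepend its
 * header with skb_push(). The names hdr_room, payload, payload_len and
 * struct my_hdr are illustrative only.
 *
 *	skb = alloc_skb(hdr_room + payload_len, GFP_ATOMIC);
 *	if (skb == NULL)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_room);
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	skb_push(skb, sizeof(struct my_hdr));
 */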
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (!skb->data_len) {
		skb->len = len;
		skb->tail = skb->data + len;
	} else {
		___pskb_trim(skb, len, 0);
	}
}
/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 */

static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len) {
		__skb_trim(skb, len);
	}
}
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (!skb->data_len) {
		skb->len = len;
		skb->tail = skb->data + len;
		return 0;
	} else {
		return ___pskb_trim(skb, len, 1);
	}
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		return __pskb_trim(skb, len);
	return 0;
}
/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */

static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}
/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */

static inline void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */

static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */

static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      int gfp_mask)
{
	struct sk_buff *skb;

	skb = alloc_skb(length + 16, gfp_mask);
	if (skb)
		skb_reserve(skb, 16);
	return skb;
}
/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */

static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
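
/*
 * Receive-allocation sketch for a driver (pkt_len, rx_buf and stats
 * are illustrative): the built-in headroom leaves room for the
 * 14-byte ethernet header so the IP header ends up aligned.
 *
 *	skb = dev_alloc_skb(pkt_len);
 *	if (skb == NULL) {
 *		stats.rx_dropped++;
 *		return;
 *	}
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */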
/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */

static inline int
skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
	return 0;
}
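
/*
 * Usage sketch: code about to rewrite headers in place (say, a
 * hypothetical NAT-like hook) calls skb_cow() first so clones and
 * tight headroom are handled before any write.
 *
 *	if (skb_cow(skb, dev->hard_header_len))
 *		goto drop;
 *	... skb->head..skb->tail is now private and writable ...
 */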
/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *	@gfp: allocation mode
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
int skb_linearize(struct sk_buff *skb, int gfp);
static inline void *kmap_skb_frag(const skb_frag_t *frag)
{
#ifdef CONFIG_HIGHMEM
	if (in_irq())
		out_of_line_bug();

	local_bh_disable();
#endif
	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
}

static inline void kunmap_skb_frag(void *vaddr)
{
	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
#ifdef CONFIG_HIGHMEM
	local_bh_enable();
#endif
}
#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;			\
		     (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)
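
/*
 * Iteration sketch: the walk must be done under the queue lock (or on
 * a private list), since it follows the raw next pointers. `queue',
 * `flags' and `len_total' are illustrative.
 *
 *	struct sk_buff *skb;
 *
 *	spin_lock_irqsave(&queue->lock, flags);
 *	skb_queue_walk(queue, skb) {
 *		len_total += skb->len;
 *	}
 *	spin_unlock_irqrestore(&queue->lock, flags);
 */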
extern struct sk_buff *	skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err);
extern unsigned int	datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern int		skb_copy_datagram(const struct sk_buff *from, int offset, char *to, int size);
extern int		skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to, int size);
extern int		skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
extern int		skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
extern void		skb_free_datagram(struct sock *sk, struct sk_buff *skb);

extern unsigned int	skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
extern int		skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
extern unsigned int	skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
extern void		skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);

extern void skb_init(void);
extern void skb_add_mtu(int mtu);
#ifdef CONFIG_NETFILTER
static inline void
nf_conntrack_put(struct nf_ct_info *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->master->use))
		nfct->master->destroy(nfct->master);
}

static inline void
nf_conntrack_get(struct nf_ct_info *nfct)
{
	if (nfct)
		atomic_inc(&nfct->master->use);
}
#endif
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */