X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=include%2Flinux%2Fskbuff.h;h=85577a4ffa618d5e319487ab396648a485c6223c;hb=eb878e84575fbce21d2edb079eada78bfa27023d;hp=57d7d4965f9aa0fc13a1cd10c06501d90c67e844;hpb=026477c1141b67e98e3bd8bdedb7d4b88a3ecd09;p=powerpc.git

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 57d7d4965f..85577a4ffa 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -34,8 +34,9 @@
 #define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */
 
 #define CHECKSUM_NONE 0
-#define CHECKSUM_HW 1
+#define CHECKSUM_PARTIAL 1
 #define CHECKSUM_UNNECESSARY 2
+#define CHECKSUM_COMPLETE 3
 
 #define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
 				 ~(SMP_CACHE_BYTES - 1))
@@ -56,17 +57,17 @@
  *	Apparently with secret goal to sell you new device, when you
  *	will add new protocol to your host. F.e. IPv6. 8)
  *
- *	HW: the most generic way. Device supplied checksum of _all_
+ *	COMPLETE: the most generic way. Device supplied checksum of _all_
  *	    the packet as seen by netif_rx in skb->csum.
  *	    NOTE: Even if device supports only some protocols, but
- *	    is able to produce some skb->csum, it MUST use HW,
+ *	    is able to produce some skb->csum, it MUST use COMPLETE,
  *	    not UNNECESSARY.
  *
  * B. Checksumming on output.
  *
  *	NONE: skb is checksummed by protocol or csum is not required.
  *
- *	HW: device is required to csum packet as seen by hard_start_xmit
+ *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
  *	from skb->h.raw to the end and to record the checksum
  *	at skb->h.raw+skb->csum.
  *
@@ -604,6 +605,14 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 	return list_->qlen;
 }
 
+/*
+ * This function creates a split out lock class for each invocation;
+ * this is needed for now since a whole lot of users of the skb-queue
+ * infrastructure in drivers have different locking usage (in hardirq)
+ * than the networking core (in softirq only). In the long run either the
+ * network layer or drivers should need annotation to consolidate the
+ * main types of usage into 3 classes.
+ */
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
 	spin_lock_init(&list->lock);
@@ -1031,6 +1040,21 @@ static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
 	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
 }
 
+/**
+ *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
+ *	@skb: buffer to alter
+ *	@len: new length
+ *
+ *	This is identical to pskb_trim except that the caller knows that
+ *	the skb is not cloned so we should never get an error due to out-
+ *	of-memory.
+ */
+static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
+{
+	int err = pskb_trim(skb, len);
+	BUG_ON(err);
+}
+
 /**
  *	skb_orphan - orphan a buffer
  *	@skb: buffer to orphan
@@ -1063,9 +1087,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 		kfree_skb(skb);
 }
 
-#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
 /**
- *	__dev_alloc_skb - allocate an skbuff for sending
+ *	__dev_alloc_skb - allocate an skbuff for receiving
  *	@length: length to allocate
  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
  *
@@ -1074,7 +1097,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  *	the headroom they think they need without accounting for the
  *	built in space. The built in space is used for optimisations.
  *
- *	%NULL is returned in there is no free memory.
+ *	%NULL is returned if there is no free memory.
  */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 					      gfp_t gfp_mask)
@@ -1084,12 +1107,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 	skb_reserve(skb, NET_SKB_PAD);
 	return skb;
 }
-#else
-extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
-#endif
 
 /**
- *	dev_alloc_skb - allocate an skbuff for sending
+ *	dev_alloc_skb - allocate an skbuff for receiving
  *	@length: length to allocate
  *
  *	Allocate a new &sk_buff and assign it a usage count of one. The
@@ -1097,7 +1117,7 @@ extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
  *	the headroom they think they need without accounting for the
  *	built in space. The built in space is used for optimisations.
  *
- *	%NULL is returned in there is no free memory. Although this function
+ *	%NULL is returned if there is no free memory. Although this function
  *	allocates memory it can be called from an interrupt.
  */
 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
@@ -1105,6 +1125,28 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
 	return __dev_alloc_skb(length, GFP_ATOMIC);
 }
 
+extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+		unsigned int length, gfp_t gfp_mask);
+
+/**
+ *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *	@dev: network device to receive on
+ *	@length: length to allocate
+ *
+ *	Allocate a new &sk_buff and assign it a usage count of one. The
+ *	buffer has unspecified headroom built in. Users should allocate
+ *	the headroom they think they need without accounting for the
+ *	built in space. The built in space is used for optimisations.
+ *
+ *	%NULL is returned if there is no free memory. Although this function
+ *	allocates memory it can be called from an interrupt.
+ */
+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
+		unsigned int length)
+{
+	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+}
+
 /**
  *	skb_cow - copy header of skb when it is required
  *	@skb: buffer to cow
@@ -1220,14 +1262,14 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
  *	@len: length of data pulled
  *
  *	After doing a pull on a received packet, you need to call this to
- *	update the CHECKSUM_HW checksum, or set ip_summed to CHECKSUM_NONE
- *	so that it can be recomputed from scratch.
+ *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
+ *	CHECKSUM_NONE so that it can be recomputed from scratch.
  */
 
 static inline void skb_postpull_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
-	if (skb->ip_summed == CHECKSUM_HW)
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum,
 				     csum_partial(start, len, 0));
 }
@@ -1246,7 +1288,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 {
 	if (likely(len >= skb->len))
 		return 0;
-	if (skb->ip_summed == CHECKSUM_HW)
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->ip_summed = CHECKSUM_NONE;
 	return __pskb_trim(skb, len);
 }
@@ -1452,5 +1494,10 @@ static inline void skb_init_secmark(struct sk_buff *skb)
 { }
 #endif
 
+static inline int skb_is_gso(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_size;
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
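
Note, not part of the patch: the hunks above add netdev_alloc_skb() as the
preferred rx allocator and correct the dev_alloc_skb() kernel-doc from
"sending" to "receiving". A minimal sketch of how a driver's receive path
might use the new helper follows; the function mydrv_rx, its arguments and
its error handling are invented for illustration and are not from the tree.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Hypothetical receive path; mydrv_rx and its arguments are invented. */
static int mydrv_rx(struct net_device *dev, const void *buf,
		    unsigned int frame_len)
{
	/* netdev_alloc_skb() reserves NET_SKB_PAD of headroom and sets
	 * skb->dev; NET_IP_ALIGN keeps the IP header aligned past the
	 * 14-byte Ethernet header. */
	struct sk_buff *skb = netdev_alloc_skb(dev, frame_len + NET_IP_ALIGN);

	if (skb == NULL)
		return -ENOMEM;		/* %NULL when no free memory */

	skb_reserve(skb, NET_IP_ALIGN);
	memcpy(skb_put(skb, frame_len), buf, frame_len);

	skb->protocol = eth_type_trans(skb, dev);
	/* This device verified nothing; a NIC that had checksummed the
	 * whole frame would set skb->csum and CHECKSUM_COMPLETE instead. */
	skb->ip_summed = CHECKSUM_NONE;

	return netif_rx(skb);
}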
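On the transmit side, the rename makes the direction explicit: COMPLETE
describes a checksum the device already computed on receive, while PARTIAL
asks the device to finish one on transmit. A sketch of how a driver might
consume CHECKSUM_PARTIAL and the new skb_is_gso() helper follows; the
descriptor struct, its fields and the MYDRV_TX_CSUM flag are invented for
illustration.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical tx descriptor; the layout is invented. */
struct mydrv_tx_desc {
	u8	flags;
	u16	csum_start;	/* offset of skb->h.raw in the frame     */
	u16	csum_offset;	/* where to store the result (skb->csum) */
};

#define MYDRV_TX_CSUM	0x01	/* invented "insert checksum" flag */

static void mydrv_fill_tx_desc(struct mydrv_tx_desc *desc,
			       const struct sk_buff *skb)
{
	desc->flags = 0;

	if (skb_is_gso(skb)) {
		/* a real driver would also program the segment size
		 * from skb_shinfo(skb)->gso_size here */
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Checksum from skb->h.raw to the end of the packet and
		 * store the result at skb->h.raw + skb->csum, exactly as
		 * the comment block in the diff specifies. */
		desc->flags |= MYDRV_TX_CSUM;
		desc->csum_start = skb->h.raw - skb->data;
		desc->csum_offset = skb->csum;
	}
}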