 {
 	if (*name == '\0')
 		return 0;
+	if (strlen(name) >= IFNAMSIZ)
+		return 0;
 	if (!strcmp(name, ".") || !strcmp(name, ".."))
 		return 0;
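For context, the hunk above lands in dev_valid_name(); here is a minimal sketch of the whole validator with the new length check folded in, assuming the rest of the 2.6.x-era body (the trailing character loop) is unchanged:

/* Sketch of dev_valid_name() with the new bound; surrounding lines are
 * assumed from the 2.6.x source, not part of the patch above. */
static int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)	/* must fit dev->name */
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

The >= (rather than >) matters: IFNAMSIZ counts the terminating NUL, so a name of exactly IFNAMSIZ characters would not fit in dev->name.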
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
  */
-int skb_checksum_help(struct sk_buff *skb, int inward)
+int skb_checksum_help(struct sk_buff *skb)
 {
 	unsigned int csum;
 	int ret = 0, offset = skb->h.raw - skb->data;
 
-	if (inward)
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		goto out_set_summed;
 
 	if (unlikely(skb_shinfo(skb)->gso_size)) {
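The rename splits the old, direction-ambiguous CHECKSUM_HW in two: CHECKSUM_PARTIAL (transmit: the checksum at skb->h.raw + skb->csum still has to be filled in) and CHECKSUM_COMPLETE (receive: hardware already summed the packet), which is why the inward flag can go away. As a hedged sketch, this is the completion work the function performs on the transmit path, assuming the 2.6.x skb field names visible above:

	/* Sketch of the completion step, assuming the 2.6.x skb layout
	 * (skb->h.raw, skb->csum) shown in the hunk above. */
	csum = skb_checksum(skb, offset, skb->len - offset, 0);	/* sum payload */

	offset = skb->tail - skb->h.raw;	/* room left after the headers */
	BUG_ON(offset <= 0);
	BUG_ON(skb->csum + 2 > offset);		/* checksum field must fit */

	*(u16 *)(skb->h.raw + skb->csum) = csum_fold(csum);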
 	skb->mac_len = skb->nh.raw - skb->data;
 	__skb_pull(skb, skb->mac_len);
 
-	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
 		if (skb_header_cloned(skb) &&
 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
 			return ERR_PTR(err);
 	}
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
 		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
-			if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
 				err = ptype->gso_send_check(skb);
 				segs = ERR_PTR(err);
 				if (err || skb_gso_ok(skb, features))
 					break;
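The dispatch loop above reaches the GSO callbacks through the protocol's packet_type. A hedged illustration of how a protocol supplies them, modeled on IPv4's registration from the same kernel era (exact field set assumed, not taken from this patch):

/* Illustrative registration, modeled on net/ipv4/af_inet.c of that era. */
static struct packet_type ip_packet_type = {
	.type		= __constant_htons(ETH_P_IP),
	.func		= ip_rcv,
	.gso_send_check	= inet_gso_send_check,	/* fix up checksum state first */
	.gso_segment	= inet_gso_segment,	/* then split into segments */
};

dev_add_pack(&ip_packet_type);

With the rename, gso_send_check runs only when the skb is not already CHECKSUM_PARTIAL, i.e. when its checksum state still needs preparing before segmentation.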
 	/* If packet is not checksummed and device does not support
 	 * checksumming for this protocol, complete checksumming here.
 	 */
-	if (skb->ip_summed == CHECKSUM_HW &&
+	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    (!(dev->features & NETIF_F_GEN_CSUM) &&
 	     (!(dev->features & NETIF_F_IP_CSUM) ||
 	      skb->protocol != htons(ETH_P_IP))))
-		if (skb_checksum_help(skb, 0))
+		if (skb_checksum_help(skb))
 			goto out_kfree_skb;
 
 gso:
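The four-line condition above decides whether software must complete the checksum. A hedged restatement as a predicate (the helper name is hypothetical, not from the patch):

/* Hypothetical helper: can this device checksum this skb in hardware? */
static inline int dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (dev->features & NETIF_F_GEN_CSUM)		/* any protocol */
		return 1;
	if ((dev->features & NETIF_F_IP_CSUM) &&	/* IPv4 only */
	    skb->protocol == htons(ETH_P_IP))
		return 1;
	return 0;
}

skb_checksum_help() then runs only for CHECKSUM_PARTIAL skbs that fail this test.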
 	struct net_device *dev;
 	int alloc_size;
 
+	BUG_ON(strlen(name) >= sizeof(dev->name));
+
 	/* ensure 32-byte alignment of both the device and private area */
 	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
 	alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
 
 	p = kzalloc(alloc_size, GFP_KERNEL);
 	if (!p) {
-		printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
+		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
 		return NULL;
 	}
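The new BUG_ON makes alloc_netdev()'s contract explicit: the name template passed in must already fit in dev->name (IFNAMSIZ bytes). A hedged usage sketch; the private struct and setup function here are illustrative only:

/* Illustrative caller; my_priv and my_setup are examples, not kernel APIs. */
struct net_device *dev;

dev = alloc_netdev(sizeof(struct my_priv), "eth%d", my_setup);
if (!dev)
	return -ENOMEM;	/* allocation failed; the printk above fired */

A template like "eth%d" is fine because the %d is expanded in place later (via dev_alloc_name() at registration time), still within IFNAMSIZ.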