From: Linus Torvalds
Date: Sun, 11 Feb 2007 19:38:13 +0000 (-0800)
Subject: Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
X-Git-Tag: v2.6.21-rc1~91^2~240
X-Git-Url: http://git.rot13.org/?a=commitdiff_plain;h=cb18eccff48ef3986d1072964590bce6fec705fb;hp=-c;p=powerpc.git

Merge /pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (45 commits)
  [IPV4]: Restore multipath routing after rt_next changes.
  [XFRM] IPV6: Fix outbound RO transformation which is broken by IPsec tunnel patch.
  [NET]: Reorder fields of struct dst_entry
  [DECNET]: Convert decnet route to use the new dst_entry 'next' pointer
  [IPV6]: Convert ipv6 route to use the new dst_entry 'next' pointer
  [IPV4]: Convert ipv4 route to use the new dst_entry 'next' pointer
  [NET]: Introduce union in struct dst_entry to hold 'next' pointer
  [DECNET]: fix misannotation of linkinfo_dn
  [DECNET]: FRA_{DST,SRC} are le16 for decnet
  [UDP]: UDP can use sk_hash to speedup lookups
  [NET]: Fix whitespace errors.
  [NET] XFRM: Fix whitespace errors.
  [NET] X25: Fix whitespace errors.
  [NET] WANROUTER: Fix whitespace errors.
  [NET] UNIX: Fix whitespace errors.
  [NET] TIPC: Fix whitespace errors.
  [NET] SUNRPC: Fix whitespace errors.
  [NET] SCTP: Fix whitespace errors.
  [NET] SCHED: Fix whitespace errors.
  [NET] RXRPC: Fix whitespace errors.
  ...
---

cb18eccff48ef3986d1072964590bce6fec705fb
diff --combined net/core/dst.c
index f9eace78d3,0ab9a981fc..61dd9d3951
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@@ -29,7 -29,7 +29,7 @@@
  * 4) All operations modify state, so a spinlock is used.
  */
 static struct dst_entry *dst_garbage_list;
- #if RT_CACHE_DEBUG >= 2
+ #if RT_CACHE_DEBUG >= 2
 static atomic_t dst_total = ATOMIC_INIT(0);
 #endif
 static DEFINE_SPINLOCK(dst_lock);
@@@ -132,16 -132,17 +132,16 @@@ void * dst_alloc(struct dst_ops * ops
 		if (ops->gc())
 			return NULL;
 	}
-	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
 	if (!dst)
 		return NULL;
-	memset(dst, 0, ops->entry_size);
 	atomic_set(&dst->__refcnt, 0);
 	dst->ops = ops;
 	dst->lastuse = jiffies;
 	dst->path = dst;
 	dst->input = dst_discard_in;
 	dst->output = dst_discard_out;
- #if RT_CACHE_DEBUG >= 2
+ #if RT_CACHE_DEBUG >= 2
 	atomic_inc(&dst_total);
 #endif
 	atomic_inc(&ops->entries);
@@@ -202,7 -203,7 +202,7 @@@ again
 	dst->ops->destroy(dst);
 	if (dst->dev)
 		dev_put(dst->dev);
- #if RT_CACHE_DEBUG >= 2
+ #if RT_CACHE_DEBUG >= 2
 	atomic_dec(&dst_total);
 #endif
 	kmem_cache_free(dst->ops->kmem_cachep, dst);
diff --combined net/core/neighbour.c
index efb673ad18,c5f161e795..c08d696505
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@@ -251,10 -251,12 +251,10 @@@ static struct neighbour *neigh_alloc(st
 		goto out_entries;
 	}

-	n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
+	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
 	if (!n)
 		goto out_entries;

-	memset(n, 0, tbl->entry_size);
-
 	skb_queue_head_init(&n->arp_queue);
 	rwlock_init(&n->lock);
 	n->updated = n->used = now;
@@@ -343,7 -345,7 +343,7 @@@ struct neighbour *neigh_lookup(struct n
 	struct neighbour *n;
 	int key_len = tbl->key_len;
 	u32 hash_val = tbl->hash(pkey, dev);
-
+
 	NEIGH_CACHE_STAT_INC(tbl, lookups);

 	read_lock_bh(&tbl->lock);
@@@ -685,9 -687,9 +685,9 @@@ next_elt
 		np = &n->next;
 	}

-	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
-	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
-	 * base_reachable_time.
+	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
+	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
+	 * base_reachable_time.
 	 */
 	expire = tbl->parms.base_reachable_time >> 1;
 	expire /= (tbl->hash_mask + 1);
@@@ -742,7 -744,7 +742,7 @@@ static void neigh_timer_handler(unsigne
 	}

 	if (state & NUD_REACHABLE) {
-		if (time_before_eq(now,
+		if (time_before_eq(now,
 				   neigh->confirmed + neigh->parms->reachable_time)) {
 			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
 			next = neigh->confirmed + neigh->parms->reachable_time;
@@@ -761,7 -763,7 +761,7 @@@
 			notify = 1;
 		}
 	} else if (state & NUD_DELAY) {
-		if (time_before_eq(now,
+		if (time_before_eq(now,
 				   neigh->confirmed + neigh->parms->delay_probe_time)) {
 			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
 			neigh->nud_state = NUD_REACHABLE;
@@@ -847,7 -849,7 +847,7 @@@ int __neigh_event_send(struct neighbou
 		goto out_unlock_bh;

 	now = jiffies;
-
+
 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
@@@ -915,13 -917,13 +915,13 @@@ static void neigh_update_hhs(struct nei
 	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
 				if it is different.
 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
-				lladdr instead of overriding it
+				lladdr instead of overriding it
 				if it is different.
 				It also allows to retain current state
 				if lladdr is unchanged.
 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

-	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
+	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
 				NTF_ROUTER flag.
 	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
 				a router.
@@@ -944,7 -946,7 +944,7 @@@ int neigh_update(struct neighbour *neig
 	old = neigh->nud_state;
 	err = -EPERM;

-	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
 	    (old & (NUD_NOARP | NUD_PERMANENT)))
 		goto out;
@@@ -968,7 -970,7 +968,7 @@@
 			   - compare new & old
 			   - if they are different, check override flag
 			 */
-			if ((old & NUD_VALID) &&
+			if ((old & NUD_VALID) &&
 			    !memcmp(lladdr, neigh->ha, dev->addr_len))
 				lladdr = neigh->ha;
 		} else {
@@@ -1012,8 -1014,8 +1012,8 @@@
 		neigh_del_timer(neigh);
 		if (new & NUD_IN_TIMER) {
 			neigh_hold(neigh);
-			neigh_add_timer(neigh, (jiffies +
-						((new & NUD_REACHABLE) ?
+			neigh_add_timer(neigh, (jiffies +
+						((new & NUD_REACHABLE) ?
 						 neigh->parms->reachable_time : 0)));
 	}

@@@ -1075,7 -1077,7 +1075,7 @@@ struct neighbour *neigh_event_ns(struc
 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
 						 lladdr || !dev->addr_len);
 	if (neigh)
-		neigh_update(neigh, lladdr, NUD_STALE,
+		neigh_update(neigh, lladdr, NUD_STALE,
 			     NEIGH_UPDATE_F_OVERRIDE);
 	return neigh;
 }
@@@ -1127,7 -1129,7 +1127,7 @@@ int neigh_compat_output(struct sk_buff
 	if (dev->hard_header &&
 	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
-			     skb->len) < 0 &&
+			     skb->len) < 0 &&
 	    dev->rebuild_header(skb))
 		return 0;
@@@ -1347,10 -1349,10 +1347,10 @@@ void neigh_table_init_no_netlink(struc
 	tbl->stats = alloc_percpu(struct neigh_statistics);
 	if (!tbl->stats)
 		panic("cannot create neighbour cache statistics");
-
+
 #ifdef CONFIG_PROC_FS
 	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
-	if (!tbl->pde)
+	if (!tbl->pde)
 		panic("cannot create neighbour proc dir entry");
 	tbl->pde->proc_fops = &neigh_stat_seq_fops;
 	tbl->pde->data = tbl;
@@@ -1565,7 -1567,7 +1565,7 @@@ int neigh_add(struct sk_buff *skb, stru
 			err = -ENOENT;
 			goto out_dev_put;
 		}
-
+
 		neigh = __neigh_lookup_errno(tbl, dst, dev);
 		if (IS_ERR(neigh)) {
 			err = PTR_ERR(neigh);
@@@ -1742,12 -1744,12 +1742,12 @@@ errout
 	nlmsg_cancel(skb, nlh);
 	return -EMSGSIZE;
 }
-
+
 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
 						      int ifindex)
 {
 	struct neigh_parms *p;
-
+
 	for (p = &tbl->parms; p; p = p->next)
 		if ((p->dev && p->dev->ifindex == ifindex) ||
 		    (!p->dev && !ifindex))
@@@ -1813,7 -1815,7 +1813,7 @@@ int neightbl_set(struct sk_buff *skb, s
 		goto errout_locked;
 	}

-	/*
+	/*
 	 * We acquire tbl->lock to be nice to the periodic timers and
 	 * make sure they always see a consistent set of values.
 	 */
@@@ -2321,7 -2323,7 +2321,7 @@@ static void *neigh_stat_seq_start(struc
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
-
+
 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
 		if (!cpu_possible(cpu))
 			continue;
@@@ -2629,7 -2631,7 +2629,7 @@@ static struct neigh_sysctl_table
 };

 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
-			  int p_id, int pdev_id, char *p_name,
+			  int p_id, int pdev_id, char *p_name,
 			  proc_handler *handler, ctl_handler *strategy)
 {
 	struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
@@@ -2661,7 -2663,7 +2661,7 @@@
 		t->neigh_vars[14].procname = NULL;
 		t->neigh_vars[15].procname = NULL;
 	} else {
-		dev_name_source = t->neigh_dev[0].procname;
+		dev_name_source = t->neigh_dev[0].procname;
 		t->neigh_vars[12].data = (int *)(p + 1);
 		t->neigh_vars[13].data = (int *)(p + 1) + 1;
 		t->neigh_vars[14].data = (int *)(p + 1) + 2;
@@@ -2696,7 -2698,7 +2696,7 @@@
 		goto free;
 	}

-	t->neigh_dev[0].procname = dev_name;
+	t->neigh_dev[0].procname = dev_name;

 	t->neigh_neigh_dir[0].ctl_name = pdev_id;
diff --combined net/decnet/dn_table.c
index 720501e1ae,0542015922..780a141f83
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@@ -60,7 -60,7 +60,7 @@@ struct dn_has
 #define dz_prefix(key,dz)	((key).datum)

 #define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
-	for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
+	for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)

 #define endfor_nexthops(fi) }
@@@ -290,82 -290,82 +290,82 @@@ static inline size_t dn_fib_nlmsg_size(
 }

 static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
-			u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
-			struct dn_fib_info *fi, unsigned int flags)
+			u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
+			struct dn_fib_info *fi, unsigned int flags)
 {
-	struct rtmsg *rtm;
-	struct nlmsghdr *nlh;
-	unsigned char *b = skb->tail;
-
-	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
-	rtm = NLMSG_DATA(nlh);
-	rtm->rtm_family = AF_DECnet;
-	rtm->rtm_dst_len = dst_len;
-	rtm->rtm_src_len = 0;
-	rtm->rtm_tos = 0;
-	rtm->rtm_table = tb_id;
+	struct rtmsg *rtm;
+	struct nlmsghdr *nlh;
+	unsigned char *b = skb->tail;
+
+	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
+	rtm = NLMSG_DATA(nlh);
+	rtm->rtm_family = AF_DECnet;
+	rtm->rtm_dst_len = dst_len;
+	rtm->rtm_src_len = 0;
+	rtm->rtm_tos = 0;
+	rtm->rtm_table = tb_id;
 	RTA_PUT_U32(skb, RTA_TABLE, tb_id);
-	rtm->rtm_flags = fi->fib_flags;
-	rtm->rtm_scope = scope;
+	rtm->rtm_flags = fi->fib_flags;
+	rtm->rtm_scope = scope;
 	rtm->rtm_type = type;
-	if (rtm->rtm_dst_len)
-		RTA_PUT(skb, RTA_DST, 2, dst);
-	rtm->rtm_protocol = fi->fib_protocol;
-	if (fi->fib_priority)
-		RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
+	if (rtm->rtm_dst_len)
+		RTA_PUT(skb, RTA_DST, 2, dst);
+	rtm->rtm_protocol = fi->fib_protocol;
+	if (fi->fib_priority)
+		RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
 	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
 		goto rtattr_failure;
-	if (fi->fib_nhs == 1) {
-		if (fi->fib_nh->nh_gw)
-			RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
-		if (fi->fib_nh->nh_oif)
-			RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
-	}
-	if (fi->fib_nhs > 1) {
-		struct rtnexthop *nhp;
-		struct rtattr *mp_head;
-		if (skb_tailroom(skb) <= RTA_SPACE(0))
-			goto rtattr_failure;
-		mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0));
-
-		for_nexthops(fi) {
-			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
-				goto rtattr_failure;
-			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
-			nhp->rtnh_flags = nh->nh_flags & 0xFF;
-			nhp->rtnh_hops = nh->nh_weight - 1;
-			nhp->rtnh_ifindex = nh->nh_oif;
-			if (nh->nh_gw)
-				RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
-			nhp->rtnh_len = skb->tail - (unsigned char *)nhp;
-		} endfor_nexthops(fi);
-		mp_head->rta_type = RTA_MULTIPATH;
-		mp_head->rta_len = skb->tail - (u8*)mp_head;
-	}
-
-	nlh->nlmsg_len = skb->tail - b;
-	return skb->len;
+	if (fi->fib_nhs == 1) {
+		if (fi->fib_nh->nh_gw)
+			RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
+		if (fi->fib_nh->nh_oif)
+			RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
+	}
+	if (fi->fib_nhs > 1) {
+		struct rtnexthop *nhp;
+		struct rtattr *mp_head;
+		if (skb_tailroom(skb) <= RTA_SPACE(0))
+			goto rtattr_failure;
+		mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0));
+
+		for_nexthops(fi) {
+			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
+				goto rtattr_failure;
+			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
+			nhp->rtnh_flags = nh->nh_flags & 0xFF;
+			nhp->rtnh_hops = nh->nh_weight - 1;
+			nhp->rtnh_ifindex = nh->nh_oif;
+			if (nh->nh_gw)
+				RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
+			nhp->rtnh_len = skb->tail - (unsigned char *)nhp;
+		} endfor_nexthops(fi);
+		mp_head->rta_type = RTA_MULTIPATH;
+		mp_head->rta_len = skb->tail - (u8*)mp_head;
+	}
+
+	nlh->nlmsg_len = skb->tail - b;
+	return skb->len;

 nlmsg_failure:
 rtattr_failure:
-	skb_trim(skb, b - skb->data);
-	return -EMSGSIZE;
+	skb_trim(skb, b - skb->data);
+	return -EMSGSIZE;
 }

 static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
-			struct nlmsghdr *nlh, struct netlink_skb_parms *req)
+			struct nlmsghdr *nlh, struct netlink_skb_parms *req)
 {
-	struct sk_buff *skb;
-	u32 pid = req ? req->pid : 0;
 	int err = -ENOBUFS;

-	skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
-	if (skb == NULL)
+	skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
+	if (skb == NULL)
 		goto errout;

-	err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
+	err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
 			       f->fn_type, f->fn_scope, &f->fn_key, z,
 			       DN_FIB_INFO(f), 0);
 	if (err < 0) {
@@@ -380,7 -380,7 +380,7 @@@ errout
 		rtnl_set_sk_err(RTNLGRP_DECnet_ROUTE, err);
 }

-static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
+static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
 				struct netlink_callback *cb,
 				struct dn_fib_table *tb,
 				struct dn_zone *dz,
@@@ -394,12 -394,12 +394,12 @@@
 			continue;
 		if (f->fn_state & DN_S_ZOMBIE)
 			continue;
-		if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
+		if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
 				cb->nlh->nlmsg_seq,
 				RTM_NEWROUTE,
-				tb->n,
+				tb->n,
 				(f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type,
-				f->fn_scope, &f->fn_key, dz->dz_order,
+				f->fn_scope, &f->fn_key, dz->dz_order,
 				f->fn_info, NLM_F_MULTI) < 0) {
 			cb->args[4] = i;
 			return -1;
@@@ -409,7 -409,7 +409,7 @@@
 	return skb->len;
 }

-static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
+static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
 				struct netlink_callback *cb,
 				struct dn_fib_table *tb,
 				struct dn_zone *dz)
@@@ -433,10 -433,10 +433,10 @@@
 	return skb->len;
 }

-static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
-				struct netlink_callback *cb)
+static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
+				struct netlink_callback *cb)
 {
-	int m, s_m;
+	int m, s_m;
 	struct dn_zone *dz;
 	struct dn_hash *table = (struct dn_hash *)tb->data;
@@@ -457,7 -457,7 +457,7 @@@
 	read_unlock(&dn_fib_tables_lock);
 	cb->args[2] = m;

-	return skb->len;
+	return skb->len;
 }

 int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@@ -482,7 -482,7 +482,7 @@@
 			goto next;
 		if (dumped)
 			memset(&cb->args[2], 0, sizeof(cb->args) -
-					2 * sizeof(cb->args[0]));
+					2 * sizeof(cb->args[0]));
 		if (tb->dump(tb, skb, cb) < 0)
 			goto out;
 		dumped = 1;
@@@ -503,13 -503,13 +503,13 @@@ static int dn_fib_table_insert(struct d
 	struct dn_fib_node *new_f, *f, **fp, **del_fp;
 	struct dn_zone *dz;
 	struct dn_fib_info *fi;
-	int z = r->rtm_dst_len;
+	int z = r->rtm_dst_len;
 	int type = r->rtm_type;
 	dn_fib_key_t key;
-	int err;
+	int err;

-	if (z > 16)
-		return -EINVAL;
+	if (z > 16)
+		return -EINVAL;

 	dz = table->dh_zones[z];
 	if (!dz && !(dz = dn_new_zone(table, z)))
@@@ -524,8 -524,8 +524,8 @@@
 		key = dz_key(dst, dz);
 	}

-	if ((fi = dn_fib_create_info(r, rta, n, &err)) == NULL)
-		return err;
+	if ((fi = dn_fib_create_info(r, rta, n, &err)) == NULL)
+		return err;

 	if (dz->dz_nent > (dz->dz_divisor << 2) &&
 	    dz->dz_divisor > DN_MAX_DIVISOR &&
@@@ -593,10 -593,12 +593,10 @@@ create
 replace:
 	err = -ENOBUFS;
-	new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
+	new_f = kmem_cache_zalloc(dn_hash_kmem, GFP_KERNEL);
 	if (new_f == NULL)
 		goto out;

-	memset(new_f, 0, sizeof(struct dn_fib_node));
-
 	new_f->fn_key = key;
 	new_f->fn_type = type;
 	new_f->fn_scope = r->rtm_scope;
@@@ -624,9 -626,9 +624,9 @@@
 		dn_rt_cache_flush(-1);
 	}

-	dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);
+	dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);

-	return 0;
+	return 0;
 out:
 	dn_fib_release_info(fi);
 	return err;
@@@ -637,14 -639,14 +637,14 @@@ static int dn_fib_table_delete(struct d
 {
 	struct dn_hash *table = (struct dn_hash*)tb->data;
 	struct dn_fib_node **fp, **del_fp, *f;
-	int z = r->rtm_dst_len;
+	int z = r->rtm_dst_len;
 	struct dn_zone *dz;
 	dn_fib_key_t key;
 	int matched;

-	if (z > 16)
-		return -EINVAL;
+	if (z > 16)
+		return -EINVAL;

 	if ((dz = table->dh_zones[z]) == NULL)
 		return -ESRCH;
@@@ -680,7 -682,7 +680,7 @@@
 		if (del_fp == NULL &&
 		    (!r->rtm_type || f->fn_type == r->rtm_type) &&
 		    (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
-		    (!r->rtm_protocol ||
+		    (!r->rtm_protocol ||
 		     fi->fib_protocol == r->rtm_protocol) &&
 		    dn_fib_nh_match(r, n, rta, fi) == 0)
 			del_fp = fp;
@@@ -688,7 -690,7 +688,7 @@@
 	if (del_fp) {
 		f = *del_fp;
-		dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
+		dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);

 		if (matched != 1) {
 			write_lock_bh(&dn_fib_tables_lock);
@@@ -712,7 -714,7 +712,7 @@@
 		return 0;
 	}

-	return -ESRCH;
+	return -ESRCH;
 }

 static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table)
@@@ -759,7 -761,7 +759,7 @@@ static int dn_fib_table_flush(struct dn
 static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp,
 				struct dn_fib_res *res)
 {
-	int err;
+	int err;
 	struct dn_zone *dz;
 	struct dn_hash *t = (struct dn_hash *)tb->data;
@@@ -788,7 -790,7 +788,7 @@@
 			if (err == 0) {
 				res->type = f->fn_type;
-				res->scope = f->fn_scope;
+				res->scope = f->fn_scope;
 				res->prefixlen = dz->dz_order;
 				goto out;
 			}
@@@ -799,21 -801,21 +799,21 @@@
 	err = 1;
 out:
 	read_unlock(&dn_fib_tables_lock);
-	return err;
+	return err;
 }

 struct dn_fib_table *dn_fib_get_table(u32 n, int create)
 {
-	struct dn_fib_table *t;
+	struct dn_fib_table *t;
 	struct hlist_node *node;
 	unsigned int h;

-	if (n < RT_TABLE_MIN)
-		return NULL;
+	if (n < RT_TABLE_MIN)
+		return NULL;

-	if (n > RT_TABLE_MAX)
-		return NULL;
+	if (n > RT_TABLE_MAX)
+		return NULL;

 	h = n & (DN_FIB_TABLE_HASHSZ - 1);
 	rcu_read_lock();
@@@ -825,54 -827,54 +825,54 @@@
 	}
 	rcu_read_unlock();

-	if (!create)
-		return NULL;
+	if (!create)
+		return NULL;

-	if (in_interrupt() && net_ratelimit()) {
-		printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
-		return NULL;
-	}
+	if (in_interrupt() && net_ratelimit()) {
+		printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
+		return NULL;
+	}

-	t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
+	t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
 		    GFP_KERNEL);
-	if (t == NULL)
-		return NULL;
-
-	t->n = n;
-	t->insert = dn_fib_table_insert;
-	t->delete = dn_fib_table_delete;
-	t->lookup = dn_fib_table_lookup;
-	t->flush = dn_fib_table_flush;
-	t->dump = dn_fib_table_dump;
+	if (t == NULL)
+		return NULL;
+
+	t->n = n;
+	t->insert = dn_fib_table_insert;
+	t->delete = dn_fib_table_delete;
+	t->lookup = dn_fib_table_lookup;
+	t->flush = dn_fib_table_flush;
+	t->dump = dn_fib_table_dump;
 	hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);

-	return t;
+	return t;
 }

 struct dn_fib_table *dn_fib_empty_table(void)
 {
-	u32 id;
+	u32 id;

-	for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
+	for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
 		if (dn_fib_get_table(id, 0) == NULL)
-			return dn_fib_get_table(id, 1);
-	return NULL;
+			return dn_fib_get_table(id, 1);
+	return NULL;
 }

 void dn_fib_flush(void)
 {
-	int flushed = 0;
-	struct dn_fib_table *tb;
+	int flushed = 0;
+	struct dn_fib_table *tb;
 	struct hlist_node *node;
 	unsigned int h;

 	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
 		hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist)
-			flushed += tb->flush(tb);
-	}
+			flushed += tb->flush(tb);
+	}

-	if (flushed)
-		dn_rt_cache_flush(-1);
+	if (flushed)
+		dn_rt_cache_flush(-1);
 }

 void __init dn_fib_table_init(void)
@@@ -892,7 -894,7 +892,7 @@@ void __exit dn_fib_table_cleanup(void
 	write_lock(&dn_fib_tables_lock);
 	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
 		hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h],
-					  hlist) {
+					  hlist) {
 			hlist_del(&t->hlist);
 			kfree(t);
 		}
diff --combined net/ipv4/ipmr.c
index d7e1e60f51,a099000cd1..604f5b5851
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@@ -241,7 -241,7 +241,7 @@@ failure
 /*
  *	Delete a VIF entry
  */
-
+
 static int vif_delete(int vifi)
 {
 	struct vif_device *v;
@@@ -409,7 -409,7 +409,7 @@@ static int vif_add(struct vifctl *vifc
 			return -ENOBUFS;
 		break;
 #endif
-	case VIFF_TUNNEL:
+	case VIFF_TUNNEL:
 		dev = ipmr_new_tunnel(vifc);
 		if (!dev)
 			return -ENOBUFS;
@@@ -479,18 -479,20 +479,18 @@@ static struct mfc_cache *ipmr_cache_fin
  */
 static struct mfc_cache *ipmr_cache_alloc(void)
 {
-	struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
+	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 	if(c==NULL)
 		return NULL;
-	memset(c, 0, sizeof(*c));
 	c->mfc_un.res.minvif = MAXVIFS;
 	return c;
 }

 static struct mfc_cache *ipmr_cache_alloc_unres(void)
 {
-	struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
+	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
 	if(c==NULL)
 		return NULL;
-	memset(c, 0, sizeof(*c));
 	skb_queue_head_init(&c->mfc_un.unres.unresolved);
 	c->mfc_un.unres.expires = jiffies + 10*HZ;
 	return c;
@@@ -499,7 -501,7 +499,7 @@@
 /*
  *	A cache entry has gone into a resolved state from queued
  */
-
+
 static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 {
 	struct sk_buff *skb;
@@@ -536,7 -538,7 +536,7 @@@
  *
  *	Called under mrt_lock.
  */
-
+
 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
 {
 	struct sk_buff *skb;
@@@ -567,13 -569,13 +567,13 @@@
 		memcpy(msg, pkt->nh.raw, sizeof(struct iphdr));
 		msg->im_msgtype = IGMPMSG_WHOLEPKT;
 		msg->im_mbz = 0;
-		msg->im_vif = reg_vif_num;
+		msg->im_vif = reg_vif_num;
 		skb->nh.iph->ihl = sizeof(struct iphdr) >> 2;
 		skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) +
 					     sizeof(struct iphdr));
-	} else
+	} else
 #endif
-	{
-
+	{
+
 		/*
 		 *	Copy the IP header
 		 */
@@@ -595,7 -597,7 +595,7 @@@
 		igmp->code	=	0;
 		skb->nh.iph->tot_len=htons(skb->len);		/* Fix the length */
 		skb->h.raw = skb->nh.raw;
-	}
+	}

 	if (mroute_socket == NULL) {
 		kfree_skb(skb);
@@@ -617,7 -619,7 +617,7 @@@
 /*
  *	Queue a packet for resolution. It gets locked cache entry!
  */
-
+
 static int
 ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
 {
@@@ -655,7 -657,7 +655,7 @@@
 		 *	Reflect first query at mrouted.
 		 */
 		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
-			/* If the report failed throw the cache entry
+			/* If the report failed throw the cache entry
 			   out - Brad Parker
 			 */
 			spin_unlock_bh(&mfc_unres_lock);
@@@ -781,11 -783,11 +781,11 @@@ static int ipmr_mfc_add(struct mfcctl *
 /*
  *	Close the multicast socket, and clear the vif tables etc
  */
-
+
 static void mroute_clean_tables(struct sock *sk)
 {
 	int i;
-
+
 	/*
 	 *	Shut down all active vif entries
 	 */
@@@ -852,13 -854,13 +852,13 @@@ static void mrtsock_destruct(struct soc
  *	that's how BSD mrouted happens to think. Maybe one day with a proper
  *	MOSPF/PIM router set up we can clean this up.
  */
-
+
 int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
 {
 	int ret;
 	struct vifctl vif;
 	struct mfcctl mfc;
-
+
 	if(optname!=MRT_INIT)
 	{
 		if(sk!=mroute_socket && !capable(CAP_NET_ADMIN))
@@@ -899,7 -901,7 +899,7 @@@
 			if(optlen!=sizeof(vif))
 				return -EINVAL;
 			if (copy_from_user(&vif,optval,sizeof(vif)))
-				return -EFAULT;
+				return -EFAULT;
 			if(vif.vifc_vifi >= MAXVIFS)
 				return -ENFILE;
 			rtnl_lock();
@@@ -978,13 -980,13 +978,13 @@@
 /*
  *	Getsock opt support for the multicast routing system.
  */
-
+
 int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
 {
 	int olr;
 	int val;

-	if(optname!=MRT_VERSION &&
+	if(optname!=MRT_VERSION &&
 #ifdef CONFIG_IP_PIMSM
 	   optname!=MRT_PIM &&
 #endif
@@@ -997,7 -999,7 +997,7 @@@
 	olr = min_t(unsigned int, olr, sizeof(int));
 	if (olr < 0)
 		return -EINVAL;
-
+
 	if(put_user(olr,optlen))
 		return -EFAULT;
 	if(optname==MRT_VERSION)
@@@ -1016,19 -1018,19 +1016,19 @@@
 /*
  *	The IP multicast ioctl support routines.
  */
-
+
 int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 {
 	struct sioc_sg_req sr;
 	struct sioc_vif_req vr;
 	struct vif_device *vif;
 	struct mfc_cache *c;
-
+
 	switch(cmd)
 	{
 		case SIOCGETVIFCNT:
 			if (copy_from_user(&vr,arg,sizeof(vr)))
-				return -EFAULT;
+				return -EFAULT;
 			if(vr.vifi>=maxvif)
 				return -EINVAL;
 			read_lock(&mrt_lock);
@@@ -1094,7 -1096,7 +1094,7 @@@ static struct notifier_block ip_mr_noti
  *	This avoids tunnel drivers and other mess and gives us the speed so
  *	important for multicast video.
 */
-
+
 static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 {
 	struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr));
@@@ -1192,7 -1194,7 +1192,7 @@@ static void ipmr_queue_xmit(struct sk_b
 	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

 	if (skb_cow(skb, encap)) {
-		ip_rt_put(rt);
+		ip_rt_put(rt);
 		goto out_free;
 	}
@@@ -1226,7 -1228,7 +1226,7 @@@
 	 * not mrouter) cannot join to more than one interface - it will
 	 * result in receiving multiple packets.
 	 */
-	NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
+	NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
 		ipmr_forward_finish);
 	return;
@@@ -1287,7 -1289,7 +1287,7 @@@ static int ip_mr_forward(struct sk_buf
 		   large chunk of pimd to kernel. Ough... --ANK
 		 */
 		(mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
-		    time_after(jiffies,
+		    time_after(jiffies,
 			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
 			cache->mfc_un.res.last_assert = jiffies;
 			ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
@@@ -1424,14 -1426,14 +1424,14 @@@ int pim_rcv_v1(struct sk_buff * skb
 	struct iphdr   *encap;
 	struct net_device  *reg_dev = NULL;

-	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
+	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
 		goto drop;

 	pim = (struct igmphdr*)skb->h.raw;

-	if (!mroute_do_pim ||
+	if (!mroute_do_pim ||
 	    skb->len < sizeof(*pim) + sizeof(*encap) ||
-	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
+	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
 		goto drop;

 	encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr));
@@@ -1443,7 -1445,7 +1443,7 @@@
 	 */
 	if (!MULTICAST(encap->daddr) ||
 	    encap->tot_len == 0 ||
-	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
+	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
 		goto drop;

 	read_lock(&mrt_lock);
@@@ -1453,7 -1455,7 +1453,7 @@@
 		dev_hold(reg_dev);
 	read_unlock(&mrt_lock);

-	if (reg_dev == NULL)
+	if (reg_dev == NULL)
 		goto drop;

 	skb->mac.raw = skb->nh.raw;
@@@ -1484,13 -1486,13 +1484,13 @@@ static int pim_rcv(struct sk_buff * skb
 	struct iphdr   *encap;
 	struct net_device  *reg_dev = NULL;

-	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
+	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
 		goto drop;

 	pim = (struct pimreghdr*)skb->h.raw;
-	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
+	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
 	    (pim->flags&PIM_NULL_REGISTER) ||
-	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
+	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 		goto drop;
@@@ -1498,7 -1500,7 +1498,7 @@@
 	encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr));
 	if (!MULTICAST(encap->daddr) ||
 	    encap->tot_len == 0 ||
-	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
+	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
 		goto drop;

 	read_lock(&mrt_lock);
@@@ -1508,7 -1510,7 +1508,7 @@@
 		dev_hold(reg_dev);
 	read_unlock(&mrt_lock);

-	if (reg_dev == NULL)
+	if (reg_dev == NULL)
 		goto drop;

 	skb->mac.raw = skb->nh.raw;
@@@ -1614,7 -1616,7 +1614,7 @@@ int ipmr_get_route(struct sk_buff *skb
 	return err;
 }

-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_PROC_FS
 /*
  *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
  */
@@@ -1628,7 -1630,7 +1628,7 @@@ static struct vif_device *ipmr_vif_seq_
 	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
 		if(!VIF_EXISTS(iter->ct))
 			continue;
-		if (pos-- == 0)
+		if (pos-- == 0)
 			return &vif_table[iter->ct];
 	}
 	return NULL;
@@@ -1637,7 -1639,7 +1637,7 @@@
 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	read_lock(&mrt_lock);
-	return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
+	return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
 		     : SEQ_START_TOKEN;
 }

@@@ -1648,7 -1650,7 +1648,7 @@@ static void *ipmr_vif_seq_next(struct s
 	++*pos;
 	if (v == SEQ_START_TOKEN)
 		return ipmr_vif_seq_idx(iter, 0);
-
+
 	while (++iter->ct < maxvif) {
 		if(!VIF_EXISTS(iter->ct))
 			continue;
@@@ -1665,7 -1667,7 +1665,7 @@@ static void ipmr_vif_seq_stop(struct se
 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
 {
 	if (v == SEQ_START_TOKEN) {
-		seq_puts(seq,
+		seq_puts(seq,
 			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
 	} else {
 		const struct vif_device *vif = v;
@@@ -1674,7 -1676,7 +1674,7 @@@
 		seq_printf(seq,
 			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
 			   vif - vif_table,
-			   name, vif->bytes_in, vif->pkt_in,
+			   name, vif->bytes_in, vif->pkt_in,
 			   vif->bytes_out, vif->pkt_out,
 			   vif->flags, vif->local, vif->remote);
 	}
@@@ -1693,7 -1695,7 +1693,7 @@@ static int ipmr_vif_open(struct inode *
 	struct seq_file *seq;
 	int rc = -ENOMEM;
 	struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
-
+
 	if (!s)
 		goto out;
@@@ -1732,15 -1734,15 +1732,15 @@@ static struct mfc_cache *ipmr_mfc_seq_i
 	it->cache = mfc_cache_array;
 	read_lock(&mrt_lock);
-	for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
-		for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
-			if (pos-- == 0)
+	for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
+		for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
+			if (pos-- == 0)
 				return mfc;
 	read_unlock(&mrt_lock);

 	it->cache = &mfc_unres_queue;
 	spin_lock_bh(&mfc_unres_lock);
-	for(mfc = mfc_unres_queue; mfc; mfc = mfc->next)
+	for(mfc = mfc_unres_queue; mfc; mfc = mfc->next)
 		if (pos-- == 0)
 			return mfc;
 	spin_unlock_bh(&mfc_unres_lock);
@@@ -1755,7 -1757,7 +1755,7 @@@ static void *ipmr_mfc_seq_start(struct
 	struct ipmr_mfc_iter *it = seq->private;
 	it->cache = NULL;
 	it->ct = 0;
-	return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
+	return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
 		     : SEQ_START_TOKEN;
 }

@@@ -1771,8 -1773,8 +1771,8 @@@ static void *ipmr_mfc_seq_next(struct s
 	if (mfc->next)
 		return mfc->next;
-
-	if (it->cache == &mfc_unres_queue)
+
+	if (it->cache == &mfc_unres_queue)
 		goto end_of_list;

 	BUG_ON(it->cache != mfc_cache_array);
@@@ -1787,10 -1789,10 +1787,10 @@@
 	read_unlock(&mrt_lock);
 	it->cache = &mfc_unres_queue;
 	it->ct = 0;
-
+
 	spin_lock_bh(&mfc_unres_lock);
 	mfc = mfc_unres_queue;
-	if (mfc)
+	if (mfc)
 		return mfc;

 end_of_list:
@@@ -1815,12 -1817,12 +1815,12 @@@ static int ipmr_mfc_seq_show(struct seq
 	int n;

 	if (v == SEQ_START_TOKEN) {
-		seq_puts(seq,
+		seq_puts(seq,
 		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
 	} else {
 		const struct mfc_cache *mfc = v;
 		const struct ipmr_mfc_iter *it = seq->private;
-
+
 		seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
 			   (unsigned long) mfc->mfc_mcastgrp,
 			   (unsigned long) mfc->mfc_origin,
@@@ -1830,12 -1832,12 +1830,12 @@@
 			   mfc->mfc_un.res.wrong_if);

 		if (it->cache != &mfc_unres_queue) {
-			for(n = mfc->mfc_un.res.minvif;
+			for(n = mfc->mfc_un.res.minvif;
 			    n < mfc->mfc_un.res.maxvif; n++ ) {
-				if(VIF_EXISTS(n)
+				if(VIF_EXISTS(n)
 				   && mfc->mfc_un.res.ttls[n] < 255)
-					seq_printf(seq,
-					   " %2d:%-3d",
+					seq_printf(seq,
+					   " %2d:%-3d",
 					   n, mfc->mfc_un.res.ttls[n]);
 			}
 		}
@@@ -1856,7 -1858,7 +1856,7 @@@ static int ipmr_mfc_open(struct inode *
 	struct seq_file *seq;
 	int rc = -ENOMEM;
 	struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
-
+
 	if (!s)
 		goto out;
@@@ -1881,7 -1883,7 +1881,7 @@@ static struct file_operations ipmr_mfc_
 	.llseek	 = seq_lseek,
 	.release = seq_release_private,
 };
-#endif
+#endif

 #ifdef CONFIG_IP_PIMSM_V2
 static struct net_protocol pim_protocol = {
@@@ -1893,7 -1895,7 +1893,7 @@@
 /*
  *	Setup for IP multicast routing
  */
-
+
 void __init ip_mr_init(void)
 {
 	mrt_cachep = kmem_cache_create("ip_mrt_cache",
@@@ -1903,8 -1905,8 +1903,8 @@@
 	init_timer(&ipmr_expire_timer);
 	ipmr_expire_timer.function=ipmr_expire_process;
 	register_netdevice_notifier(&ip_mr_notifier);
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_PROC_FS
 	proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops);
 	proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops);
-#endif
+#endif
 }
diff --combined net/ipv4/ipvs/ip_vs_conn.c
index 3aec4ac66e,6feeb1f1c9..0b5e03476c
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@@ -494,8 -494,8 +494,8 @@@ int ip_vs_check_template(struct ip_vs_c
 	 * Checking the dest server status.
 	 */
 	if ((dest == NULL) ||
-	    !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
-	    (sysctl_ip_vs_expire_quiescent_template &&
+	    !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
+	    (sysctl_ip_vs_expire_quiescent_template &&
 	     (atomic_read(&dest->weight) == 0))) {
 		IP_VS_DBG(9, "check_template: dest not available for "
 			  "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d "
@@@ -603,12 -603,13 +603,12 @@@ ip_vs_conn_new(int proto, __be32 caddr
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp = ip_vs_proto_get(proto);

-	cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
+	cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
 	if (cp == NULL) {
 		IP_VS_ERR_RL("ip_vs_conn_new: no memory available.\n");
 		return NULL;
 	}

-	memset(cp, 0, sizeof(*cp));
 	INIT_LIST_HEAD(&cp->c_list);
 	init_timer(&cp->timer);
 	cp->timer.data = (unsigned long)cp;
@@@ -666,7 -667,7 +666,7 @@@ static void *ip_vs_conn_array(struct se
 {
 	int idx;
 	struct ip_vs_conn *cp;
-
+
 	for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) {
 		ct_read_lock_bh(idx);
 		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
@@@ -694,7 -695,7 +694,7 @@@ static void *ip_vs_conn_seq_next(struc
 	int idx;

 	++*pos;
-	if (v == SEQ_START_TOKEN)
+	if (v == SEQ_START_TOKEN)
 		return ip_vs_conn_array(seq, 0);

 	/* more on same hash chain? */
@@@ -709,7 -710,7 +709,7 @@@
 		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
 			seq->private = &ip_vs_conn_tab[idx];
 			return cp;
-		}
+		}
 		ct_read_unlock_bh(idx);
 	}
 	seq->private = NULL;
diff --combined net/ipv4/netfilter/ip_conntrack_core.c
index 62be2eb376,2e6e42199f..04e466d53c
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@@ -2,7 -2,7 +2,7 @@@
    but required by, the NAT layer; it can also be used by an iptables
    extension. */

-/* (C) 1999-2001 Paul `Rusty' Russell
+/* (C) 1999-2001 Paul `Rusty' Russell
  * (C) 2002-2004 Netfilter Core Team
  *
  * This program is free software; you can redistribute it and/or modify
@@@ -99,7 -99,7 +99,7 @@@ __ip_ct_deliver_cached_events(struct ip
 void ip_ct_deliver_cached_events(const struct ip_conntrack *ct)
 {
 	struct ip_conntrack_ecache *ecache;
-
+
 	local_bh_disable();
 	ecache = &__get_cpu_var(ip_conntrack_ecache);
 	if (ecache->ct == ct)
@@@ -147,9 -147,9 +147,9 @@@ static u_int32_t __hash_conntrack(cons
 			    unsigned int size, unsigned int rnd)
 {
 	return (jhash_3words((__force u32)tuple->src.ip,
-			     ((__force u32)tuple->dst.ip ^ tuple->dst.protonum),
-			     (tuple->src.u.all | (tuple->dst.u.all << 16)),
-			     rnd) % size);
+			     ((__force u32)tuple->dst.ip ^ tuple->dst.protonum),
+			     (tuple->src.u.all | (tuple->dst.u.all << 16)),
+			     rnd) % size);
 }

 static u_int32_t
@@@ -219,7 -219,7 +219,7 @@@ struct ip_conntrack_expect
 __ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
 {
 	struct ip_conntrack_expect *i;
-
+
 	list_for_each_entry(i, &ip_conntrack_expect_list, list) {
 		if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
 			return i;
@@@ -232,7 -232,7 +232,7 @@@ struct ip_conntrack_expect
 ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
 {
 	struct ip_conntrack_expect *i;
-
+
 	read_lock_bh(&ip_conntrack_lock);
 	i = __ip_conntrack_expect_find(tuple);
 	if (i)
@@@ -398,7 -398,7 +398,7 @@@ ip_conntrack_find_get(const struct ip_c
 static void __ip_conntrack_hash_insert(struct ip_conntrack *ct,
 					unsigned int hash,
-					unsigned int repl_hash)
+					unsigned int repl_hash)
 {
 	ct->id = ++ip_conntrack_next_id;
 	list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list,
@@@ -446,15 -446,15 +446,15 @@@ __ip_conntrack_confirm(struct sk_buff *
 	/* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

 	/* No external references means noone else could have
-	   confirmed us. */
+	   confirmed us. */
 	IP_NF_ASSERT(!is_confirmed(ct));
 	DEBUGP("Confirming conntrack %p\n", ct);

 	write_lock_bh(&ip_conntrack_lock);

 	/* See if there's one in the list already, including reverse:
-	   NAT could have grabbed it without realizing, since we're
-	   not in the hash.  If there is, we lost race. */
+	   NAT could have grabbed it without realizing, since we're
+	   not in the hash. If there is, we lost race. */
 	list_for_each_entry(h, &ip_conntrack_hash[hash], list)
 		if (ip_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				      &h->tuple))
@@@ -602,7 -602,7 +602,7 @@@ ip_conntrack_proto_find_get(u_int8_t pr
 		p = &ip_conntrack_generic_protocol;
 	}
 	preempt_enable();
-
+
 	return p;
 }

@@@ -638,13 -638,14 +638,13 @@@ struct ip_conntrack *ip_conntrack_alloc
 		}
 	}

-	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
+	conntrack = kmem_cache_zalloc(ip_conntrack_cachep, GFP_ATOMIC);
 	if (!conntrack) {
 		DEBUGP("Can't allocate conntrack.\n");
 		atomic_dec(&ip_conntrack_count);
 		return ERR_PTR(-ENOMEM);
 	}

-	memset(conntrack, 0, sizeof(*conntrack));
 	atomic_set(&conntrack->ct_general.use, 1);
 	conntrack->ct_general.destroy = destroy_conntrack;
 	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
@@@ -745,7 -746,7 +745,7 @@@ resolve_normal_ct(struct sk_buff *skb
 	IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);

-	if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4,
+	if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4,
 			     &tuple,proto))
 		return NULL;
@@@ -770,7 -771,7 +770,7 @@@
 	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
 		DEBUGP("ip_conntrack_in: normal packet for %p\n",
 		       ct);
-		*ctinfo = IP_CT_ESTABLISHED;
+		*ctinfo = IP_CT_ESTABLISHED;
 	} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
 		DEBUGP("ip_conntrack_in: related packet for %p\n",
 		       ct);
@@@ -821,7 -822,7 +821,7 @@@ unsigned int ip_conntrack_in(unsigned i
 		if ((*pskb)->pkt_type == PACKET_BROADCAST) {
 			printk("Broadcast packet!\n");
 			return NF_ACCEPT;
-		} else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
+		} else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
 			   == htonl(0x000000FF)) {
 			printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",
 			       NIPQUAD((*pskb)->nh.iph->saddr),
@@@ -835,7 -836,7 +835,7 @@@
 	/* It may be an special packet, error, unclean...
 	 * inverse of the return code tells to the netfilter
 	 * core what to do with the packet. */
-	if (proto->error != NULL
+	if (proto->error != NULL
 	    && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) {
 		CONNTRACK_STAT_INC(error);
 		CONNTRACK_STAT_INC(invalid);
@@@ -875,7 -876,7 +875,7 @@@
 int invert_tuplepr(struct ip_conntrack_tuple *inverse,
 		   const struct ip_conntrack_tuple *orig)
 {
-	return ip_ct_invert_tuple(inverse, orig,
+	return ip_ct_invert_tuple(inverse, orig,
 				  __ip_conntrack_proto_find(orig->dst.protonum));
 }

@@@ -884,7 -885,7 +884,7 @@@ static inline int expect_clash(const st
 			       const struct ip_conntrack_expect *b)
 {
 	/* Part covered by intersection of masks must be unequal,
-	   otherwise they clash */
+	   otherwise they clash */
 	struct ip_conntrack_tuple intersect_mask
 		= { { a->mask.src.ip & b->mask.src.ip,
 		      { a->mask.src.u.all & b->mask.src.u.all } },
@@@ -922,7 -923,7 +922,7 @@@ void ip_conntrack_unexpect_related(stru
 }

 /* We don't increase the master conntrack refcount for non-fulfilled
-* conntracks. During the conntrack destruction, the expectations are
+* conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
 struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me)
 {
@@@ -1011,7 -1012,7 +1011,7 @@@ int ip_conntrack_expect_related(struct
 	}

 	/* Will be over limit? */
-	if (expect->master->helper->max_expected &&
+	if (expect->master->helper->max_expected &&
 	    expect->master->expecting >= expect->master->helper->max_expected)
 		evict_oldest_expect(expect->master);

@@@ -1020,7 -1021,7 +1020,7 @@@
 	ret = 0;
 out:
 	write_unlock_bh(&ip_conntrack_lock);
-	return ret;
+	return ret;
 }

 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
@@@ -1068,7 -1069,7 +1068,7 @@@ static inline void unhelp(struct ip_con
 			  const struct ip_conntrack_helper *me)
 {
 	if (tuplehash_to_ctrack(i)->helper == me) {
-		ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i));
+		ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i));
 		tuplehash_to_ctrack(i)->helper = NULL;
 	}
 }
@@@ -1104,8 -1105,8 +1104,8 @@@ void ip_conntrack_helper_unregister(str
 }

 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
-void __ip_ct_refresh_acct(struct ip_conntrack *ct,
-			enum ip_conntrack_info ctinfo,
+void __ip_ct_refresh_acct(struct ip_conntrack *ct,
+			enum ip_conntrack_info ctinfo,
 			const struct sk_buff *skb,
 			unsigned long extra_jiffies,
 			int do_acct)
@@@ -1139,7 -1140,7 +1139,7 @@@
 #ifdef CONFIG_IP_NF_CT_ACCT
 	if (do_acct) {
 		ct->counters[CTINFO2DIR(ctinfo)].packets++;
-		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
+		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
 			ntohs(skb->nh.iph->tot_len);
 		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
 		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
@@@ -1193,7 -1194,7 +1193,7 @@@ ip_ct_gather_frags(struct sk_buff *skb
 {
 	skb_orphan(skb);

-	local_bh_disable();
+	local_bh_disable();
 	skb = ip_defrag(skb, user);
 	local_bh_enable();

@@@ -1210,7 -1211,7 +1210,7 @@@ static void ip_conntrack_attach(struct
 	/* This ICMP is in reverse direction to the packet which caused it */
 	ct = ip_conntrack_get(skb, &ctinfo);
-
+
 	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
 		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
 	else
@@@ -1278,7 -1279,7 +1278,7 @@@ getorigdst(struct sock *sk, int optval
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_conntrack_tuple_hash *h;
 	struct ip_conntrack_tuple tuple;
-
+
 	IP_CT_TUPLE_U_BLANK(&tuple);
 	tuple.src.ip = inet->rcv_saddr;
 	tuple.src.u.tcp.port = inet->sport;
@@@ -1346,7 -1347,7 +1346,7 @@@ static void free_conntrack_hash(struct
 	if (vmalloced)
 		vfree(hash);
 	else
-		free_pages((unsigned long)hash,
+		free_pages((unsigned long)hash,
 			   get_order(sizeof(struct list_head) * size));
 }

@@@ -1357,8 -1358,8 +1357,8 @@@ void ip_conntrack_cleanup(void
 	ip_ct_attach = NULL;

 	/* This makes sure all current packets have passed through
-	   netfilter framework.  Roll on, two-stage module
-	   delete... */
+	   netfilter framework. Roll on, two-stage module
+	   delete... */
 	synchronize_net();

 	ip_ct_event_cache_flush();
@@@ -1384,11 -1385,11 +1384,11 @@@ static struct list_head *alloc_hashtabl
 	struct list_head *hash;
 	unsigned int i;

-	*vmalloced = 0;
-	hash = (void*)__get_free_pages(GFP_KERNEL,
+	*vmalloced = 0;
+	hash = (void*)__get_free_pages(GFP_KERNEL,
 				       get_order(sizeof(struct list_head) * size));
-	if (!hash) {
+	if (!hash) {
 		*vmalloced = 1;
 		printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n");
 		hash = vmalloc(sizeof(struct list_head) * size);
@@@ -1421,7 -1422,7 +1421,7 @@@ static int set_hashsize(const char *val
 	if (!hash)
 		return -ENOMEM;

-	/* We have to rehash for the new table anyway, so we also can
+	/* We have to rehash for the new table anyway, so we also can
 	 * use a new random seed */
 	get_random_bytes(&rnd, 4);
@@@ -1459,7 -1460,7 +1459,7 @@@ int __init ip_conntrack_init(void
 	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
 	 * machine has 256 buckets.  >= 1GB machines have 8192 buckets. */
-	if (!ip_conntrack_htable_size) {
+	if (!ip_conntrack_htable_size) {
 		ip_conntrack_htable_size
 			= (((num_physpages << PAGE_SHIFT) / 16384)
 			   / sizeof(struct list_head));
@@@ -1489,8 -1490,8 +1489,8 @@@
 	}

 	ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
-						sizeof(struct ip_conntrack), 0,
-						0, NULL, NULL);
+						sizeof(struct ip_conntrack), 0,
+						0, NULL, NULL);
 	if (!ip_conntrack_cachep) {
 		printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
 		goto err_free_hash;
diff --combined net/ipv6/ip6_fib.c
index 827f8842b5,8c9024890b..f4d7be77eb
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@@ -1,9 -1,9 +1,9 @@@
 /*
- *	Linux INET6 implementation
+ *	Linux INET6 implementation
  *	Forwarding Information Database
  *
  *	Authors:
- *	Pedro Roque
+ *	Pedro Roque
  *
  *	$Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $
  *
@@@ -97,7 -97,7 +97,7 @@@ static DEFINE_TIMER(ip6_fib_timer, fib6
 static struct fib6_walker_t fib6_walker_list = {
 	.prev	= &fib6_walker_list,
-	.next	= &fib6_walker_list,
+	.next	= &fib6_walker_list,
 };

 #define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
@@@ -131,7 -131,7 +131,7 @@@ static __inline__ u32 fib6_new_sernum(v
 /*
  *	Auxiliary address test functions for the radix tree.
  *
- *	These assume a 32bit processor (although it will work on
+ *	These assume a 32bit processor (although it will work on
  *	64bit processors)
  */

@@@ -150,7 -150,8 +150,7 @@@ static __inline__ struct fib6_node * no
 {
 	struct fib6_node *fn;

-	if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL)
-		memset(fn, 0, sizeof(struct fib6_node));
+	fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);

 	return fn;
 }
@@@ -297,7 -298,7 +297,7 @@@ static int fib6_dump_node(struct fib6_w
 	int res;
 	struct rt6_info *rt;

-	for (rt = w->leaf; rt; rt = rt->u.next) {
+	for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
 		res = rt6_dump_route(rt, w->args);
 		if (res < 0) {
 			/* Frame is full, suspend walking */
@@@ -433,7 -434,7 +433,7 @@@ static struct fib6_node * fib6_add_1(st
 	struct fib6_node *pn = NULL;
 	struct rt6key *key;
 	int	bit;
-	__be32	dir = 0;
+	__be32	dir = 0;
 	__u32	sernum = fib6_new_sernum();

 	RT6_TRACE("fib6_add_1\n");
@@@ -451,27 -452,27 +451,27 @@@
 		if (plen < fn->fn_bit ||
 		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
 			goto insert_above;
-
+
 		/*
 		 *	Exact match ?
 		 */
-
+
 		if (plen == fn->fn_bit) {
 			/* clean up an intermediate node */
 			if ((fn->fn_flags & RTN_RTINFO) == 0) {
 				rt6_release(fn->leaf);
 				fn->leaf = NULL;
 			}
-
+
 			fn->fn_sernum = sernum;
-
+
 			return fn;
 		}

 		/*
 		 *	We have more bits to go
 		 */
-
+
 		/* Try to walk down on tree. */
 		fn->fn_sernum = sernum;
 		dir = addr_bit_set(addr, fn->fn_bit);
@@@ -489,7 -490,7 +489,7 @@@
 	if (ln == NULL)
 		return NULL;
 	ln->fn_bit = plen;
-
+
 	ln->parent = pn;
 	ln->fn_sernum = sernum;
@@@ -503,7 -504,7 +503,7 @@@

insert_above:
 	/*
-	 * split since we don't have a common prefix anymore or
+	 * split since we don't have a common prefix anymore or
 	 * we have a less significant route.
 	 * we've to insert an intermediate node on the list
 	 * this new node will point to the one we need to create
@@@ -517,18 -518,18 +517,18 @@@
 	   See comment in __ipv6_addr_diff: bit may be an invalid value,
 	   but if it is >= plen, the value is ignored in any case.
 	 */
-
+
 	bit = __ipv6_addr_diff(addr, &key->addr, addrlen);

-	/*
-	 *		(intermediate)[in]
+	/*
+	 *		(intermediate)[in]
 	 *	          /	   \
 	 *	(new leaf node)[ln] (old node)[fn]
 	 */
 	if (plen > bit) {
 		in = node_alloc();
 		ln = node_alloc();
-
+
 		if (in == NULL || ln == NULL) {
 			if (in)
 				node_free(in);
@@@ -537,8 -538,8 +537,8 @@@
 			return NULL;
 		}

-		/*
-		 * new intermediate node.
+		/*
+		 * new intermediate node.
 		 * RTN_RTINFO will
 		 * be off since that an address that chooses one of
 		 * the branches would not match less specific routes
@@@ -575,7 -576,7 +575,7 @@@
 		}
 	} else { /* plen <= bit */

-		/*
+		/*
 		 *		(new leaf node)[ln]
 		 *	          /	   \
 		 *	     (old node)[fn] NULL
@@@ -591,7 -592,7 +591,7 @@@
 		ln->parent = pn;
 		ln->fn_sernum = sernum;
-
+
 		if (dir)
 			pn->right = ln;
 		else
@@@ -623,11 -624,11 +623,11 @@@ static int fib6_add_rt2node(struct fib6
 		    fn->leaf == &ip6_null_entry &&
 		    !(rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ){
 			fn->leaf = rt;
-			rt->u.next = NULL;
+			rt->u.dst.rt6_next = NULL;
 			goto out;
 		}

-	for (iter = fn->leaf; iter; iter=iter->u.next) {
+	for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) {
 		/*
 		 *	Search for duplicates
 		 */
@@@ -655,7 -656,7 +655,7 @@@
 		if (iter->rt6i_metric > rt->rt6i_metric)
 			break;

-		ins = &iter->u.next;
+		ins = &iter->u.dst.rt6_next;
 	}

 	/*
@@@ -663,7 -664,7 +663,7 @@@
 	 */

out:
-	rt->u.next = iter;
+	rt->u.dst.rt6_next = iter;
 	*ins = rt;
 	rt->rt6i_node = fn;
 	atomic_inc(&rt->rt6i_ref);
@@@ -1104,7 -1105,7 +1104,7 @@@ static void fib6_del_route(struct fib6_
 	RT6_TRACE("fib6_del_route\n");

 	/* Unlink it */
-	*rtp = rt->u.next;
+	*rtp = rt->u.dst.rt6_next;
 	rt->rt6i_node = NULL;
 	rt6_stats.fib_rt_entries--;
 	rt6_stats.fib_discarded_routes++;
@@@ -1114,14 -1115,14 +1114,14 @@@
 	FOR_WALKERS(w) {
 		if (w->state == FWS_C && w->leaf == rt) {
 			RT6_TRACE("walker %p adjusted by delroute\n", w);
-			w->leaf = rt->u.next;
+			w->leaf = rt->u.dst.rt6_next;
 			if (w->leaf == NULL)
 				w->state = FWS_U;
 		}
 	}
 	read_unlock(&fib6_walker_lock);

-	rt->u.next = NULL;
+	rt->u.dst.rt6_next = NULL;

 	if (fn->leaf == NULL && fn->fn_flags&RTN_TL_ROOT)
 		fn->leaf = &ip6_null_entry;
@@@ -1189,7 -1190,7 +1189,7 @@@ int fib6_del(struct rt6_info *rt, struc
 	 *	Walk the leaf entries looking for ourself
 	 */

-	for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.next) {
+	for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) {
 		if (*rtp == rt) {
 			fib6_del_route(fn, rtp, info);
 			return 0;
@@@ -1205,7 -1206,7 +1205,7 @@@
 *	However, it is internally reenterable wrt itself and fib6_add/fib6_del.
 *	It means, that we can modify tree during walking
 *	and use this function for garbage collection, clone pruning,
- *	cleaning tree when a device goes down etc. etc.
+ *	cleaning tree when a device goes down etc. etc.
 *
 *	It guarantees that every node will be traversed,
 *	and that it will be traversed only once.
@@@ -1244,7 -1245,7 +1244,7 @@@ static int fib6_walk_continue(struct fi
 				continue;
 			}
 			w->state = FWS_L;
-#endif
+#endif
 		case FWS_L:
 			if (fn->left) {
 				w->node = fn->left;
@@@ -1316,7 -1317,7 +1316,7 @@@ static int fib6_clean_node(struct fib6_
 	struct rt6_info *rt;
 	struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w;

-	for (rt = w->leaf; rt; rt = rt->u.next) {
+	for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
 		res = c->func(rt, c->arg);
 		if (res < 0) {
 			w->leaf = rt;
@@@ -1337,7 -1338,7 +1337,7 @@@
 /*
  *	Convenient frontend to tree walker.
- *
+ *
  *	func is called on each route.
  *		It may return -1 -> delete this route.
  *		          0  -> continue walking
diff --combined net/sctp/sm_make_chunk.c
index 7834818601,b975116556..f7fb29d5a0
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@@ -118,7 -118,7 +118,7 @@@ void sctp_init_cause(struct sctp_chun
 	int padlen;
 	__u16 len;

-	/* Cause code constants are now defined in network order. */
+	/* Cause code constants are now defined in network order. */
 	err.cause = cause_code;
 	len = sizeof(sctp_errhdr_t) + paylen;
 	padlen = len % 4;
@@@ -295,11 -295,11 +295,11 @@@ struct sctp_chunk *sctp_make_init_ack(c
 	 */
 	chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;

-	/* Tell peer that we'll do ECN only if peer advertised such cap. */
+	/* Tell peer that we'll do ECN only if peer advertised such cap. */
 	if (asoc->peer.ecn_capable)
 		chunksize += sizeof(ecap_param);

-	/* Tell peer that we'll do PR-SCTP only if peer advertised. */
+	/* Tell peer that we'll do PR-SCTP only if peer advertised. */
 	if (asoc->peer.prsctp_capable)
 		chunksize += sizeof(prsctp_param);
@@@ -728,7 -728,7 +728,7 @@@ struct sctp_chunk *sctp_make_shutdown_c
 	if (retval && chunk)
 		retval->transport = chunk->transport;

-	return retval;
+	return retval;
 }

 /* Create an ABORT.  Note that we set the T bit if we have no
@@@ -844,7 -844,7 +844,7 @@@ err_chunk
 	return retval;
 }

-/* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */
+/* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */
 struct sctp_chunk *sctp_make_abort_violation(
 	const struct sctp_association *asoc,
 	const struct sctp_chunk *chunk,
@@@ -979,10 -979,11 +979,10 @@@ struct sctp_chunk *sctp_chunkify(struc
 {
 	struct sctp_chunk *retval;

-	retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC);
+	retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC);

 	if (!retval)
 		goto nodata;
-	memset(retval, 0, sizeof(struct sctp_chunk));

 	if (!sk) {
 		SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb);
@@@ -1264,8 -1265,8 +1264,8 @@@ static sctp_cookie_param_t *sctp_pack_c
 	/* Header size is static data prior to the actual cookie, including
 	 * any padding.
 	 */
-	headersize = sizeof(sctp_paramhdr_t) +
-		     (sizeof(struct sctp_signed_cookie) -
+	headersize = sizeof(sctp_paramhdr_t) +
+		     (sizeof(struct sctp_signed_cookie) -
 		      sizeof(struct sctp_cookie));
 	bodysize = sizeof(struct sctp_cookie)
 		 + ntohs(init_chunk->chunk_hdr->length) + addrs_len;
@@@ -1314,7 -1315,7 +1314,7 @@@
 	memcpy((__u8 *)&cookie->c.peer_init[0] +
 	       ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);

-	if (sctp_sk(ep->base.sk)->hmac) {
+	if (sctp_sk(ep->base.sk)->hmac) {
 		struct hash_desc desc;

 		/* Sign the message. */
@@@ -1323,8 -1324,8 +1323,8 @@@
 		sg.length = bodysize;
 		keylen = SCTP_SECRET_SIZE;
 		key = (char *)ep->secret_key[ep->current_key];
-		desc.tfm = sctp_sk(ep->base.sk)->hmac;
-		desc.flags = 0;
+		desc.tfm = sctp_sk(ep->base.sk)->hmac;
+		desc.flags = 0;

 		if (crypto_hash_setkey(desc.tfm, key, keylen) ||
 		    crypto_hash_digest(&desc, &sg, bodysize,
 				       cookie->signature))
@@@ -1364,7 -1365,7 +1364,7 @@@ struct sctp_association *sctp_unpack_co
 	 * any padding.
 	 */
 	headersize = sizeof(sctp_chunkhdr_t) +
-		     (sizeof(struct sctp_signed_cookie) -
+		     (sizeof(struct sctp_signed_cookie) -
 		      sizeof(struct sctp_cookie));
 	bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
 	fixed_size = headersize + sizeof(struct sctp_cookie);
@@@ -1592,7 -1593,7 +1592,7 @@@ static int sctp_process_inv_paramlength
 					struct sctp_chunk **errp)
 {
 	char		error[] = "The following parameter had invalid length:";
-	size_t		payload_len = WORD_ROUND(sizeof(error)) +
+	size_t		payload_len = WORD_ROUND(sizeof(error)) +
 						sizeof(sctp_paramhdr_t);

@@@ -1751,7 -1752,7 +1751,7 @@@ static int sctp_verify_param(const stru
 	case SCTP_PARAM_FWD_TSN_SUPPORT:
 		if (sctp_prsctp_enable)
 			break;
-		/* Fall Through */
+		/* Fall Through */
 	default:
 		SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
 				ntohs(param.p->type), cid);
@@@ -1860,7 -1861,7 +1860,7 @@@ int sctp_process_init(struct sctp_assoc
 	sctp_walk_params(param, peer_init, init_hdr.params) {
 		if (!sctp_process_param(asoc, param, peer_addr, gfp))
-			goto clean_up;
+			goto clean_up;
 	}

 	/* Walk list of transports, removing transports in the UNKNOWN state. */
@@@ -1936,7 -1937,7 +1936,7 @@@
 	 */

 	/* Allocate storage for the negotiated streams if it is not a temporary
-	 * association.
+	 * association.
 	 */
 	if (!asoc->temp) {
 		int assoc_id;
@@@ -2108,7 -2109,7 +2108,7 @@@ static int sctp_process_param(struct sc
 			asoc->peer.prsctp_capable = 1;
 			break;
 		}
-		/* Fall Through */
+		/* Fall Through */
 	default:
 		/* Any unrecognized parameters should have been caught
 		 * and handled by sctp_verify_param() which should be
@@@ -2167,7 -2168,7 +2167,7 @@@ __u32 sctp_generate_tsn(const struct sc
 * |                      ASCONF Parameter #N                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
- * Address Parameter and other parameter will not be wrapped in this function
+ * Address Parameter and other parameter will not be wrapped in this function
 */
 static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
 					   union sctp_addr *addr,
@@@ -2289,7 -2290,7 +2289,7 @@@ struct sctp_chunk *sctp_make_asconf_upd
 * |                       Address Parameter                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
- * Create an ASCONF chunk with Set Primary IP address parameter.
+ * Create an ASCONF chunk with Set Primary IP address parameter.
 */
 struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
 					     union sctp_addr *addr)
@@@ -2338,7 -2339,7 +2338,7 @@@
 * |                 ASCONF Parameter Response#N                  |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
- * Create an ASCONF_ACK chunk with enough space for the parameter responses.
+ * Create an ASCONF_ACK chunk with enough space for the parameter responses.
 */
 static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc,
 					       __u32 serial, int vparam_len)
@@@ -2380,7 -2381,7 +2380,7 @@@ static void sctp_add_asconf_response(st
 			ntohs(asconf_param->param_hdr.length);
 	}

-	/* Add Success Indication or Error Cause Indication parameter. */
+	/* Add Success Indication or Error Cause Indication parameter. */
 	ack_param.param_hdr.type = response_type;
 	ack_param.param_hdr.length = htons(sizeof(ack_param) +
 					   err_param_len +
@@@ -2423,11 -2424,11 +2423,11 @@@ static __be16 sctp_process_asconf_param
 	switch (asconf_param->param_hdr.type) {
 	case SCTP_PARAM_ADD_IP:
 		/* ADDIP 4.3 D9) If an endpoint receives an ADD IP address
-		 * request and does not have the local resources to add this
-		 * new address to the association, it MUST return an Error
-		 * Cause TLV set to the new error code 'Operation Refused
-		 * Due to Resource Shortage'.
-		 */
+		 * request and does not have the local resources to add this
+		 * new address to the association, it MUST return an Error
+		 * Cause TLV set to the new error code 'Operation Refused
+		 * Due to Resource Shortage'.
+		 */

 		peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
 		if (!peer)
@@@ -2439,10 -2440,10 +2439,10 @@@
 		break;
 	case SCTP_PARAM_DEL_IP:
 		/* ADDIP 4.3 D7) If a request is received to delete the
-		 * last remaining IP address of a peer endpoint, the receiver
-		 * MUST send an Error Cause TLV with the error cause set to the
-		 * new error code 'Request to Delete Last Remaining IP Address'.
-		 */
+		 * last remaining IP address of a peer endpoint, the receiver
+		 * MUST send an Error Cause TLV with the error cause set to the
+		 * new error code 'Request to Delete Last Remaining IP Address'.
+		 */
 		pos = asoc->peer.transport_addr_list.next;
 		if (pos->next == &asoc->peer.transport_addr_list)
 			return SCTP_ERROR_DEL_LAST_IP;
@@@ -2474,7 -2475,7 +2474,7 @@@
 	return SCTP_ERROR_NO_ERROR;
 }

-/* Process an incoming ASCONF chunk with the next expected serial no. and
+/* Process an incoming ASCONF chunk with the next expected serial no. and
 * return an ASCONF_ACK chunk to be sent in response.
 */
 struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
@@@ -2494,19 -2495,19 +2494,19 @@@
 	hdr = (sctp_addiphdr_t *)asconf->skb->data;
 	serial = ntohl(hdr->serial);

-	/* Skip the addiphdr and store a pointer to address parameter. */
+	/* Skip the addiphdr and store a pointer to address parameter. */
 	length = sizeof(sctp_addiphdr_t);
 	addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
 	chunk_len -= length;

 	/* Skip the address parameter and store a pointer to the first
 	 * asconf paramter.
-	 */
+	 */
 	length = ntohs(addr_param->v4.param_hdr.length);
 	asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
 	chunk_len -= length;

-	/* create an ASCONF_ACK chunk.
+	/* create an ASCONF_ACK chunk.
 	 * Based on the definitions of parameters, we know that the size of
 	 * ASCONF_ACK parameters are less than or equal to the twice of ASCONF
 	 * paramters.
@@@ -2537,7 -2538,7 +2537,7 @@@
 	/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
 	 * an IP address sends an 'Out of Resource' in its response, it
 	 * MUST also fail any subsequent add or delete requests bundled
-	 * in the ASCONF.
+	 * in the ASCONF.
 	 */
 	if (SCTP_ERROR_RSRC_LOW == err_code)
 		goto done;
@@@ -2548,12 -2549,12 +2548,12 @@@
 					     length);
 		chunk_len -= length;
 	}
-
+
done:
 	asoc->peer.addip_serial++;

 	/* If we are sending a new ASCONF_ACK hold a reference to it in assoc
-	 * after freeing the reference to old asconf ack if any.
+	 * after freeing the reference to old asconf ack if any.
 	 */
 	if (asconf_ack) {
 		if (asoc->addip_last_asconf_ack)
@@@ -2621,7 -2622,7 +2621,7 @@@ static int sctp_asconf_param_success(st

 /* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk
 * for the given asconf parameter.  If there is no response for this parameter,
-* return the error code based on the third argument 'no_err'.
+* return the error code based on the third argument 'no_err'.
 * ADDIP 4.1
 * A7) If an error response is received for a TLV parameter, all TLVs with no
 * response before the failed TLV are considered successful if not reported.
@@@ -2645,7 -2646,7 +2645,7 @@@ static __be16 sctp_get_asconf_response(
 	/* Skip the addiphdr from the asconf_ack chunk and store a pointer to
 	 * the first asconf_ack parameter.
-	 */
+	 */
 	length = sizeof(sctp_addiphdr_t);
 	asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data +
 						  length);
@@@ -2696,14 -2697,14 +2696,14 @@@ int sctp_process_asconf_ack(struct sctp
 	/* Skip the chunkhdr and addiphdr from the last asconf sent and store
 	 * a pointer to address parameter.
-	 */
+	 */
 	length = sizeof(sctp_addip_chunk_t);
 	addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
 	asconf_len -= length;

 	/* Skip the address parameter in the last asconf sent and store a
 	 * pointer to the first asconf paramter.
-	 */
+	 */
 	length = ntohs(addr_param->v4.param_hdr.length);
 	asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
 	asconf_len -= length;
@@@ -2740,7 -2741,7 +2740,7 @@@
 		case SCTP_ERROR_INV_PARAM:
 			/* Disable sending this type of asconf parameter in
 			 * future.
-			 */
+			 */
 			asoc->peer.addip_disabled_mask |=
 				asconf_param->param_hdr.type;
 			break;
@@@ -2754,7 -2755,7 +2754,7 @@@
 		/* Skip the processed asconf parameter and move to the next
 		 * one.
-		 */
+		 */
 		length = ntohs(asconf_param->param_hdr.length);
 		asconf_param = (sctp_addip_param_t *)((void *)asconf_param +
 						      length);
@@@ -2783,14 -2784,14 +2783,14 @@@
 	return retval;
 }

-/* Make a FWD TSN chunk. */
+/* Make a FWD TSN chunk. */
 struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
 				    __u32 new_cum_tsn, size_t nstreams,
 				    struct sctp_fwdtsn_skip *skiplist)
 {
 	struct sctp_chunk *retval = NULL;
 	struct sctp_fwdtsn_chunk *ftsn_chunk;
-	struct sctp_fwdtsn_hdr ftsn_hdr;
+	struct sctp_fwdtsn_hdr ftsn_hdr;
 	struct sctp_fwdtsn_skip skip;
 	size_t hint;
 	int i;
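
The non-whitespace change that recurs in every file of this merge (dst.c, neighbour.c, dn_table.c, ipmr.c, ip_vs_conn.c, ip_conntrack_core.c, ip6_fib.c, sm_make_chunk.c) is the replacement of an open-coded kmem_cache_alloc() + memset() pair with kmem_cache_zalloc(), which returns zero-filled memory in a single call. A minimal sketch of that pattern follows; "struct example" and "example_cache" are hypothetical names for illustration, not identifiers from the patch.

	#include <linux/slab.h>
	#include <linux/string.h>

	struct example {
		int field;
	};

	/* Assumed to be created elsewhere with kmem_cache_create(). */
	static struct kmem_cache *example_cache;

	/* Before: allocate from the slab cache, then zero the object by hand. */
	static struct example *example_alloc_old(void)
	{
		struct example *e = kmem_cache_alloc(example_cache, GFP_ATOMIC);

		if (!e)
			return NULL;
		memset(e, 0, sizeof(*e));
		return e;
	}

	/* After: kmem_cache_zalloc() allocates and zeroes in one step. */
	static struct example *example_alloc_new(void)
	{
		return kmem_cache_zalloc(example_cache, GFP_ATOMIC);
	}

Besides being shorter, the zalloc form avoids the bug class fixed implicitly in dst.c above, where the memset length (ops->entry_size) was tracked separately from the cache's object size and the two could drift apart.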