* 4) All operations modify state, so a spinlock is used.
*/
static struct dst_entry *dst_garbage_list;
- #if RT_CACHE_DEBUG >= 2
+ #if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif
static DEFINE_SPINLOCK(dst_lock);
if (ops->gc())
return NULL;
}
- dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+ dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
if (!dst)
return NULL;
- memset(dst, 0, ops->entry_size);
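/*
 * This is the recurring transformation throughout this patch:
 * kmem_cache_zalloc() performs the allocation and the zeroing in one
 * call, so each open-coded memset() after kmem_cache_alloc() can be
 * dropped. A minimal sketch, assuming a cache "foo_cachep" created for
 * "struct foo" (both names illustrative, not from this file):
 *
 *	p = kmem_cache_alloc(foo_cachep, GFP_ATOMIC);
 *	if (!p)
 *		return NULL;
 *	memset(p, 0, sizeof(*p));
 *
 * becomes
 *
 *	p = kmem_cache_zalloc(foo_cachep, GFP_ATOMIC);
 *	if (!p)
 *		return NULL;
 */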
atomic_set(&dst->__refcnt, 0);
dst->ops = ops;
dst->lastuse = jiffies;
dst->path = dst;
dst->input = dst_discard_in;
dst->output = dst_discard_out;
- #if RT_CACHE_DEBUG >= 2
+ #if RT_CACHE_DEBUG >= 2
atomic_inc(&dst_total);
#endif
atomic_inc(&ops->entries);
dst->ops->destroy(dst);
if (dst->dev)
dev_put(dst->dev);
- #if RT_CACHE_DEBUG >= 2
+ #if RT_CACHE_DEBUG >= 2
atomic_dec(&dst_total);
#endif
kmem_cache_free(dst->ops->kmem_cachep, dst);
goto out_entries;
}
- n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
+ n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
if (!n)
goto out_entries;
- memset(n, 0, tbl->entry_size);
-
skb_queue_head_init(&n->arp_queue);
rwlock_init(&n->lock);
n->updated = n->used = now;
struct neighbour *n;
int key_len = tbl->key_len;
u32 hash_val = tbl->hash(pkey, dev);
-
+
NEIGH_CACHE_STAT_INC(tbl, lookups);
read_lock_bh(&tbl->lock);
np = &n->next;
}
- /* Cycle through all hash buckets every base_reachable_time/2 ticks.
- * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
- * base_reachable_time.
+ /* Cycle through all hash buckets every base_reachable_time/2 ticks.
+ * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
+ * base_reachable_time.
*/
expire = tbl->parms.base_reachable_time >> 1;
expire /= (tbl->hash_mask + 1);
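/*
 * Worked example with illustrative defaults (not from this patch):
 * with base_reachable_time = 30 * HZ and a 32-bucket table
 * (hash_mask = 31), expire = (30 * HZ / 2) / 32, i.e. the timer fires
 * roughly every 15/32 of a second and handles one bucket per run, so
 * the whole table is swept once every base_reachable_time/2 = 15 s.
 */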
}
if (state & NUD_REACHABLE) {
- if (time_before_eq(now,
+ if (time_before_eq(now,
neigh->confirmed + neigh->parms->reachable_time)) {
NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
next = neigh->confirmed + neigh->parms->reachable_time;
notify = 1;
}
} else if (state & NUD_DELAY) {
- if (time_before_eq(now,
+ if (time_before_eq(now,
neigh->confirmed + neigh->parms->delay_probe_time)) {
NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
neigh->nud_state = NUD_REACHABLE;
goto out_unlock_bh;
now = jiffies;
-
+
if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
atomic_set(&neigh->probes, neigh->parms->ucast_probes);
NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
if it is different.
NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
- lladdr instead of overriding it
+ lladdr instead of overriding it
if it is different.
It also allows retaining the current state
if lladdr is unchanged.
NEIGH_UPDATE_F_ADMIN means that the change is administrative.
- NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
+ NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
NTF_ROUTER flag.
NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known as
a router.
old = neigh->nud_state;
err = -EPERM;
- if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+ if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
(old & (NUD_NOARP | NUD_PERMANENT)))
goto out;
- compare new & old
- if they are different, check override flag
*/
- if ((old & NUD_VALID) &&
+ if ((old & NUD_VALID) &&
!memcmp(lladdr, neigh->ha, dev->addr_len))
lladdr = neigh->ha;
} else {
neigh_del_timer(neigh);
if (new & NUD_IN_TIMER) {
neigh_hold(neigh);
- neigh_add_timer(neigh, (jiffies +
- ((new & NUD_REACHABLE) ?
+ neigh_add_timer(neigh, (jiffies +
+ ((new & NUD_REACHABLE) ?
neigh->parms->reachable_time :
0)));
}
struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
lladdr || !dev->addr_len);
if (neigh)
- neigh_update(neigh, lladdr, NUD_STALE,
+ neigh_update(neigh, lladdr, NUD_STALE,
NEIGH_UPDATE_F_OVERRIDE);
return neigh;
}
if (dev->hard_header &&
dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
- skb->len) < 0 &&
+ skb->len) < 0 &&
dev->rebuild_header(skb))
return 0;
tbl->stats = alloc_percpu(struct neigh_statistics);
if (!tbl->stats)
panic("cannot create neighbour cache statistics");
-
+
#ifdef CONFIG_PROC_FS
tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
- if (!tbl->pde)
+ if (!tbl->pde)
panic("cannot create neighbour proc dir entry");
tbl->pde->proc_fops = &neigh_stat_seq_fops;
tbl->pde->data = tbl;
err = -ENOENT;
goto out_dev_put;
}
-
+
neigh = __neigh_lookup_errno(tbl, dst, dev);
if (IS_ERR(neigh)) {
err = PTR_ERR(neigh);
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
-
+
static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
int ifindex)
{
struct neigh_parms *p;
-
+
for (p = &tbl->parms; p; p = p->next)
if ((p->dev && p->dev->ifindex == ifindex) ||
(!p->dev && !ifindex))
goto errout_locked;
}
- /*
+ /*
* We acquire tbl->lock to be nice to the periodic timers and
* make sure they always see a consistent set of values.
*/
if (*pos == 0)
return SEQ_START_TOKEN;
-
+
for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
if (!cpu_possible(cpu))
continue;
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
- int p_id, int pdev_id, char *p_name,
+ int p_id, int pdev_id, char *p_name,
proc_handler *handler, ctl_handler *strategy)
{
struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
t->neigh_vars[14].procname = NULL;
t->neigh_vars[15].procname = NULL;
} else {
- dev_name_source = t->neigh_dev[0].procname;
+ dev_name_source = t->neigh_dev[0].procname;
t->neigh_vars[12].data = (int *)(p + 1);
t->neigh_vars[13].data = (int *)(p + 1) + 1;
t->neigh_vars[14].data = (int *)(p + 1) + 2;
goto free;
}
- t->neigh_dev[0].procname = dev_name;
+ t->neigh_dev[0].procname = dev_name;
t->neigh_neigh_dir[0].ctl_name = pdev_id;
#define dz_prefix(key,dz) ((key).datum)
#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
- for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
+ for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
#define endfor_nexthops(fi) }
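/*
 * A usage sketch for the iterator pair above; "dn_fib_find_oif" is a
 * hypothetical helper (not in this file), while nhsel and nh are
 * declared by the macro itself:
 *
 *	static int dn_fib_find_oif(const struct dn_fib_info *fi, int oif)
 *	{
 *		for_nexthops(fi) {
 *			if (nh->nh_oif == oif)
 *				return nhsel;
 *		} endfor_nexthops(fi);
 *		return -1;
 *	}
 */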
}
static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
- u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
- struct dn_fib_info *fi, unsigned int flags)
+ u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
+ struct dn_fib_info *fi, unsigned int flags)
{
- struct rtmsg *rtm;
- struct nlmsghdr *nlh;
- unsigned char *b = skb->tail;
-
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
- rtm = NLMSG_DATA(nlh);
- rtm->rtm_family = AF_DECnet;
- rtm->rtm_dst_len = dst_len;
- rtm->rtm_src_len = 0;
- rtm->rtm_tos = 0;
- rtm->rtm_table = tb_id;
+ struct rtmsg *rtm;
+ struct nlmsghdr *nlh;
+ unsigned char *b = skb->tail;
+
+ nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
+ rtm = NLMSG_DATA(nlh);
+ rtm->rtm_family = AF_DECnet;
+ rtm->rtm_dst_len = dst_len;
+ rtm->rtm_src_len = 0;
+ rtm->rtm_tos = 0;
+ rtm->rtm_table = tb_id;
RTA_PUT_U32(skb, RTA_TABLE, tb_id);
- rtm->rtm_flags = fi->fib_flags;
- rtm->rtm_scope = scope;
+ rtm->rtm_flags = fi->fib_flags;
+ rtm->rtm_scope = scope;
rtm->rtm_type = type;
- if (rtm->rtm_dst_len)
- RTA_PUT(skb, RTA_DST, 2, dst);
- rtm->rtm_protocol = fi->fib_protocol;
- if (fi->fib_priority)
- RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
+ if (rtm->rtm_dst_len)
+ RTA_PUT(skb, RTA_DST, 2, dst);
+ rtm->rtm_protocol = fi->fib_protocol;
+ if (fi->fib_priority)
+ RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
goto rtattr_failure;
- if (fi->fib_nhs == 1) {
- if (fi->fib_nh->nh_gw)
- RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
- if (fi->fib_nh->nh_oif)
- RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
- }
- if (fi->fib_nhs > 1) {
- struct rtnexthop *nhp;
- struct rtattr *mp_head;
- if (skb_tailroom(skb) <= RTA_SPACE(0))
- goto rtattr_failure;
- mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0));
-
- for_nexthops(fi) {
- if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
- goto rtattr_failure;
- nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
- nhp->rtnh_flags = nh->nh_flags & 0xFF;
- nhp->rtnh_hops = nh->nh_weight - 1;
- nhp->rtnh_ifindex = nh->nh_oif;
- if (nh->nh_gw)
- RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
- nhp->rtnh_len = skb->tail - (unsigned char *)nhp;
- } endfor_nexthops(fi);
- mp_head->rta_type = RTA_MULTIPATH;
- mp_head->rta_len = skb->tail - (u8*)mp_head;
- }
-
- nlh->nlmsg_len = skb->tail - b;
- return skb->len;
+ if (fi->fib_nhs == 1) {
+ if (fi->fib_nh->nh_gw)
+ RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
+ if (fi->fib_nh->nh_oif)
+ RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
+ }
+ if (fi->fib_nhs > 1) {
+ struct rtnexthop *nhp;
+ struct rtattr *mp_head;
+ if (skb_tailroom(skb) <= RTA_SPACE(0))
+ goto rtattr_failure;
+ mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0));
+
+ for_nexthops(fi) {
+ if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
+ goto rtattr_failure;
+ nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
+ nhp->rtnh_flags = nh->nh_flags & 0xFF;
+ nhp->rtnh_hops = nh->nh_weight - 1;
+ nhp->rtnh_ifindex = nh->nh_oif;
+ if (nh->nh_gw)
+ RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
+ nhp->rtnh_len = skb->tail - (unsigned char *)nhp;
+ } endfor_nexthops(fi);
+ mp_head->rta_type = RTA_MULTIPATH;
+ mp_head->rta_len = skb->tail - (u8*)mp_head;
+ }
+
+ nlh->nlmsg_len = skb->tail - b;
+ return skb->len;
nlmsg_failure:
rtattr_failure:
- skb_trim(skb, b - skb->data);
- return -EMSGSIZE;
+ skb_trim(skb, b - skb->data);
+ return -EMSGSIZE;
}
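/*
 * Note on the labels above: NLMSG_NEW() and RTA_PUT() are goto-based
 * macros that jump to nlmsg_failure/rtattr_failure when the skb runs
 * out of tailroom; skb_trim(skb, b - skb->data) then rolls the buffer
 * back to where this message started, discarding the partial output.
 */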
static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
- struct nlmsghdr *nlh, struct netlink_skb_parms *req)
+ struct nlmsghdr *nlh, struct netlink_skb_parms *req)
{
- struct sk_buff *skb;
- u32 pid = req ? req->pid : 0;
+ struct sk_buff *skb;
+ u32 pid = req ? req->pid : 0;
int err = -ENOBUFS;
- skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
- if (skb == NULL)
+ skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
+ if (skb == NULL)
goto errout;
- err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
+ err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
f->fn_type, f->fn_scope, &f->fn_key, z,
DN_FIB_INFO(f), 0);
if (err < 0) {
rtnl_set_sk_err(RTNLGRP_DECnet_ROUTE, err);
}
- static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
+ static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
struct netlink_callback *cb,
struct dn_fib_table *tb,
struct dn_zone *dz,
continue;
if (f->fn_state & DN_S_ZOMBIE)
continue;
- if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
+ if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWROUTE,
- tb->n,
+ tb->n,
(f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type,
- f->fn_scope, &f->fn_key, dz->dz_order,
+ f->fn_scope, &f->fn_key, dz->dz_order,
f->fn_info, NLM_F_MULTI) < 0) {
cb->args[4] = i;
return -1;
return skb->len;
}
- static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
+ static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
struct netlink_callback *cb,
struct dn_fib_table *tb,
struct dn_zone *dz)
return skb->len;
}
- static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
- struct netlink_callback *cb)
+ static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
+ struct netlink_callback *cb)
{
- int m, s_m;
+ int m, s_m;
struct dn_zone *dz;
struct dn_hash *table = (struct dn_hash *)tb->data;
read_unlock(&dn_fib_tables_lock);
cb->args[2] = m;
- return skb->len;
+ return skb->len;
}
int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
goto next;
if (dumped)
memset(&cb->args[2], 0, sizeof(cb->args) -
- 2 * sizeof(cb->args[0]));
+ 2 * sizeof(cb->args[0]));
if (tb->dump(tb, skb, cb) < 0)
goto out;
dumped = 1;
struct dn_fib_node *new_f, *f, **fp, **del_fp;
struct dn_zone *dz;
struct dn_fib_info *fi;
- int z = r->rtm_dst_len;
+ int z = r->rtm_dst_len;
int type = r->rtm_type;
dn_fib_key_t key;
- int err;
+ int err;
- if (z > 16)
- return -EINVAL;
+ if (z > 16)
+ return -EINVAL;
dz = table->dh_zones[z];
if (!dz && !(dz = dn_new_zone(table, z)))
key = dz_key(dst, dz);
}
- if ((fi = dn_fib_create_info(r, rta, n, &err)) == NULL)
- return err;
+ if ((fi = dn_fib_create_info(r, rta, n, &err)) == NULL)
+ return err;
if (dz->dz_nent > (dz->dz_divisor << 2) &&
dz->dz_divisor < DN_MAX_DIVISOR &&
replace:
err = -ENOBUFS;
- new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
+ new_f = kmem_cache_zalloc(dn_hash_kmem, GFP_KERNEL);
if (new_f == NULL)
goto out;
- memset(new_f, 0, sizeof(struct dn_fib_node));
-
new_f->fn_key = key;
new_f->fn_type = type;
new_f->fn_scope = r->rtm_scope;
dn_rt_cache_flush(-1);
}
- dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);
+ dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);
- return 0;
+ return 0;
out:
dn_fib_release_info(fi);
return err;
{
struct dn_hash *table = (struct dn_hash*)tb->data;
struct dn_fib_node **fp, **del_fp, *f;
- int z = r->rtm_dst_len;
+ int z = r->rtm_dst_len;
struct dn_zone *dz;
dn_fib_key_t key;
int matched;
- if (z > 16)
- return -EINVAL;
+ if (z > 16)
+ return -EINVAL;
if ((dz = table->dh_zones[z]) == NULL)
return -ESRCH;
if (del_fp == NULL &&
(!r->rtm_type || f->fn_type == r->rtm_type) &&
(r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
- (!r->rtm_protocol ||
+ (!r->rtm_protocol ||
fi->fib_protocol == r->rtm_protocol) &&
dn_fib_nh_match(r, n, rta, fi) == 0)
del_fp = fp;
if (del_fp) {
f = *del_fp;
- dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
+ dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
if (matched != 1) {
write_lock_bh(&dn_fib_tables_lock);
return 0;
}
- return -ESRCH;
+ return -ESRCH;
}
static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table)
static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp, struct dn_fib_res *res)
{
- int err;
+ int err;
struct dn_zone *dz;
struct dn_hash *t = (struct dn_hash *)tb->data;
if (err == 0) {
res->type = f->fn_type;
- res->scope = f->fn_scope;
+ res->scope = f->fn_scope;
res->prefixlen = dz->dz_order;
goto out;
}
err = 1;
out:
read_unlock(&dn_fib_tables_lock);
- return err;
+ return err;
}
struct dn_fib_table *dn_fib_get_table(u32 n, int create)
{
- struct dn_fib_table *t;
+ struct dn_fib_table *t;
struct hlist_node *node;
unsigned int h;
- if (n < RT_TABLE_MIN)
- return NULL;
+ if (n < RT_TABLE_MIN)
+ return NULL;
- if (n > RT_TABLE_MAX)
- return NULL;
+ if (n > RT_TABLE_MAX)
+ return NULL;
h = n & (DN_FIB_TABLE_HASHSZ - 1);
rcu_read_lock();
}
rcu_read_unlock();
- if (!create)
- return NULL;
+ if (!create)
+ return NULL;
- if (in_interrupt() && net_ratelimit()) {
- printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
- return NULL;
- }
+ if (in_interrupt() && net_ratelimit()) {
+ printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
+ return NULL;
+ }
- t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
+ t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
GFP_KERNEL);
- if (t == NULL)
- return NULL;
-
- t->n = n;
- t->insert = dn_fib_table_insert;
- t->delete = dn_fib_table_delete;
- t->lookup = dn_fib_table_lookup;
- t->flush = dn_fib_table_flush;
- t->dump = dn_fib_table_dump;
+ if (t == NULL)
+ return NULL;
+
+ t->n = n;
+ t->insert = dn_fib_table_insert;
+ t->delete = dn_fib_table_delete;
+ t->lookup = dn_fib_table_lookup;
+ t->flush = dn_fib_table_flush;
+ t->dump = dn_fib_table_dump;
hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);
- return t;
+ return t;
}
struct dn_fib_table *dn_fib_empty_table(void)
{
- u32 id;
+ u32 id;
- for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
+ for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
if (dn_fib_get_table(id, 0) == NULL)
- return dn_fib_get_table(id, 1);
- return NULL;
+ return dn_fib_get_table(id, 1);
+ return NULL;
}
void dn_fib_flush(void)
{
- int flushed = 0;
- struct dn_fib_table *tb;
+ int flushed = 0;
+ struct dn_fib_table *tb;
struct hlist_node *node;
unsigned int h;
for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist)
- flushed += tb->flush(tb);
- }
+ flushed += tb->flush(tb);
+ }
- if (flushed)
- dn_rt_cache_flush(-1);
+ if (flushed)
+ dn_rt_cache_flush(-1);
}
void __init dn_fib_table_init(void)
write_lock(&dn_fib_tables_lock);
for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h],
- hlist) {
+ hlist) {
hlist_del(&t->hlist);
kfree(t);
}
/*
* Delete a VIF entry
*/
-
+
static int vif_delete(int vifi)
{
struct vif_device *v;
return -ENOBUFS;
break;
#endif
- case VIFF_TUNNEL:
+ case VIFF_TUNNEL:
dev = ipmr_new_tunnel(vifc);
if (!dev)
return -ENOBUFS;
*/
static struct mfc_cache *ipmr_cache_alloc(void)
{
- struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
+ struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if(c==NULL)
return NULL;
- memset(c, 0, sizeof(*c));
c->mfc_un.res.minvif = MAXVIFS;
return c;
}
static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
- struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
+ struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
if(c==NULL)
return NULL;
- memset(c, 0, sizeof(*c));
skb_queue_head_init(&c->mfc_un.unres.unresolved);
c->mfc_un.unres.expires = jiffies + 10*HZ;
return c;
/*
* A cache entry has gone into a resolved state from queued
*/
-
+
static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
struct sk_buff *skb;
*
* Called under mrt_lock.
*/
-
+
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
{
struct sk_buff *skb;
memcpy(msg, pkt->nh.raw, sizeof(struct iphdr));
msg->im_msgtype = IGMPMSG_WHOLEPKT;
msg->im_mbz = 0;
- msg->im_vif = reg_vif_num;
+ msg->im_vif = reg_vif_num;
skb->nh.iph->ihl = sizeof(struct iphdr) >> 2;
skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr));
- } else
+ } else
#endif
- {
-
+ {
+
/*
* Copy the IP header
*/
igmp->code = 0;
skb->nh.iph->tot_len=htons(skb->len); /* Fix the length */
skb->h.raw = skb->nh.raw;
- }
+ }
if (mroute_socket == NULL) {
kfree_skb(skb);
/*
* Queue a packet for resolution. It gets a locked cache entry!
*/
-
+
static int
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
* Reflect first query at mrouted.
*/
if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
- /* If the report failed throw the cache entry
+ /* If the report failed throw the cache entry
out - Brad Parker
*/
spin_unlock_bh(&mfc_unres_lock);
/*
* Close the multicast socket, and clear the vif tables etc
*/
-
+
static void mroute_clean_tables(struct sock *sk)
{
int i;
-
+
/*
* Shut down all active vif entries
*/
* that's how BSD mrouted happens to think. Maybe one day with a proper
* MOSPF/PIM router set up we can clean this up.
*/
-
+
int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
{
int ret;
struct vifctl vif;
struct mfcctl mfc;
-
+
if(optname!=MRT_INIT)
{
if(sk!=mroute_socket && !capable(CAP_NET_ADMIN))
if(optlen!=sizeof(vif))
return -EINVAL;
if (copy_from_user(&vif,optval,sizeof(vif)))
- return -EFAULT;
+ return -EFAULT;
if(vif.vifc_vifi >= MAXVIFS)
return -ENFILE;
rtnl_lock();
/*
* Getsock opt support for the multicast routing system.
*/
-
+
int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
{
int olr;
int val;
- if(optname!=MRT_VERSION &&
+ if(optname!=MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
optname!=MRT_PIM &&
#endif
olr = min_t(unsigned int, olr, sizeof(int));
if (olr < 0)
return -EINVAL;
-
+
if(put_user(olr,optlen))
return -EFAULT;
if(optname==MRT_VERSION)
/*
* The IP multicast ioctl support routines.
*/
-
+
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
struct sioc_sg_req sr;
struct sioc_vif_req vr;
struct vif_device *vif;
struct mfc_cache *c;
-
+
switch(cmd)
{
case SIOCGETVIFCNT:
if (copy_from_user(&vr,arg,sizeof(vr)))
- return -EFAULT;
+ return -EFAULT;
if(vr.vifi>=maxvif)
return -EINVAL;
read_lock(&mrt_lock);
* This avoids tunnel drivers and other mess and gives us the speed so
* important for multicast video.
*/
-
+
static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr));
encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
if (skb_cow(skb, encap)) {
- ip_rt_put(rt);
+ ip_rt_put(rt);
goto out_free;
}
* not mrouter) cannot join to more than one interface - it will
* result in receiving multiple packets.
*/
- NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
+ NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
ipmr_forward_finish);
return;
large chunk of pimd to kernel. Ough... --ANK
*/
(mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
- time_after(jiffies,
+ time_after(jiffies,
cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
cache->mfc_un.res.last_assert = jiffies;
ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
struct iphdr *encap;
struct net_device *reg_dev = NULL;
- if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
+ if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
goto drop;
pim = (struct igmphdr*)skb->h.raw;
- if (!mroute_do_pim ||
+ if (!mroute_do_pim ||
skb->len < sizeof(*pim) + sizeof(*encap) ||
- pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
+ pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
goto drop;
encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr));
*/
if (!MULTICAST(encap->daddr) ||
encap->tot_len == 0 ||
- ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
+ ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
goto drop;
read_lock(&mrt_lock);
dev_hold(reg_dev);
read_unlock(&mrt_lock);
- if (reg_dev == NULL)
+ if (reg_dev == NULL)
goto drop;
skb->mac.raw = skb->nh.raw;
struct iphdr *encap;
struct net_device *reg_dev = NULL;
- if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
+ if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
goto drop;
pim = (struct pimreghdr*)skb->h.raw;
- if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
+ if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
(pim->flags&PIM_NULL_REGISTER) ||
- (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
+ (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
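/*
 * The checksum test above accepts a register whose checksum covers
 * either the PIM header alone or the entire packet: the packet is
 * dropped only when ip_compute_csum() over the header is non-zero
 * *and* the folded checksum over the whole skb is non-zero as well.
 */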
encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr));
if (!MULTICAST(encap->daddr) ||
encap->tot_len == 0 ||
- ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
+ ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
goto drop;
read_lock(&mrt_lock);
dev_hold(reg_dev);
read_unlock(&mrt_lock);
- if (reg_dev == NULL)
+ if (reg_dev == NULL)
goto drop;
skb->mac.raw = skb->nh.raw;
return err;
}
- #ifdef CONFIG_PROC_FS
+ #ifdef CONFIG_PROC_FS
/*
* The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
*/
for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
if(!VIF_EXISTS(iter->ct))
continue;
- if (pos-- == 0)
+ if (pos-- == 0)
return &vif_table[iter->ct];
}
return NULL;
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
read_lock(&mrt_lock);
- return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
+ return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
++*pos;
if (v == SEQ_START_TOKEN)
return ipmr_vif_seq_idx(iter, 0);
-
+
while (++iter->ct < maxvif) {
if(!VIF_EXISTS(iter->ct))
continue;
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
- seq_puts(seq,
+ seq_puts(seq,
"Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
} else {
const struct vif_device *vif = v;
seq_printf(seq,
"%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
vif - vif_table,
- name, vif->bytes_in, vif->pkt_in,
+ name, vif->bytes_in, vif->pkt_in,
vif->bytes_out, vif->pkt_out,
vif->flags, vif->local, vif->remote);
}
struct seq_file *seq;
int rc = -ENOMEM;
struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
-
+
if (!s)
goto out;
it->cache = mfc_cache_array;
read_lock(&mrt_lock);
- for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
- for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
- if (pos-- == 0)
+ for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
+ for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
+ if (pos-- == 0)
return mfc;
read_unlock(&mrt_lock);
it->cache = &mfc_unres_queue;
spin_lock_bh(&mfc_unres_lock);
- for(mfc = mfc_unres_queue; mfc; mfc = mfc->next)
+ for(mfc = mfc_unres_queue; mfc; mfc = mfc->next)
if (pos-- == 0)
return mfc;
spin_unlock_bh(&mfc_unres_lock);
struct ipmr_mfc_iter *it = seq->private;
it->cache = NULL;
it->ct = 0;
- return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
+ return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
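/*
 * seq_file convention at work here: *pos == 0 maps to SEQ_START_TOKEN
 * so that ->show() can emit a header row first, while *pos >= 1 maps
 * to the (*pos - 1)-th cache entry via the _idx() helper.
 */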
if (mfc->next)
return mfc->next;
-
- if (it->cache == &mfc_unres_queue)
+
+ if (it->cache == &mfc_unres_queue)
goto end_of_list;
BUG_ON(it->cache != mfc_cache_array);
read_unlock(&mrt_lock);
it->cache = &mfc_unres_queue;
it->ct = 0;
-
+
spin_lock_bh(&mfc_unres_lock);
mfc = mfc_unres_queue;
- if (mfc)
+ if (mfc)
return mfc;
end_of_list:
int n;
if (v == SEQ_START_TOKEN) {
- seq_puts(seq,
+ seq_puts(seq,
"Group Origin Iif Pkts Bytes Wrong Oifs\n");
} else {
const struct mfc_cache *mfc = v;
const struct ipmr_mfc_iter *it = seq->private;
-
+
seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
(unsigned long) mfc->mfc_mcastgrp,
(unsigned long) mfc->mfc_origin,
mfc->mfc_un.res.wrong_if);
if (it->cache != &mfc_unres_queue) {
- for(n = mfc->mfc_un.res.minvif;
+ for(n = mfc->mfc_un.res.minvif;
n < mfc->mfc_un.res.maxvif; n++ ) {
- if(VIF_EXISTS(n)
+ if(VIF_EXISTS(n)
&& mfc->mfc_un.res.ttls[n] < 255)
- seq_printf(seq,
- " %2d:%-3d",
+ seq_printf(seq,
+ " %2d:%-3d",
n, mfc->mfc_un.res.ttls[n]);
}
}
struct seq_file *seq;
int rc = -ENOMEM;
struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
-
+
if (!s)
goto out;
.llseek = seq_lseek,
.release = seq_release_private,
};
- #endif
+ #endif
#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol = {
/*
* Setup for IP multicast routing
*/
-
+
void __init ip_mr_init(void)
{
mrt_cachep = kmem_cache_create("ip_mrt_cache",
init_timer(&ipmr_expire_timer);
ipmr_expire_timer.function=ipmr_expire_process;
register_netdevice_notifier(&ip_mr_notifier);
- #ifdef CONFIG_PROC_FS
+ #ifdef CONFIG_PROC_FS
proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops);
proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops);
- #endif
+ #endif
}
* Checking the dest server status.
*/
if ((dest == NULL) ||
- !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
- (sysctl_ip_vs_expire_quiescent_template &&
+ !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
+ (sysctl_ip_vs_expire_quiescent_template &&
(atomic_read(&dest->weight) == 0))) {
IP_VS_DBG(9, "check_template: dest not available for "
"protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d "
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp = ip_vs_proto_get(proto);
- cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
+ cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
if (cp == NULL) {
IP_VS_ERR_RL("ip_vs_conn_new: no memory available.\n");
return NULL;
}
- memset(cp, 0, sizeof(*cp));
INIT_LIST_HEAD(&cp->c_list);
init_timer(&cp->timer);
cp->timer.data = (unsigned long)cp;
{
int idx;
struct ip_vs_conn *cp;
-
+
for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) {
ct_read_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
int idx;
++*pos;
- if (v == SEQ_START_TOKEN)
+ if (v == SEQ_START_TOKEN)
return ip_vs_conn_array(seq, 0);
/* more on same hash chain? */
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
seq->private = &ip_vs_conn_tab[idx];
return cp;
- }
+ }
ct_read_unlock_bh(idx);
}
seq->private = NULL;
but required by, the NAT layer; it can also be used by an iptables
extension. */
- /* (C) 1999-2001 Paul `Rusty' Russell
+ /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
void ip_ct_deliver_cached_events(const struct ip_conntrack *ct)
{
struct ip_conntrack_ecache *ecache;
-
+
local_bh_disable();
ecache = &__get_cpu_var(ip_conntrack_ecache);
if (ecache->ct == ct)
unsigned int size, unsigned int rnd)
{
return (jhash_3words((__force u32)tuple->src.ip,
- ((__force u32)tuple->dst.ip ^ tuple->dst.protonum),
- (tuple->src.u.all | (tuple->dst.u.all << 16)),
- rnd) % size);
+ ((__force u32)tuple->dst.ip ^ tuple->dst.protonum),
+ (tuple->src.u.all | (tuple->dst.u.all << 16)),
+ rnd) % size);
}
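/*
 * Word packing used above: word 0 is the source IP, word 1 is the
 * destination IP xor'ed with the L4 protocol number, and word 2 holds
 * the source port in the low 16 bits with the destination port shifted
 * into the high 16. jhash_3words() mixes them with the random seed
 * 'rnd', and the result is reduced modulo the table size to pick a
 * hash bucket.
 */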
static u_int32_t
__ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
{
struct ip_conntrack_expect *i;
-
+
list_for_each_entry(i, &ip_conntrack_expect_list, list) {
if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
return i;
ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
{
struct ip_conntrack_expect *i;
-
+
read_lock_bh(&ip_conntrack_lock);
i = __ip_conntrack_expect_find(tuple);
if (i)
static void __ip_conntrack_hash_insert(struct ip_conntrack *ct,
unsigned int hash,
- unsigned int repl_hash)
+ unsigned int repl_hash)
{
ct->id = ++ip_conntrack_next_id;
list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list,
/* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
/* No external references means no one else could have
- confirmed us. */
+ confirmed us. */
IP_NF_ASSERT(!is_confirmed(ct));
DEBUGP("Confirming conntrack %p\n", ct);
write_lock_bh(&ip_conntrack_lock);
/* See if there's one in the list already, including reverse:
- NAT could have grabbed it without realizing, since we're
- not in the hash. If there is, we lost the race. */
+ NAT could have grabbed it without realizing, since we're
+ not in the hash. If there is, we lost the race. */
list_for_each_entry(h, &ip_conntrack_hash[hash], list)
if (ip_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&h->tuple))
p = &ip_conntrack_generic_protocol;
}
preempt_enable();
-
+
return p;
}
}
}
- conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
+ conntrack = kmem_cache_zalloc(ip_conntrack_cachep, GFP_ATOMIC);
if (!conntrack) {
DEBUGP("Can't allocate conntrack.\n");
atomic_dec(&ip_conntrack_count);
return ERR_PTR(-ENOMEM);
}
- memset(conntrack, 0, sizeof(*conntrack));
atomic_set(&conntrack->ct_general.use, 1);
conntrack->ct_general.destroy = destroy_conntrack;
conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);
- if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4,
+ if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4,
&tuple,proto))
return NULL;
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
DEBUGP("ip_conntrack_in: normal packet for %p\n",
ct);
- *ctinfo = IP_CT_ESTABLISHED;
+ *ctinfo = IP_CT_ESTABLISHED;
} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
DEBUGP("ip_conntrack_in: related packet for %p\n",
ct);
if ((*pskb)->pkt_type == PACKET_BROADCAST) {
printk("Broadcast packet!\n");
return NF_ACCEPT;
- } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
+ } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
== htonl(0x000000FF)) {
printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",
NIPQUAD((*pskb)->nh.iph->saddr),
/* It may be a special packet, error, unclean...
* inverse of the return code tells to the netfilter
* core what to do with the packet. */
- if (proto->error != NULL
+ if (proto->error != NULL
&& (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) {
CONNTRACK_STAT_INC(error);
CONNTRACK_STAT_INC(invalid);
int invert_tuplepr(struct ip_conntrack_tuple *inverse,
const struct ip_conntrack_tuple *orig)
{
- return ip_ct_invert_tuple(inverse, orig,
+ return ip_ct_invert_tuple(inverse, orig,
__ip_conntrack_proto_find(orig->dst.protonum));
}
const struct ip_conntrack_expect *b)
{
/* Part covered by intersection of masks must be unequal,
- otherwise they clash */
+ otherwise they clash */
struct ip_conntrack_tuple intersect_mask
= { { a->mask.src.ip & b->mask.src.ip,
{ a->mask.src.u.all & b->mask.src.u.all } },
}
/* We don't increase the master conntrack refcount for non-fulfilled
- * conntracks. During the conntrack destruction, the expectations are
+ * conntracks. During the conntrack destruction, the expectations are
* always killed before the conntrack itself */
struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me)
{
}
/* Will be over limit? */
- if (expect->master->helper->max_expected &&
+ if (expect->master->helper->max_expected &&
expect->master->expecting >= expect->master->helper->max_expected)
evict_oldest_expect(expect->master);
ret = 0;
out:
write_unlock_bh(&ip_conntrack_lock);
- return ret;
+ return ret;
}
/* Alter reply tuple (maybe alter helper). This is for NAT, and is
const struct ip_conntrack_helper *me)
{
if (tuplehash_to_ctrack(i)->helper == me) {
- ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i));
+ ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i));
tuplehash_to_ctrack(i)->helper = NULL;
}
}
}
/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
- void __ip_ct_refresh_acct(struct ip_conntrack *ct,
- enum ip_conntrack_info ctinfo,
+ void __ip_ct_refresh_acct(struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
const struct sk_buff *skb,
unsigned long extra_jiffies,
int do_acct)
#ifdef CONFIG_IP_NF_CT_ACCT
if (do_acct) {
ct->counters[CTINFO2DIR(ctinfo)].packets++;
- ct->counters[CTINFO2DIR(ctinfo)].bytes +=
+ ct->counters[CTINFO2DIR(ctinfo)].bytes +=
ntohs(skb->nh.iph->tot_len);
if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
|| (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
{
skb_orphan(skb);
- local_bh_disable();
+ local_bh_disable();
skb = ip_defrag(skb, user);
local_bh_enable();
/* This ICMP is in reverse direction to the packet which caused it */
ct = ip_conntrack_get(skb, &ctinfo);
-
+
if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
else
struct inet_sock *inet = inet_sk(sk);
struct ip_conntrack_tuple_hash *h;
struct ip_conntrack_tuple tuple;
-
+
IP_CT_TUPLE_U_BLANK(&tuple);
tuple.src.ip = inet->rcv_saddr;
tuple.src.u.tcp.port = inet->sport;
if (vmalloced)
vfree(hash);
else
- free_pages((unsigned long)hash,
+ free_pages((unsigned long)hash,
get_order(sizeof(struct list_head) * size));
}
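/*
 * The 'vmalloced' flag is set by the allocation helper below, which
 * tries __get_free_pages() first and falls back to vmalloc(); the free
 * path here must therefore mirror whichever allocator succeeded.
 */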
ip_ct_attach = NULL;
/* This makes sure all current packets have passed through
- netfilter framework. Roll on, two-stage module
- delete... */
+ netfilter framework. Roll on, two-stage module
+ delete... */
synchronize_net();
ip_ct_event_cache_flush();
struct list_head *hash;
unsigned int i;
- *vmalloced = 0;
- hash = (void*)__get_free_pages(GFP_KERNEL,
+ *vmalloced = 0;
+ hash = (void*)__get_free_pages(GFP_KERNEL,
get_order(sizeof(struct list_head)
* size));
- if (!hash) {
+ if (!hash) {
*vmalloced = 1;
printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n");
hash = vmalloc(sizeof(struct list_head) * size);
if (!hash)
return -ENOMEM;
- /* We have to rehash for the new table anyway, so we also can
+ /* We have to rehash for the new table anyway, so we also can
* use a new random seed */
get_random_bytes(&rnd, 4);
/* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
* machine has 256 buckets. >= 1GB machines have 8192 buckets. */
- if (!ip_conntrack_htable_size) {
+ if (!ip_conntrack_htable_size) {
ip_conntrack_htable_size
= (((num_physpages << PAGE_SHIFT) / 16384)
/ sizeof(struct list_head));
}
ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
- sizeof(struct ip_conntrack), 0,
- 0, NULL, NULL);
+ sizeof(struct ip_conntrack), 0,
+ 0, NULL, NULL);
if (!ip_conntrack_cachep) {
printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
goto err_free_hash;
/*
- * Linux INET6 implementation
+ * Linux INET6 implementation
* Forwarding Information Database
*
* Authors:
- * Pedro Roque <roque@di.fc.ul.pt>
+ * Pedro Roque <roque@di.fc.ul.pt>
*
* $Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $
*
static struct fib6_walker_t fib6_walker_list = {
.prev = &fib6_walker_list,
- .next = &fib6_walker_list,
+ .next = &fib6_walker_list,
};
#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
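/*
 * fib6_walker_list is a circular doubly-linked list whose head points
 * at itself when empty, so FOR_WALKERS() visits every registered
 * walker and stops once the cursor wraps back around to the head.
 */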
/*
* Auxiliary address test functions for the radix tree.
*
- * These assume a 32bit processor (although they will work on
+ * These assume a 32bit processor (although they will work on
* 64bit processors)
*/
{
struct fib6_node *fn;
- if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL)
- memset(fn, 0, sizeof(struct fib6_node));
+ fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);
return fn;
}
int res;
struct rt6_info *rt;
- for (rt = w->leaf; rt; rt = rt->u.next) {
+ for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
res = rt6_dump_route(rt, w->args);
if (res < 0) {
/* Frame is full, suspend walking */
struct fib6_node *pn = NULL;
struct rt6key *key;
int bit;
- __be32 dir = 0;
+ __be32 dir = 0;
__u32 sernum = fib6_new_sernum();
RT6_TRACE("fib6_add_1\n");
if (plen < fn->fn_bit ||
!ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
goto insert_above;
-
+
/*
* Exact match ?
*/
-
+
if (plen == fn->fn_bit) {
/* clean up an intermediate node */
if ((fn->fn_flags & RTN_RTINFO) == 0) {
rt6_release(fn->leaf);
fn->leaf = NULL;
}
-
+
fn->fn_sernum = sernum;
-
+
return fn;
}
/*
* We have more bits to go
*/
-
+
/* Try to walk down on tree. */
fn->fn_sernum = sernum;
dir = addr_bit_set(addr, fn->fn_bit);
if (ln == NULL)
return NULL;
ln->fn_bit = plen;
-
+
ln->parent = pn;
ln->fn_sernum = sernum;
insert_above:
/*
- * split since we don't have a common prefix anymore or
+ * split since we don't have a common prefix anymore or
* we have a less significant route.
* we have to insert an intermediate node on the list
* this new node will point to the one we need to create
See comment in __ipv6_addr_diff: bit may be an invalid value,
but if it is >= plen, the value is ignored in any case.
*/
-
+
bit = __ipv6_addr_diff(addr, &key->addr, addrlen);
- /*
- * (intermediate)[in]
+ /*
+ * (intermediate)[in]
* / \
* (new leaf node)[ln] (old node)[fn]
*/
if (plen > bit) {
in = node_alloc();
ln = node_alloc();
-
+
if (in == NULL || ln == NULL) {
if (in)
node_free(in);
return NULL;
}
- /*
- * new intermediate node.
+ /*
+ * new intermediate node.
* RTN_RTINFO will
* be off, since an address that chooses one of
* the branches would not match less specific routes
}
} else { /* plen <= bit */
- /*
+ /*
* (new leaf node)[ln]
* / \
* (old node)[fn] NULL
ln->parent = pn;
ln->fn_sernum = sernum;
-
+
if (dir)
pn->right = ln;
else
fn->leaf == &ip6_null_entry &&
!(rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ){
fn->leaf = rt;
- rt->u.next = NULL;
+ rt->u.dst.rt6_next = NULL;
goto out;
}
- for (iter = fn->leaf; iter; iter=iter->u.next) {
+ for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) {
/*
* Search for duplicates
*/
if (iter->rt6i_metric > rt->rt6i_metric)
break;
- ins = &iter->u.next;
+ ins = &iter->u.dst.rt6_next;
}
/*
*/
out:
- rt->u.next = iter;
+ rt->u.dst.rt6_next = iter;
*ins = rt;
rt->rt6i_node = fn;
atomic_inc(&rt->rt6i_ref);
RT6_TRACE("fib6_del_route\n");
/* Unlink it */
- *rtp = rt->u.next;
+ *rtp = rt->u.dst.rt6_next;
rt->rt6i_node = NULL;
rt6_stats.fib_rt_entries--;
rt6_stats.fib_discarded_routes++;
FOR_WALKERS(w) {
if (w->state == FWS_C && w->leaf == rt) {
RT6_TRACE("walker %p adjusted by delroute\n", w);
- w->leaf = rt->u.next;
+ w->leaf = rt->u.dst.rt6_next;
if (w->leaf == NULL)
w->state = FWS_U;
}
}
read_unlock(&fib6_walker_lock);
- rt->u.next = NULL;
+ rt->u.dst.rt6_next = NULL;
if (fn->leaf == NULL && fn->fn_flags&RTN_TL_ROOT)
fn->leaf = &ip6_null_entry;
* Walk the leaf entries looking for ourself
*/
- for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.next) {
+ for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) {
if (*rtp == rt) {
fib6_del_route(fn, rtp, info);
return 0;
* However, it is internally reenterable wrt itself and fib6_add/fib6_del.
* It means that we can modify the tree during walking
* and use this function for garbage collection, clone pruning,
- * cleaning tree when a device goes down etc. etc.
+ * cleaning tree when a device goes down etc. etc.
*
* It guarantees that every node will be traversed,
* and that it will be traversed only once.
continue;
}
w->state = FWS_L;
- #endif
+ #endif
case FWS_L:
if (fn->left) {
w->node = fn->left;
struct rt6_info *rt;
struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w;
- for (rt = w->leaf; rt; rt = rt->u.next) {
+ for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
res = c->func(rt, c->arg);
if (res < 0) {
w->leaf = rt;
/*
* Convenient frontend to tree walker.
- *
+ *
* func is called on each route.
* It may return -1 -> delete this route.
* 0 -> continue walking
int padlen;
__u16 len;
- /* Cause code constants are now defined in network order. */
+ /* Cause code constants are now defined in network order. */
err.cause = cause_code;
len = sizeof(sctp_errhdr_t) + paylen;
padlen = len % 4;
*/
chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;
- /* Tell peer that we'll do ECN only if peer advertised such cap. */
+ /* Tell peer that we'll do ECN only if peer advertised such cap. */
if (asoc->peer.ecn_capable)
chunksize += sizeof(ecap_param);
- /* Tell peer that we'll do PR-SCTP only if peer advertised. */
+ /* Tell peer that we'll do PR-SCTP only if peer advertised. */
if (asoc->peer.prsctp_capable)
chunksize += sizeof(prsctp_param);
if (retval && chunk)
retval->transport = chunk->transport;
- return retval;
+ return retval;
}
/* Create an ABORT. Note that we set the T bit if we have no
return retval;
}
- /* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */
+ /* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */
struct sctp_chunk *sctp_make_abort_violation(
const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
{
struct sctp_chunk *retval;
- retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC);
+ retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC);
if (!retval)
goto nodata;
- memset(retval, 0, sizeof(struct sctp_chunk));
if (!sk) {
SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb);
/* Header size is static data prior to the actual cookie, including
* any padding.
*/
- headersize = sizeof(sctp_paramhdr_t) +
- (sizeof(struct sctp_signed_cookie) -
+ headersize = sizeof(sctp_paramhdr_t) +
+ (sizeof(struct sctp_signed_cookie) -
sizeof(struct sctp_cookie));
bodysize = sizeof(struct sctp_cookie)
+ ntohs(init_chunk->chunk_hdr->length) + addrs_len;
memcpy((__u8 *)&cookie->c.peer_init[0] +
ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
- if (sctp_sk(ep->base.sk)->hmac) {
+ if (sctp_sk(ep->base.sk)->hmac) {
struct hash_desc desc;
/* Sign the message. */
sg.length = bodysize;
keylen = SCTP_SECRET_SIZE;
key = (char *)ep->secret_key[ep->current_key];
- desc.tfm = sctp_sk(ep->base.sk)->hmac;
- desc.flags = 0;
+ desc.tfm = sctp_sk(ep->base.sk)->hmac;
+ desc.flags = 0;
if (crypto_hash_setkey(desc.tfm, key, keylen) ||
crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
* any padding.
*/
headersize = sizeof(sctp_chunkhdr_t) +
- (sizeof(struct sctp_signed_cookie) -
+ (sizeof(struct sctp_signed_cookie) -
sizeof(struct sctp_cookie));
bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
fixed_size = headersize + sizeof(struct sctp_cookie);
struct sctp_chunk **errp)
{
char error[] = "The following parameter had invalid length:";
- size_t payload_len = WORD_ROUND(sizeof(error)) +
+ size_t payload_len = WORD_ROUND(sizeof(error)) +
sizeof(sctp_paramhdr_t);
case SCTP_PARAM_FWD_TSN_SUPPORT:
if (sctp_prsctp_enable)
break;
- /* Fall Through */
+ /* Fall Through */
default:
SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
ntohs(param.p->type), cid);
sctp_walk_params(param, peer_init, init_hdr.params) {
if (!sctp_process_param(asoc, param, peer_addr, gfp))
- goto clean_up;
+ goto clean_up;
}
/* Walk list of transports, removing transports in the UNKNOWN state. */
*/
/* Allocate storage for the negotiated streams if it is not a temporary
- * association.
+ * association.
*/
if (!asoc->temp) {
int assoc_id;
asoc->peer.prsctp_capable = 1;
break;
}
- /* Fall Through */
+ /* Fall Through */
default:
/* Any unrecognized parameters should have been caught
* and handled by sctp_verify_param() which should be
* | ASCONF Parameter #N |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
- * Address Parameter and other parameters will not be wrapped in this function
+ * Address Parameter and other parameters will not be wrapped in this function
*/
static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
union sctp_addr *addr,
* | Address Parameter |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
- * Create an ASCONF chunk with Set Primary IP address parameter.
+ * Create an ASCONF chunk with Set Primary IP address parameter.
*/
struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
union sctp_addr *addr)
* | ASCONF Parameter Response#N |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
- * Create an ASCONF_ACK chunk with enough space for the parameter responses.
+ * Create an ASCONF_ACK chunk with enough space for the parameter responses.
*/
static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc,
__u32 serial, int vparam_len)
ntohs(asconf_param->param_hdr.length);
}
- /* Add Success Indication or Error Cause Indication parameter. */
+ /* Add Success Indication or Error Cause Indication parameter. */
ack_param.param_hdr.type = response_type;
ack_param.param_hdr.length = htons(sizeof(ack_param) +
err_param_len +
switch (asconf_param->param_hdr.type) {
case SCTP_PARAM_ADD_IP:
/* ADDIP 4.3 D9) If an endpoint receives an ADD IP address
- * request and does not have the local resources to add this
- * new address to the association, it MUST return an Error
- * Cause TLV set to the new error code 'Operation Refused
- * Due to Resource Shortage'.
- */
+ * request and does not have the local resources to add this
+ * new address to the association, it MUST return an Error
+ * Cause TLV set to the new error code 'Operation Refused
+ * Due to Resource Shortage'.
+ */
peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
if (!peer)
break;
case SCTP_PARAM_DEL_IP:
/* ADDIP 4.3 D7) If a request is received to delete the
- * last remaining IP address of a peer endpoint, the receiver
- * MUST send an Error Cause TLV with the error cause set to the
- * new error code 'Request to Delete Last Remaining IP Address'.
- */
+ * last remaining IP address of a peer endpoint, the receiver
+ * MUST send an Error Cause TLV with the error cause set to the
+ * new error code 'Request to Delete Last Remaining IP Address'.
+ */
pos = asoc->peer.transport_addr_list.next;
if (pos->next == &asoc->peer.transport_addr_list)
return SCTP_ERROR_DEL_LAST_IP;
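/*
 * List arithmetic above: transport_addr_list is circular, so when the
 * first entry's ->next already points back at the list head the peer
 * has exactly one address left, and deleting it must be refused.
 */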
return SCTP_ERROR_NO_ERROR;
}
- /* Process an incoming ASCONF chunk with the next expected serial no. and
+ /* Process an incoming ASCONF chunk with the next expected serial no. and
* return an ASCONF_ACK chunk to be sent in response.
*/
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
hdr = (sctp_addiphdr_t *)asconf->skb->data;
serial = ntohl(hdr->serial);
- /* Skip the addiphdr and store a pointer to address parameter. */
+ /* Skip the addiphdr and store a pointer to address parameter. */
length = sizeof(sctp_addiphdr_t);
addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
chunk_len -= length;
/* Skip the address parameter and store a pointer to the first
* asconf parameter.
- */
+ */
length = ntohs(addr_param->v4.param_hdr.length);
asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
chunk_len -= length;
- /* create an ASCONF_ACK chunk.
+ /* create an ASCONF_ACK chunk.
* Based on the definitions of parameters, we know that the size of
* ASCONF_ACK parameters is at most twice the size of the ASCONF
* parameters.
/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
* an IP address sends an 'Out of Resource' in its response, it
* MUST also fail any subsequent add or delete requests bundled
- * in the ASCONF.
+ * in the ASCONF.
*/
if (SCTP_ERROR_RSRC_LOW == err_code)
goto done;
length);
chunk_len -= length;
}
-
+
done:
asoc->peer.addip_serial++;
/* If we are sending a new ASCONF_ACK hold a reference to it in assoc
- * after freeing the reference to old asconf ack if any.
+ * after freeing the reference to old asconf ack if any.
*/
if (asconf_ack) {
if (asoc->addip_last_asconf_ack)
/* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk
* for the given asconf parameter. If there is no response for this parameter,
- * return the error code based on the third argument 'no_err'.
+ * return the error code based on the third argument 'no_err'.
* ADDIP 4.1
* A7) If an error response is received for a TLV parameter, all TLVs with no
* response before the failed TLV are considered successful if not reported.
/* Skip the addiphdr from the asconf_ack chunk and store a pointer to
* the first asconf_ack parameter.
- */
+ */
length = sizeof(sctp_addiphdr_t);
asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data +
length);
/* Skip the chunkhdr and addiphdr from the last asconf sent and store
* a pointer to address parameter.
- */
+ */
length = sizeof(sctp_addip_chunk_t);
addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
asconf_len -= length;
/* Skip the address parameter in the last asconf sent and store a
* pointer to the first asconf parameter.
- */
+ */
length = ntohs(addr_param->v4.param_hdr.length);
asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
asconf_len -= length;
case SCTP_ERROR_INV_PARAM:
/* Disable sending this type of asconf parameter in
* future.
- */
+ */
asoc->peer.addip_disabled_mask |=
asconf_param->param_hdr.type;
break;
/* Skip the processed asconf parameter and move to the next
* one.
- */
+ */
length = ntohs(asconf_param->param_hdr.length);
asconf_param = (sctp_addip_param_t *)((void *)asconf_param +
length);
return retval;
}
- /* Make a FWD TSN chunk. */
+ /* Make a FWD TSN chunk. */
struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_fwdtsn_skip *skiplist)
{
struct sctp_chunk *retval = NULL;
struct sctp_fwdtsn_chunk *ftsn_chunk;
- struct sctp_fwdtsn_hdr ftsn_hdr;
+ struct sctp_fwdtsn_hdr ftsn_hdr;
struct sctp_fwdtsn_skip skip;
size_t hint;
int i;