Clean up if-statements, remove function calls in if-statements and add likely()/unlikely() to help guide the compiler and cpu. Signed-off-by: Martin Josefsson --- net/netfilter/nf_conntrack_core.c | 45 ++++++++++++++++++++---------------- net/netfilter/nf_conntrack_expect.c | 18 +++++++++----- 2 files changed, 37 insertions(+), 26 deletions(-) Index: net-2.6.quilt/net/netfilter/nf_conntrack_core.c =================================================================== --- net-2.6.quilt.orig/net/netfilter/nf_conntrack_core.c 2006-12-03 20:36:25.000000000 +0100 +++ net-2.6.quilt/net/netfilter/nf_conntrack_core.c 2006-12-03 20:39:28.000000000 +0100 @@ -617,11 +617,11 @@ __nf_conntrack_alloc(const struct nf_con /* We don't want any race condition at early drop stage */ atomic_inc(&nf_conntrack_count); - if (nf_conntrack_max - && atomic_read(&nf_conntrack_count) > nf_conntrack_max) { + if (unlikely(nf_conntrack_max + && atomic_read(&nf_conntrack_count) > nf_conntrack_max)) { unsigned int hash = hash_conntrack(orig); /* Try dropping from this hash chain. 
*/ - if (!early_drop(&nf_conntrack_hash[hash])) { + if (unlikely(!early_drop(&nf_conntrack_hash[hash]))) { atomic_dec(&nf_conntrack_count); if (net_ratelimit()) printk(KERN_WARNING @@ -651,7 +651,7 @@ __nf_conntrack_alloc(const struct nf_con } conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC); - if (conntrack == NULL) { + if (unlikely(conntrack == NULL)) { DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n"); goto out; } @@ -724,8 +724,10 @@ init_conntrack(const struct nf_conntrack struct nf_conntrack_tuple repl_tuple; struct nf_conntrack_expect *exp; u_int32_t features = 0; + int ret; - if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { + ret = nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto); + if (unlikely(!ret)) { DEBUGP("Can't invert tuple.\n"); return NULL; } @@ -737,12 +739,13 @@ init_conntrack(const struct nf_conntrack spin_unlock_bh(&nf_conntrack_lock); conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto, l4proto, features); - if (conntrack == NULL || IS_ERR(conntrack)) { + if (unlikely(conntrack == NULL || IS_ERR(conntrack))) { DEBUGP("Can't allocate conntrack.\n"); return (struct nf_conntrack_tuple_hash *)conntrack; } - if (!conntrack->l4proto->new(conntrack, skb, dataoff)) { + ret = conntrack->l4proto->new(conntrack, skb, dataoff); + if (unlikely(!ret)) { nf_conntrack_free(conntrack); DEBUGP("init conntrack: can't track with proto module\n"); return NULL; @@ -751,7 +754,7 @@ init_conntrack(const struct nf_conntrack spin_lock_bh(&nf_conntrack_lock); exp = find_expectation(tuple); - if (exp) { + if (unlikely(exp)) { DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n", conntrack, exp); /* Welcome, Mr. Bond. We've been expecting you... 
*/ @@ -780,7 +783,7 @@ init_conntrack(const struct nf_conntrack spin_unlock_bh(&nf_conntrack_lock); - if (exp) { + if (unlikely(exp)) { if (exp->expectfn) exp->expectfn(conntrack, exp); nf_conntrack_expect_put(exp); @@ -803,21 +806,23 @@ resolve_normal_ct(struct sk_buff *skb, struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; + int ret; - if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data), + ret = nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data), dataoff, l3num, protonum, &tuple, l3proto, - l4proto)) { + l4proto); + if (unlikely(!ret)) { DEBUGP("resolve_normal_ct: Can't get tuple\n"); return NULL; } /* look for tuple match */ h = nf_conntrack_find_get(&tuple, NULL); - if (!h) { + if (unlikely(!h)) { h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff); - if (!h) + if (unlikely(!h)) return NULL; - if (IS_ERR(h)) + if (unlikely(IS_ERR(h))) return (void *)h; } ct = nf_ct_tuplehash_to_ctrack(h); @@ -859,7 +864,7 @@ nf_conntrack_in(int pf, unsigned int hoo int ret; /* Previously seen (loopback or untracked)? Ignore. */ - if ((*pskb)->nfct) { + if (unlikely((*pskb)->nfct)) { NF_CT_STAT_INC(ignore); return NF_ACCEPT; } @@ -890,13 +895,13 @@ nf_conntrack_in(int pf, unsigned int hoo ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, l4proto, &set_reply, &ctinfo); - if (!ct) { + if (unlikely(!ct)) { /* Not valid part of a connection */ NF_CT_STAT_INC(invalid); return NF_ACCEPT; } - if (IS_ERR(ct)) { + if (unlikely(IS_ERR(ct))) { /* Too stressed to deal. 
*/ NF_CT_STAT_INC(drop); return NF_DROP; @@ -905,7 +910,7 @@ nf_conntrack_in(int pf, unsigned int hoo NF_CT_ASSERT((*pskb)->nfct); ret = ct->l4proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum); - if (ret < 0) { + if (unlikely(ret < 0)) { /* Invalid: inverse of the return code tells * the netfilter core what to do */ DEBUGP("nf_conntrack_in: Can't track with proto module\n"); @@ -976,13 +981,13 @@ void __nf_ct_refresh_acct(struct nf_conn spin_lock_bh(&nf_conntrack_lock); /* Only update if this is not a fixed timeout */ - if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) { + if (unlikely(test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))) { spin_unlock_bh(&nf_conntrack_lock); return; } /* If not in hash table, timer will not be active yet */ - if (!nf_ct_is_confirmed(ct)) { + if (unlikely(!nf_ct_is_confirmed(ct))) { ct->timeout.expires = extra_jiffies; event = IPCT_REFRESH; } else { Index: net-2.6.quilt/net/netfilter/nf_conntrack_expect.c =================================================================== --- net-2.6.quilt.orig/net/netfilter/nf_conntrack_expect.c 2006-12-03 20:36:25.000000000 +0100 +++ net-2.6.quilt/net/netfilter/nf_conntrack_expect.c 2006-12-03 20:40:53.000000000 +0100 @@ -61,9 +61,11 @@ struct nf_conntrack_expect * __nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_expect *i; - + int ret; + list_for_each_entry(i, &nf_conntrack_expect_list, list) { - if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) + ret = nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask); + if (ret) return i; } return NULL; @@ -92,6 +94,7 @@ struct nf_conntrack_expect * find_expectation(const struct nf_conntrack_tuple *tuple) { struct nf_conntrack_expect *i; + int ret; list_for_each_entry(i, &nf_conntrack_expect_list, list) { /* If master is not in hash table yet (ie. 
packet hasn't left @@ -99,8 +102,8 @@ find_expectation(const struct nf_conntra Hence these are not the droids you are looking for (if master ct never got confirmed, we'd hold a reference to it and weird things would happen to future packets). */ - if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) - && nf_ct_is_confirmed(i->master)) { + ret = nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask); + if (ret && nf_ct_is_confirmed(i->master)) { if (i->flags & NF_CT_EXPECT_PERMANENT) { atomic_inc(&i->use); return i; @@ -172,11 +175,13 @@ static inline int expect_matches(const s void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp) { struct nf_conntrack_expect *i; + int ret; spin_lock_bh(&nf_conntrack_lock); /* choose the oldest expectation to evict */ list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { - if (expect_matches(i, exp) && del_timer(&i->timeout)) { + ret = expect_matches(i, exp) && del_timer(&i->timeout); + if (ret) { nf_ct_unlink_expect(i); spin_unlock_bh(&nf_conntrack_lock); nf_conntrack_expect_put(i); @@ -335,7 +340,8 @@ int nf_conntrack_expect_related(struct n spin_lock_bh(&nf_conntrack_lock); list_for_each_entry(i, &nf_conntrack_expect_list, list) { - if (expect_matches(i, expect)) { + ret = expect_matches(i, expect); + if (ret) { /* Refresh timer: if it's dying, ignore.. */ if (refresh_timer(i)) { ret = 0;