Now that we are using mod_timer_noact() for timer updates, there's no need to hold the global lock during the timer update, since the actual timeout update is now protected by the timer locking. Signed-off-by: Martin Josefsson --- net/netfilter/nf_conntrack_core.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) Index: linux-2.6.19-rc3-git4.quilt/net/netfilter/nf_conntrack_core.c =================================================================== --- linux-2.6.19-rc3-git4.quilt.orig/net/netfilter/nf_conntrack_core.c 2006-11-02 19:18:31.000000000 +0100 +++ linux-2.6.19-rc3-git4.quilt/net/netfilter/nf_conntrack_core.c 2006-11-02 19:18:34.000000000 +0100 @@ -938,15 +938,13 @@ void __nf_ct_refresh_acct(struct nf_conn NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct); NF_CT_ASSERT(skb); - spin_lock_bh(&nf_conntrack_lock); - /* Only update if this is not a fixed timeout */ if (unlikely(test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))) { - spin_unlock_bh(&nf_conntrack_lock); return; } - /* If not in hash table, timer will not be active yet */ + /* If not in hash table, timer will not be active yet and we are the + only one able to see it. */ if (unlikely(!nf_ct_is_confirmed(ct))) { ct->timeout.expires = extra_jiffies; event = IPCT_REFRESH; @@ -963,6 +961,7 @@ void __nf_ct_refresh_acct(struct nf_conn #ifdef CONFIG_NF_CT_ACCT if (do_acct) { + spin_lock_bh(&nf_conntrack_lock); ct->counters[CTINFO2DIR(ctinfo)].packets++; ct->counters[CTINFO2DIR(ctinfo)].bytes += skb->len - (unsigned int)(skb->nh.raw - skb->data); @@ -971,11 +970,10 @@ void __nf_ct_refresh_acct(struct nf_conn || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) { event |= IPCT_COUNTER_FILLING; } + spin_unlock_bh(&nf_conntrack_lock); } #endif - spin_unlock_bh(&nf_conntrack_lock); - /* must be unlocked when calling event cache */ if (event) nf_conntrack_event_cache(event, ct);