Now that we are using mod_timer_noact() for timer updates there's no need
to hold the global lock during the timer update since the actual timeout
update is now protected by the timer locking.

Signed-off-by: Martin Josefsson

---
 net/netfilter/nf_conntrack_core.c |   10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

Index: linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_core.c
===================================================================
--- linux-2.6.23-rc6.quilt.orig/net/netfilter/nf_conntrack_core.c	2007-09-13 11:36:38.000000000 +0200
+++ linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_core.c	2007-09-13 11:36:45.000000000 +0200
@@ -797,15 +797,13 @@ void __nf_ct_refresh_acct(struct nf_conn
 	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
 	NF_CT_ASSERT(skb);
 
-	spin_lock_bh(&nf_conntrack_lock);
-
 	/* Only update if this is not a fixed timeout */
 	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
-		spin_unlock_bh(&nf_conntrack_lock);
 		return;
 	}
 
-	/* If not in hash table, timer will not be active yet */
+	/* If not in hash table, timer will not be active yet,
+	   we are the only one able to see it. */
 	if (!nf_ct_is_confirmed(ct)) {
 		ct->timeout.expires = extra_jiffies;
 		event = IPCT_REFRESH;
@@ -822,6 +820,7 @@ void __nf_ct_refresh_acct(struct nf_conn
 
 #ifdef CONFIG_NF_CT_ACCT
 	if (do_acct) {
+		spin_lock_bh(&nf_conntrack_lock);
 		ct->counters[CTINFO2DIR(ctinfo)].packets++;
 		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
 			skb->len - skb_network_offset(skb);
@@ -829,11 +828,10 @@ void __nf_ct_refresh_acct(struct nf_conn
 		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
 		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
 			event |= IPCT_COUNTER_FILLING;
+		spin_unlock_bh(&nf_conntrack_lock);
 	}
 #endif
 
-	spin_unlock_bh(&nf_conntrack_lock);
-
 	/* must be unlocked when calling event cache */
 	if (event)
 		nf_conntrack_event_cache(event, ct);