Replace DEFINE_RWLOCK(nf_conntrack_lock) with DEFINE_SPINLOCK(nf_conntrack_lock). A small review didn't reveal any recursive read locking; I know we used to have some in debug code earlier, so the conversion should be safe, and lockdep hasn't complained yet either. Still untested on SMP. Signed-off-by: Martin Josefsson --- include/net/netfilter/nf_conntrack_core.h | 2 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 8 +- net/netfilter/nf_conntrack_core.c | 56 +++++++++--------- net/netfilter/nf_conntrack_expect.c | 20 +++--- net/netfilter/nf_conntrack_h323_main.c | 4 - net/netfilter/nf_conntrack_helper.c | 12 +-- net/netfilter/nf_conntrack_netlink.c | 30 ++++----- net/netfilter/nf_conntrack_standalone.c | 4 - net/netfilter/xt_connlimit.c | 4 - 9 files changed, 70 insertions(+), 70 deletions(-) Index: linux-2.6.23-rc6.quilt/include/net/netfilter/nf_conntrack_core.h =================================================================== --- linux-2.6.23-rc6.quilt.orig/include/net/netfilter/nf_conntrack_core.h 2007-09-12 10:02:48.000000000 +0200 +++ linux-2.6.23-rc6.quilt/include/net/netfilter/nf_conntrack_core.h 2007-09-12 10:28:13.000000000 +0200 @@ -84,7 +84,7 @@ print_tuple(struct seq_file *s, const st struct nf_conntrack_l4proto *proto); extern struct hlist_head *nf_conntrack_hash; -extern rwlock_t nf_conntrack_lock ; +extern spinlock_t nf_conntrack_lock; extern struct hlist_head unconfirmed; #endif /* _NF_CONNTRACK_CORE_H */ Index: linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_core.c =================================================================== --- linux-2.6.23-rc6.quilt.orig/net/netfilter/nf_conntrack_core.c 2007-09-12 10:02:48.000000000 +0200 +++ linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_core.c 2007-09-12 10:28:13.000000000 +0200 @@ -40,7 +40,7 @@ #define NF_CONNTRACK_VERSION "0.5.0" -DEFINE_RWLOCK(nf_conntrack_lock); +DEFINE_SPINLOCK(nf_conntrack_lock); EXPORT_SYMBOL_GPL(nf_conntrack_lock); /* nf_conntrack_standalone needs this */ @@ -207,7 
+207,7 @@ destroy_conntrack(struct nf_conntrack *n rcu_read_unlock(); - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); /* Expectations will have been removed in clean_from_lists, * except TFTP can create an expectation on the first packet, * before connection is in the list, so we need to clean here, @@ -221,7 +221,7 @@ destroy_conntrack(struct nf_conntrack *n } NF_CT_STAT_INC(delete); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); if (ct->master) nf_ct_put(ct->master); @@ -256,12 +256,12 @@ static void death_by_timeout(unsigned lo rcu_read_unlock(); } - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); /* Inside lock so preempt is disabled on module removal path. * Otherwise we can get spurious warnings. */ NF_CT_STAT_INC(delete_list); clean_from_lists(ct); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); nf_ct_put(ct); } @@ -292,11 +292,11 @@ nf_conntrack_find_get(const struct nf_co { struct nf_conntrack_tuple_hash *h; - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); h = __nf_conntrack_find(tuple, NULL); if (h) atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use); - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return h; } @@ -320,9 +320,9 @@ void nf_conntrack_hash_insert(struct nf_ hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); __nf_conntrack_hash_insert(ct, hash, repl_hash); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); @@ -359,7 +359,7 @@ __nf_conntrack_confirm(struct sk_buff ** NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); pr_debug("Confirming conntrack %p\n", ct); - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); /* See if there's one in the list already, 
including reverse: NAT could have grabbed it without realizing, since we're @@ -385,7 +385,7 @@ __nf_conntrack_confirm(struct sk_buff ** atomic_inc(&ct->ct_general.use); set_bit(IPS_CONFIRMED_BIT, &ct->status); NF_CT_STAT_INC(insert); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); help = nfct_help(ct); if (help && help->helper) nf_conntrack_event_cache(IPCT_HELPER, ct); @@ -400,7 +400,7 @@ __nf_conntrack_confirm(struct sk_buff ** out: NF_CT_STAT_INC(insert_failed); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return NF_DROP; } EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); @@ -413,9 +413,9 @@ nf_conntrack_tuple_taken(const struct nf { struct nf_conntrack_tuple_hash *h; - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); h = __nf_conntrack_find(tuple, ignored_conntrack); - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return h != NULL; } @@ -434,7 +434,7 @@ static int early_drop(unsigned int hash) unsigned int i, cnt = 0; int dropped = 0; - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); for (i = 0; i < nf_conntrack_htable_size; i++) { hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) { tmp = nf_ct_tuplehash_to_ctrack(h); @@ -448,7 +448,7 @@ static int early_drop(unsigned int hash) } if (ct) atomic_inc(&ct->ct_general.use); - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); if (!ct) return dropped; @@ -545,7 +545,7 @@ init_conntrack(const struct nf_conntrack return NULL; } - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); exp = nf_ct_find_expectation(tuple); if (exp) { pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", @@ -583,7 +583,7 @@ init_conntrack(const struct nf_conntrack hlist_add_head(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].hnode, &unconfirmed); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); if (exp) { if (exp->expectfn) @@ -750,7 
+750,7 @@ void nf_conntrack_alter_reply(struct nf_ struct nf_conn_help *help = nfct_help(ct); struct nf_conntrack_helper *helper; - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); /* Should be unconfirmed, so not in hash table yet */ NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); @@ -778,7 +778,7 @@ void nf_conntrack_alter_reply(struct nf_ rcu_assign_pointer(help->helper, helper); out: - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); @@ -794,11 +794,11 @@ void __nf_ct_refresh_acct(struct nf_conn NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct); NF_CT_ASSERT(skb); - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); /* Only update if this is not a fixed timeout */ if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) { - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return; } @@ -832,7 +832,7 @@ void __nf_ct_refresh_acct(struct nf_conn } #endif - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); /* must be unlocked when calling event cache */ if (event) @@ -923,7 +923,7 @@ get_next_corpse(int (*iter)(struct nf_co struct nf_conn *ct; struct hlist_node *n; - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) { ct = nf_ct_tuplehash_to_ctrack(h); @@ -936,11 +936,11 @@ get_next_corpse(int (*iter)(struct nf_co if (iter(ct, data)) set_bit(IPS_DYING_BIT, &ct->status); } - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return NULL; found: atomic_inc(&ct->ct_general.use); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return ct; } @@ -1064,7 +1064,7 @@ int set_hashsize(const char *val, struct * use a newrandom seed */ get_random_bytes(&rnd, 4); - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); for 
(i = 0; i < nf_conntrack_htable_size; i++) { while (!hlist_empty(&nf_conntrack_hash[i])) { h = hlist_entry(nf_conntrack_hash[i].first, @@ -1082,7 +1082,7 @@ int set_hashsize(const char *val, struct nf_conntrack_vmalloc = vmalloced; nf_conntrack_hash = hash; nf_conntrack_hash_rnd = rnd; - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); return 0; Index: linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_netlink.c =================================================================== --- linux-2.6.23-rc6.quilt.orig/net/netfilter/nf_conntrack_netlink.c 2007-09-11 17:21:44.000000000 +0200 +++ linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_netlink.c 2007-09-12 10:28:13.000000000 +0200 @@ -432,7 +432,7 @@ ctnetlink_dump_table(struct sk_buff *skb struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh); u_int8_t l3proto = nfmsg->nfgen_family; - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); last = (struct nf_conn *)cb->args[1]; for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { restart: @@ -471,7 +471,7 @@ restart: } } out: - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); if (last) nf_ct_put(last); @@ -1037,14 +1037,14 @@ ctnetlink_new_conntrack(struct sock *ctn return err; } - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); if (cda[CTA_TUPLE_ORIG-1]) h = __nf_conntrack_find(&otuple, NULL); else if (cda[CTA_TUPLE_REPLY-1]) h = __nf_conntrack_find(&rtuple, NULL); if (h == NULL) { - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); err = -ENOENT; if (nlh->nlmsg_flags & NLM_F_CREATE) err = ctnetlink_create_conntrack(cda, &otuple, &rtuple); @@ -1066,7 +1066,7 @@ ctnetlink_new_conntrack(struct sock *ctn } out_unlock: - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return err; } @@ -1251,7 +1251,7 @@ ctnetlink_exp_dump_table(struct sk_buff struct hlist_node *n; u_int8_t 
l3proto = nfmsg->nfgen_family; - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); last = (struct nf_conntrack_expect *)cb->args[1]; for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { restart: @@ -1279,7 +1279,7 @@ restart: } } out: - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); if (last) nf_ct_expect_put(last); @@ -1398,10 +1398,10 @@ ctnetlink_del_expect(struct sock *ctnl, struct nf_conn_help *m_help; /* delete all expectations for this helper */ - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); h = __nf_conntrack_helper_find_byname(name); if (!h) { - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return -EINVAL; } for (i = 0; i < nf_ct_expect_hsize; i++) { @@ -1416,10 +1416,10 @@ ctnetlink_del_expect(struct sock *ctnl, } } } - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } else { /* This basically means we have to flush everything*/ - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); for (i = 0; i < nf_ct_expect_hsize; i++) { hlist_for_each_entry_safe(exp, n, next, &nf_ct_expect_hash[i], @@ -1430,7 +1430,7 @@ ctnetlink_del_expect(struct sock *ctnl, } } } - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } return 0; @@ -1519,11 +1519,11 @@ ctnetlink_new_expect(struct sock *ctnl, if (err < 0) return err; - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); exp = __nf_ct_expect_find(&tuple); if (!exp) { - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); err = -ENOENT; if (nlh->nlmsg_flags & NLM_F_CREATE) err = ctnetlink_create_expect(cda, u3); @@ -1533,7 +1533,7 @@ ctnetlink_new_expect(struct sock *ctnl, err = -EEXIST; if (!(nlh->nlmsg_flags & NLM_F_EXCL)) err = ctnetlink_change_expect(exp, cda); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return err; } Index: 
linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_standalone.c =================================================================== --- linux-2.6.23-rc6.quilt.orig/net/netfilter/nf_conntrack_standalone.c 2007-09-11 17:21:44.000000000 +0200 +++ linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_standalone.c 2007-09-12 10:28:13.000000000 +0200 @@ -93,7 +93,7 @@ static struct hlist_node *ct_get_idx(str static void *ct_seq_start(struct seq_file *seq, loff_t *pos) { - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); return ct_get_idx(seq, *pos); } @@ -105,7 +105,7 @@ static void *ct_seq_next(struct seq_file static void ct_seq_stop(struct seq_file *s, void *v) { - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } /* return 0 on success, 1 in case of error */ Index: linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_expect.c =================================================================== --- linux-2.6.23-rc6.quilt.orig/net/netfilter/nf_conntrack_expect.c 2007-09-12 10:07:04.000000000 +0200 +++ linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_expect.c 2007-09-12 10:28:56.000000000 +0200 @@ -65,9 +65,9 @@ static void nf_ct_expectation_timed_out( { struct nf_conntrack_expect *exp = (void *)ul_expect; - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); nf_ct_unlink_expect(exp); - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); nf_ct_expect_put(exp); } @@ -109,11 +109,11 @@ nf_ct_expect_find_get(const struct nf_co { struct nf_conntrack_expect *i; - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); i = __nf_ct_expect_find(tuple); if (i) atomic_inc(&i->use); - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return i; } @@ -199,12 +199,12 @@ static inline int expect_matches(const s /* Generally a bad idea to call this: could have matched already. 
*/ void nf_ct_unexpect_related(struct nf_conntrack_expect *exp) { - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); if (del_timer(&exp->timeout)) { nf_ct_unlink_expect(exp); nf_ct_expect_put(exp); } - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } EXPORT_SYMBOL_GPL(nf_ct_unexpect_related); @@ -345,7 +345,7 @@ int nf_ct_expect_related(struct nf_connt NF_CT_ASSERT(master_help); - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); if (!master_help->helper) { ret = -ESHUTDOWN; goto out; @@ -380,7 +380,7 @@ int nf_ct_expect_related(struct nf_connt nf_conntrack_expect_event(IPEXP_NEW, expect); ret = 0; out: - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return ret; } EXPORT_SYMBOL_GPL(nf_ct_expect_related); @@ -427,7 +427,7 @@ static struct hlist_node *ct_expect_get_ static void *exp_seq_start(struct seq_file *seq, loff_t *pos) { - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); return ct_expect_get_idx(seq, *pos); } @@ -439,7 +439,7 @@ static void *exp_seq_next(struct seq_fil static void exp_seq_stop(struct seq_file *seq, void *v) { - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } static int exp_seq_show(struct seq_file *s, void *v) Index: linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_helper.c =================================================================== --- linux-2.6.23-rc6.quilt.orig/net/netfilter/nf_conntrack_helper.c 2007-09-12 10:02:48.000000000 +0200 +++ linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_helper.c 2007-09-12 10:28:13.000000000 +0200 @@ -68,7 +68,7 @@ nf_ct_helper_find_get(const struct nf_co /* need nf_conntrack_lock to assure that helper exists until * try_module_get() is called */ - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); helper = __nf_ct_helper_find(tuple); if (helper) { @@ -79,7 +79,7 @@ nf_ct_helper_find_get(const struct nf_co helper = NULL; } - 
read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return helper; } @@ -140,10 +140,10 @@ int nf_conntrack_helper_register(struct BUG_ON(me->timeout == 0); - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); hlist_add_head(&me->hnode, &nf_ct_helper_hash[h]); nf_ct_helper_count++; - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); return 0; } @@ -157,7 +157,7 @@ void nf_conntrack_helper_unregister(stru unsigned int i; /* Need write lock here, to delete helper. */ - write_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); hlist_del(&me->hnode); nf_ct_helper_count--; @@ -181,7 +181,7 @@ void nf_conntrack_helper_unregister(stru hlist_for_each_entry(h, n, &nf_conntrack_hash[i], hnode) unhelp(h, me); } - write_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); /* Someone could be still looking at the helper in a bh. */ synchronize_net(); Index: linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_h323_main.c =================================================================== --- linux-2.6.23-rc6.quilt.orig/net/netfilter/nf_conntrack_h323_main.c 2007-09-11 17:21:44.000000000 +0200 +++ linux-2.6.23-rc6.quilt/net/netfilter/nf_conntrack_h323_main.c 2007-09-12 10:28:13.000000000 +0200 @@ -1410,7 +1410,7 @@ static int process_rcf(struct sk_buff ** nf_ct_refresh(ct, *pskb, info->timeout * HZ); /* Set expect timeout */ - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3, info->sig_port[!dir]); if (exp) { @@ -1420,7 +1420,7 @@ static int process_rcf(struct sk_buff ** NF_CT_DUMP_TUPLE(&exp->tuple); set_expect_timeout(exp, info->timeout); } - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } return 0; Index: linux-2.6.23-rc6.quilt/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c =================================================================== --- 
linux-2.6.23-rc6.quilt.orig/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 2007-09-11 17:21:44.000000000 +0200 +++ linux-2.6.23-rc6.quilt/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c 2007-09-12 10:28:13.000000000 +0200 @@ -74,7 +74,7 @@ static struct hlist_node *ct_get_idx(str static void *ct_seq_start(struct seq_file *seq, loff_t *pos) { - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); return ct_get_idx(seq, *pos); } @@ -86,7 +86,7 @@ static void *ct_seq_next(struct seq_file static void ct_seq_stop(struct seq_file *s, void *v) { - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } static int ct_seq_show(struct seq_file *s, void *v) @@ -241,7 +241,7 @@ static struct hlist_node *ct_expect_get_ static void *exp_seq_start(struct seq_file *seq, loff_t *pos) { - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); return ct_expect_get_idx(seq, *pos); } @@ -253,7 +253,7 @@ static void *exp_seq_next(struct seq_fil static void exp_seq_stop(struct seq_file *seq, void *v) { - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); } static int exp_seq_show(struct seq_file *s, void *v) Index: linux-2.6.23-rc6.quilt/net/netfilter/xt_connlimit.c =================================================================== --- linux-2.6.23-rc6.quilt.orig/net/netfilter/xt_connlimit.c 2007-09-11 17:21:44.000000000 +0200 +++ linux-2.6.23-rc6.quilt/net/netfilter/xt_connlimit.c 2007-09-12 10:28:13.000000000 +0200 @@ -119,7 +119,7 @@ static int count_them(struct xt_connlimi else hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)]; - read_lock_bh(&nf_conntrack_lock); + spin_lock_bh(&nf_conntrack_lock); /* check the saved connections */ list_for_each_entry_safe(conn, tmp, hash, list) { @@ -162,7 +162,7 @@ static int count_them(struct xt_connlimi ++matches; } - read_unlock_bh(&nf_conntrack_lock); + spin_unlock_bh(&nf_conntrack_lock); if (addit) { /* save the new connection in our list */