--- linux-2.4.19-rc3/net/ipv4/netfilter/ip_nat_core.c.orig	2002-07-29 23:44:11.000000000 +0200
+++ linux-2.4.19-rc3/net/ipv4/netfilter/ip_nat_core.c	2002-07-31 16:39:31.000000000 +0200
@@ -43,6 +43,9 @@
 /* Calculated at init based on memory size */
 static unsigned int ip_nat_htable_size;
 
+static int ip_nat_vmalloc_bysource;
+static int ip_nat_vmalloc_byipsproto;
+
 static struct list_head *bysource;
 static struct list_head *byipsproto;
 LIST_HEAD(protos);
@@ -962,12 +965,38 @@
 	/* Leave them the same for the moment. */
 	ip_nat_htable_size = ip_conntrack_htable_size;
 
-	/* One vmalloc for both hash tables */
-	bysource = vmalloc(sizeof(struct list_head) * ip_nat_htable_size*2);
+	/* One allocation for both hash tables */
+	ip_nat_vmalloc_bysource = 0;
+	bysource = (void *)__get_free_pages(GFP_KERNEL,
+			get_order(sizeof(struct list_head) *
+				  ip_nat_htable_size));
+	if (!bysource) {
+		ip_nat_vmalloc_bysource = 1;
+		printk(KERN_WARNING "ip_nat: bysource: falling back to vmalloc. performance may be degraded.\n");
+		bysource = vmalloc(sizeof(struct list_head) * ip_nat_htable_size);
+	}
 	if (!bysource) {
 		return -ENOMEM;
 	}
-	byipsproto = bysource + ip_nat_htable_size;
+
+	ip_nat_vmalloc_byipsproto = 0;
+	byipsproto = (void *)__get_free_pages(GFP_KERNEL,
+			get_order(sizeof(struct list_head) *
+				  ip_nat_htable_size));
+	if (!byipsproto) {
+		ip_nat_vmalloc_byipsproto = 1;
+		printk(KERN_WARNING "ip_nat: byipsproto: falling back to vmalloc. performance may be degraded.\n");
+		byipsproto = vmalloc(sizeof(struct list_head) * ip_nat_htable_size);
+	}
+	if (!byipsproto) {
+		if (ip_nat_vmalloc_bysource)
+			vfree(bysource);
+		else
+			free_pages((unsigned long)bysource,
+				   get_order(sizeof(struct list_head) * ip_nat_htable_size));
+
+		return -ENOMEM;
+	}
 
 	/* Sew in builtin protocols. */
 	WRITE_LOCK(&ip_nat_lock);
@@ -1005,5 +1034,17 @@
 {
 	ip_ct_selective_cleanup(&clean_nat, NULL);
 	ip_conntrack_destroyed = NULL;
-	vfree(bysource);
+
+	if (ip_nat_vmalloc_bysource)
+		vfree(bysource);
+	else
+		free_pages((unsigned long)bysource,
+			   get_order(sizeof(struct list_head) * ip_nat_htable_size));
+
+	if (ip_nat_vmalloc_byipsproto)
+		vfree(byipsproto);
+	else
+		free_pages((unsigned long)byipsproto,
+			   get_order(sizeof(struct list_head) * ip_nat_htable_size));
+
 }