diff -urN linux-2.4.33-wt3-pre1/include/linux/netfilter_ipv4/ip_conntrack.h linux-2.4.33-wt3-pre1-cttproxy-01-natres/include/linux/netfilter_ipv4/ip_conntrack.h --- linux-2.4.33-wt3-pre1/include/linux/netfilter_ipv4/ip_conntrack.h 2006-10-28 19:49:44 +0200 +++ linux-2.4.33-wt3-pre1-cttproxy-01-natres/include/linux/netfilter_ipv4/ip_conntrack.h 2006-10-29 07:49:15 +0100 @@ -155,6 +155,11 @@ /* Timer function; deletes the expectation. */ struct timer_list timeout; +#ifdef CONFIG_IP_NF_NAT_NRES + /* List of registered reservations */ + struct list_head reserved_list; +#endif + /* Data filled out by the conntrack helpers follow: */ /* We expect this tuple, with the following mask */ @@ -279,6 +284,10 @@ /* Call me when a conntrack is destroyed. */ extern void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack); +#ifdef CONFIG_IP_NF_NAT_NRES +/* Call when an expectation is destroyed. */ +extern void (*ip_conntrack_expect_destroyed)(struct ip_conntrack_expect *exp); +#endif /* Returns new sk_buff, or NULL */ struct sk_buff * diff -urN linux-2.4.33-wt3-pre1/include/linux/netfilter_ipv4/ip_nat.h linux-2.4.33-wt3-pre1-cttproxy-01-natres/include/linux/netfilter_ipv4/ip_nat.h --- linux-2.4.33-wt3-pre1/include/linux/netfilter_ipv4/ip_nat.h 2006-10-28 19:49:43 +0200 +++ linux-2.4.33-wt3-pre1-cttproxy-01-natres/include/linux/netfilter_ipv4/ip_nat.h 2006-10-29 07:51:54 +0100 @@ -21,6 +21,11 @@ #define IP_NAT_RANGE_MAP_IPS 1 #define IP_NAT_RANGE_PROTO_SPECIFIED 2 + +#ifdef CONFIG_IP_NF_NAT_NRES +#define IP_NAT_RANGE_USE_RESERVED 8 +#endif + /* Used internally by get_unique_tuple(). 
*/ #define IP_NAT_RANGE_FULL 4 @@ -108,6 +113,18 @@ struct ip_nat_seq seq[IP_CT_DIR_MAX]; }; +#ifdef CONFIG_IP_NF_NAT_NRES +/* Structure to store reserved manips */ +struct ip_nat_reserved { + struct list_head hash; /* Hash chain */ + struct list_head exp; /* Per-expectation list */ + atomic_t use; /* Reference count */ + struct ip_conntrack_manip manip; /* Reserved manip */ + struct ip_conntrack_manip peer; /* Peer (optional) */ + u_int16_t proto; /* Protocol number of reserved manip */ +}; +#endif + /* Set up the info structure to map into this range. */ extern unsigned int ip_nat_setup_info(struct ip_conntrack *conntrack, const struct ip_nat_multi_range *mr, @@ -115,7 +132,40 @@ /* Is this tuple already taken? (not by us)*/ extern int ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack *ignored_conntrack); + const struct ip_conntrack *ignored_conntrack, + const enum ip_nat_manip_type maniptype, + const unsigned int flags); + +#ifdef CONFIG_IP_NF_NAT_NRES +struct ip_conntrack_expect; + +/* NAT port reservation: allocate and hash a new entry */ +extern struct ip_nat_reserved *__ip_nat_reserved_new_hash(const struct ip_conntrack_manip *manip, + const u_int16_t proto, const struct ip_conntrack_manip *peer); + +/* NAT port reservation: unhash an entry */ +extern struct ip_nat_reserved *__ip_nat_reserved_unhash(const struct ip_conntrack_manip *manip, + const u_int16_t proto, const struct ip_conntrack_manip *peer); + +/* NAT port reservation: free a reservation */ +extern void __ip_nat_reserved_free(struct ip_nat_reserved *res); + +/* NAT port reservation: register a new reservation */ +extern int ip_nat_reserved_register(struct ip_conntrack_expect *exp, + const struct ip_conntrack_manip *manip, + const u_int16_t proto, + const struct ip_conntrack_manip *peer); + +/* NAT port reservation: unregister a reservation */ +extern int ip_nat_reserved_unregister(struct ip_conntrack_expect *exp, + const struct ip_conntrack_manip *manip, + const 
u_int16_t proto, + const struct ip_conntrack_manip *peer); + +/* NAT port reservation: unregister all reservations for a given expectation */ +extern void ip_nat_reserved_unregister_all(struct ip_conntrack_expect *exp); + +#endif /*CONFIG_IP_NF_NAT_NRES*/ /* Calculate relative checksum. */ extern u_int16_t ip_nat_cheat_check(u_int32_t oldvalinv, diff -urN linux-2.4.33-wt3-pre1/net/ipv4/netfilter/Config.in linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/Config.in --- linux-2.4.33-wt3-pre1/net/ipv4/netfilter/Config.in 2006-10-28 18:06:46 +0200 +++ linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/Config.in 2006-10-29 07:49:15 +0100 @@ -118,6 +118,7 @@ dep_tristate ' Full NAT' CONFIG_IP_NF_NAT $CONFIG_IP_NF_IPTABLES $CONFIG_IP_NF_CONNTRACK if [ "$CONFIG_IP_NF_NAT" != "n" ]; then define_bool CONFIG_IP_NF_NAT_NEEDED y + bool ' NAT reservations support (for transparent proxying)' CONFIG_IP_NF_NAT_NRES dep_tristate ' MASQUERADE target support' CONFIG_IP_NF_TARGET_MASQUERADE $CONFIG_IP_NF_NAT dep_tristate ' REDIRECT target support' CONFIG_IP_NF_TARGET_REDIRECT $CONFIG_IP_NF_NAT # If they want talk, set to $CONFIG_IP_NF_NAT (m or y), diff -urN linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_conntrack_core.c linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_conntrack_core.c --- linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_conntrack_core.c 2006-10-28 18:06:46 +0200 +++ linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_conntrack_core.c 2006-10-29 07:49:15 +0100 @@ -56,6 +56,9 @@ DECLARE_RWLOCK(ip_conntrack_expect_tuple_lock); void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL; +#ifdef CONFIG_IP_NF_NAT_NRES +void (*ip_conntrack_expect_destroyed)(struct ip_conntrack_expect *expect) = NULL; +#endif LIST_HEAD(ip_conntrack_expect_list); LIST_HEAD(protocol_list); static LIST_HEAD(helpers); @@ -181,6 +184,11 @@ IP_NF_ASSERT(atomic_read(&exp->use) == 0); IP_NF_ASSERT(!timer_pending(&exp->timeout)); +#ifdef 
CONFIG_IP_NF_NAT_NRES + if (ip_conntrack_expect_destroyed) + ip_conntrack_expect_destroyed(exp); +#endif + kfree(exp); ip_conntrack_stat[smp_processor_id()].expect_delete++; } @@ -1087,6 +1095,9 @@ memcpy(new, expect, sizeof(*expect)); new->expectant = related_to; new->sibling = NULL; +#ifdef CONFIG_IP_NF_NAT_NRES + INIT_LIST_HEAD(&new->reserved_list); +#endif atomic_set(&new->use, 1); /* add to expected list for this connection */ diff -urN linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_conntrack_standalone.c linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_conntrack_standalone.c --- linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_conntrack_standalone.c 2006-10-28 18:06:46 +0200 +++ linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_conntrack_standalone.c 2006-10-29 07:49:15 +0100 @@ -558,6 +558,9 @@ EXPORT_SYMBOL(ip_ct_gather_frags); EXPORT_SYMBOL(ip_conntrack_htable_size); EXPORT_SYMBOL(ip_conntrack_expect_list); +#ifdef CONFIG_IP_NF_NAT_NRES +EXPORT_SYMBOL(ip_conntrack_expect_destroyed); +#endif EXPORT_SYMBOL(ip_conntrack_lock); EXPORT_SYMBOL(ip_conntrack_hash); EXPORT_SYMBOL_GPL(ip_conntrack_find_get); diff -urN linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_nat_core.c linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_nat_core.c --- linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_nat_core.c 2006-10-28 18:06:46 +0200 +++ linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_nat_core.c 2006-10-29 07:49:15 +0100 @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -40,8 +41,17 @@ /* Calculated at init based on memory size */ static unsigned int ip_nat_htable_size; +#ifdef CONFIG_IP_NF_NAT_NRES +static kmem_cache_t *ip_nat_reserved_cachep; +static atomic_t ip_nat_reserved_count; +#endif + static struct list_head *bysource; static struct list_head *byipsproto; +#ifdef CONFIG_IP_NF_NAT_NRES +static struct list_head *natreserved; +#endif + LIST_HEAD(protos); LIST_HEAD(helpers); @@ -63,6 +73,15 @@ 
return (manip->ip + manip->u.all + proto) % ip_nat_htable_size; } +static inline size_t +hash_nat_reserved(const struct ip_conntrack_manip *foreign, + const struct ip_conntrack_manip *peer, + const u_int16_t proto) +{ + return (foreign->ip + foreign->u.all + proto + + (peer ? (peer->ip + peer->u.all) : 0)) % ip_nat_htable_size; +} + /* Noone using conntrack by the time this called. */ static void ip_nat_cleanup_conntrack(struct ip_conntrack *conn) { @@ -118,10 +137,368 @@ return i; } +#ifdef CONFIG_IP_NF_NAT_NRES +static inline int +reserved_manip_cmp(const struct ip_nat_reserved *i, + const struct ip_conntrack_manip *manip, + const u_int16_t proto) +{ + DEBUGP("reserved_manip_cmp: manip proto %u %u.%u.%u.%u:%u, " + "reservation proto %u %u.%u.%u.%u:%u\n peer %u.%u.%u.%u:%u\n", + proto, NIPQUAD(manip->ip), ntohs(manip->u.all), + i->proto, NIPQUAD(i->manip.ip), ntohs(i->manip.u.all), + NIPQUAD(i->peer.ip), ntohs(i->peer.u.all)); + return (i->proto == proto && + i->manip.ip == manip->ip && i->manip.u.all == manip->u.all); +} + +static inline int +reserved_manip_cmp_peer(const struct ip_nat_reserved *i, + const struct ip_conntrack_manip *manip, + const u_int16_t proto, + const struct ip_conntrack_manip *peer) +{ + DEBUGP("reserved_manip_cmp_peer: manip proto %u %u.%u.%u.%u:%u peer %u.%u.%u.%u:%u, " + "reservation proto %u %u.%u.%u.%u:%u peer %u.%u.%u.%u:%u\n", + proto, NIPQUAD(manip->ip), ntohs(manip->u.all), + NIPQUAD(peer->ip), ntohs(peer->u.all), + i->proto, NIPQUAD(i->manip.ip), ntohs(i->manip.u.all), + NIPQUAD(i->peer.ip), ntohs(i->peer.u.all)); + + return (i->proto == proto && + i->manip.ip == manip->ip && i->manip.u.all == manip->u.all && + ((i->peer.ip == 0) || (i->peer.ip == peer->ip && i->peer.u.all == peer->u.all))); +} + +static inline int +reserved_manip_cmp_peer_exact(const struct ip_nat_reserved *i, + const struct ip_conntrack_manip *manip, + const u_int16_t proto, + const struct ip_conntrack_manip *peer) +{ + DEBUGP("reserved_manip_cmp_peer_exact: 
manip proto %u %u.%u.%u.%u:%u peer %u.%u.%u.%u:%u, " + "reservation proto %u %u.%u.%u.%u:%u peer %u.%u.%u.%u:%u\n", + proto, NIPQUAD(manip->ip), ntohs(manip->u.all), + NIPQUAD(peer->ip), ntohs(peer->u.all), + i->proto, NIPQUAD(i->manip.ip), ntohs(i->manip.u.all), + NIPQUAD(i->peer.ip), ntohs(i->peer.u.all)); + + return (i->proto == proto && + i->manip.ip == manip->ip && i->manip.u.all == manip->u.all && + i->peer.ip == peer->ip && i->peer.u.all == peer->u.all); +} + +/* Is this manip reserved? + * exact means full peer match is required, used for reservation deletion */ +static struct ip_nat_reserved * +__ip_nat_reserved_find_manip(const struct ip_conntrack_manip *manip, + const u_int16_t proto, + const struct ip_conntrack_manip *peer, + const int exact) +{ + struct ip_nat_reserved *i; + unsigned int h = hash_nat_reserved(manip, peer, proto); + + MUST_BE_READ_LOCKED(&ip_nat_lock); + + DEBUGP("__ip_nat_reserved_find_manip: find proto %u %u.%u.%u.%u:%u\n", + proto, NIPQUAD(manip->ip), ntohs(manip->u.all)); + + if (peer) { + if (exact) + i = LIST_FIND(&natreserved[h], reserved_manip_cmp_peer_exact, + struct ip_nat_reserved *, manip, proto, peer); + else + i = LIST_FIND(&natreserved[h], reserved_manip_cmp_peer, + struct ip_nat_reserved *, manip, proto, peer); + } else + i = LIST_FIND(&natreserved[h], reserved_manip_cmp, + struct ip_nat_reserved *, manip, proto); + + return i; +} + +/* Is this tuple clashing with a reserved manip? 
*/ +static struct ip_nat_reserved * +__ip_nat_reserved_find_tuple(const struct ip_conntrack_tuple *tuple, + const enum ip_nat_manip_type maniptype) +{ + struct ip_conntrack_manip m = {.ip = tuple->dst.ip, .u = {.all = tuple->dst.u.all}}; + + MUST_BE_READ_LOCKED(&ip_nat_lock); + + if (maniptype == IP_NAT_MANIP_SRC) { + DEBUGP("__ip_nat_reserved_find_tuple: IP_NAT_MANIP_SRC search\n"); + return __ip_nat_reserved_find_manip(&tuple->src, tuple->dst.protonum, &m, 0); + } else { + DEBUGP("__ip_nat_reserved_find_tuple: IP_NAT_MANIP_DST search\n"); + return __ip_nat_reserved_find_manip(&m, tuple->dst.protonum, &tuple->src, 0); + } +} + +static inline int +clashing_ct_cmp(const struct ip_conntrack_tuple_hash *i, const void *data) +{ + const struct ip_conntrack_manip *m = (struct ip_conntrack_manip *) data; + const struct ip_conntrack_tuple *t = &i->tuple; + + /* FIXME: every connection has two entries, we should check only the REPLY direction */ + + DEBUGP("clashing_ct_cmp: manip %u.%u.%u.%u:%u ct reply src %u.%u.%u.%u:%u dst %u.%u.%u.%u:%u\n", + NIPQUAD(m->ip), ntohs(m->u.all), NIPQUAD(t->src.ip), ntohs(t->src.u.all), + NIPQUAD(t->dst.ip), ntohs(t->dst.u.all)); + return (((t->src.ip == m->ip) && (t->src.u.all == m->u.all)) || + ((t->dst.ip == m->ip) && (t->dst.u.all == m->u.all))); +} + +/* Create a new reservation */ +struct ip_nat_reserved * +__ip_nat_reserved_new_hash(const struct ip_conntrack_manip *manip, + const u_int16_t proto, + const struct ip_conntrack_manip *peer) +{ + struct ip_nat_reserved *res; + unsigned int h; + + MUST_BE_WRITE_LOCKED(&ip_nat_lock); + + DEBUGP("__ip_nat_reserved_new_hash: manip proto %u %u.%u.%u.%u:%u\n", + proto, NIPQUAD(manip->ip), ntohs(manip->u.all)); + + /* check if it's already reserved */ + if (__ip_nat_reserved_find_manip(manip, proto, peer, 1)) { + DEBUGP("__ip_nat_reserved_new_hash: already reserved\n"); + return NULL; + } + + /* FIXME: check if a clashing connection exists... 
This is problematic,
+	 * since the final decision in ip_nat_used_tuple() is based on a full
+	 * tuple, but we only have a manip... =(:< */
+
+	/* Current solution: we provide two methods for checking:
+	 * - Strong check: in this case, the conntrack table is scanned if an
+	 * already existing connection uses the manip in its REPLY direction.
+	 * if such a conntrack entry is found, the mapping fails. This check is
+	 * extremely pessimistic, since it fails to register reservations which could
+	 * happily coexist with current conntracks if the other side of the tuple is
+	 * different...
+	 * - Exact check: if the caller provides a peer manip, then an exact lookup
+	 * can be made in the conntrack hash. This is a more fine-grained check.
+	 */
+
+	if (peer) {
+		/* Exact check */
+		struct ip_conntrack_tuple t = {.src = *peer,
+					       .dst = {.protonum = proto,
+						       .ip = manip->ip,
+						       .u = {.all = manip->u.all}}};
+
+		if (ip_conntrack_tuple_taken(&t, NULL)) {
+			DEBUGP("__ip_nat_reserved_new_hash: manip clashes with an already existing connection\n");
+			return NULL;
+		}
+	} else {
+		/* Strong check: we have only a manip, unfortunately we scan the whole conntrack
+		 * hash for possible clashing connections...
*/ + struct ip_conntrack_tuple_hash *h = NULL; + unsigned int i; + + READ_LOCK(&ip_conntrack_lock); + for (i = 0; !h && i < ip_conntrack_htable_size; i++) { + h = LIST_FIND(&ip_conntrack_hash[i], clashing_ct_cmp, + struct ip_conntrack_tuple_hash *, manip); + if (h) + break; + } + READ_UNLOCK(&ip_conntrack_lock); + if (h) { + DEBUGP("__ip_nat_reserved_new_hash: manip clashes with an already existing connection\n"); + return NULL; + } + } + + /* else allocate a new structure */ + res = kmem_cache_alloc(ip_nat_reserved_cachep, GFP_ATOMIC); + if (!res) + return NULL; + + memset(res, 0, sizeof(*res)); + res->proto = proto; + res->manip = *manip; + if (peer) + res->peer = *peer; + + /* put it into the hash */ + h = hash_nat_reserved(manip, peer, proto); + atomic_inc(&ip_nat_reserved_count); + list_prepend(&natreserved[h], &res->hash); + DEBUGP("__ip_nat_reserved_new_hash: hashed manip proto %u %u.%u.%u.%u:%u\n", + proto, NIPQUAD(manip->ip), ntohs(manip->u.all)); + + return res; +} + +/* Register a new reservation */ +static int +__ip_nat_reserved_register(struct ip_conntrack_expect *expect, + const struct ip_conntrack_manip *manip, + const u_int16_t proto, + const struct ip_conntrack_manip *peer) +{ + struct ip_nat_reserved *res; + + MUST_BE_WRITE_LOCKED(&ip_nat_lock); + + DEBUGP("__ip_nat_reserved_register: registering proto %u %u.%u.%u.%u:%u\n", + proto, NIPQUAD(manip->ip), ntohs(manip->u.all)); + + /* allocate and put into the hash */ + res = __ip_nat_reserved_new_hash(manip, proto, peer); + if (!res) + return 0; + + /* append to the per-expectation reserved list */ + list_append(&expect->reserved_list, &res->exp); + + return 1; +} + +int +ip_nat_reserved_register(struct ip_conntrack_expect *expect, + const struct ip_conntrack_manip *manip, + const u_int16_t proto, + const struct ip_conntrack_manip *peer) +{ + int ret; + + WRITE_LOCK(&ip_nat_lock); + + ret = __ip_nat_reserved_register(expect, manip, proto, peer); + + WRITE_UNLOCK(&ip_nat_lock); + + return ret; +} + 
+/* Unhash a reservation */ +struct ip_nat_reserved * +__ip_nat_reserved_unhash(const struct ip_conntrack_manip *manip, + const u_int16_t proto, + const struct ip_conntrack_manip *peer) +{ + struct ip_nat_reserved *res; + + MUST_BE_WRITE_LOCKED(&ip_nat_lock); + + DEBUGP("__ip_nat_reserved_unhash: unhashing proto %u %u.%u.%u.%u:%u\n", + proto, NIPQUAD(manip->ip), ntohs(manip->u.all)); + + /* check if it's really reserved */ + if (!(res = __ip_nat_reserved_find_manip(manip, proto, peer, 1))) { + DEBUGP("__ip_nat_reserved_unhash: trying to unreg a nonexisting reservation\n"); + return NULL; + } + + /* delete from the hash table */ + list_del(&res->hash); + + atomic_dec(&ip_nat_reserved_count); + + return res; +} + +/* Return a reservation structure into the slab cache */ +void +__ip_nat_reserved_free(struct ip_nat_reserved *res) +{ + kmem_cache_free(ip_nat_reserved_cachep, res); +} + +/* Unregister a reservation */ +static int +__ip_nat_reserved_unregister(struct ip_conntrack_expect *expect, + const struct ip_conntrack_manip *manip, + const u_int16_t proto, + const struct ip_conntrack_manip *peer) +{ + struct ip_nat_reserved *res; + + MUST_BE_WRITE_LOCKED(&ip_nat_lock); + + DEBUGP("__ip_nat_reserved_unregister: unregistering proto %u %u.%u.%u.%u:%u\n", + proto, NIPQUAD(manip->ip), ntohs(manip->u.all)); + + /* look up and unhash */ + res = __ip_nat_reserved_unhash(manip, proto, peer); + if (!res) + return 0; + + /* delete from the per-expectation list */ + list_del(&res->exp); + + /* free the structure */ + __ip_nat_reserved_free(res); + + return 1; +} + +int +ip_nat_reserved_unregister(struct ip_conntrack_expect *expect, + const struct ip_conntrack_manip *manip, + const u_int16_t proto, + const struct ip_conntrack_manip *peer) +{ + int ret; + + WRITE_LOCK(&ip_nat_lock); + + ret = __ip_nat_reserved_unregister(expect, manip, proto, peer); + + WRITE_UNLOCK(&ip_nat_lock); + + return ret; +} + +/* Unregister all reservations for a given expectation */ +void 
+ip_nat_reserved_unregister_all(struct ip_conntrack_expect *expect) +{ + struct list_head *i; + struct ip_nat_reserved *res; + + DEBUGP("ip_nat_reserved_unregister_all: deleting all reservations for expectation %p\n", + expect); + + WRITE_LOCK(&ip_nat_lock); + + i = expect->reserved_list.next; + while (i != &expect->reserved_list) { + res = list_entry(i, struct ip_nat_reserved, exp); + i = i->next; + + /* clear from lists */ + list_del(&res->hash); + list_del(&res->exp); + + kmem_cache_free(ip_nat_reserved_cachep, res); + } + + WRITE_UNLOCK(&ip_nat_lock); +} + +static void +ip_nat_reserved_cleanup_expect(struct ip_conntrack_expect *expect) +{ + ip_nat_reserved_unregister_all(expect); +} +#endif /* CONFIG_IP_NF_NAT_NRES */ + /* Is this tuple already taken? (not by us) */ int ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple, - const struct ip_conntrack *ignored_conntrack) + const struct ip_conntrack *ignored_conntrack, + const enum ip_nat_manip_type maniptype, + const unsigned int flags) { /* Conntrack tracking doesn't keep track of outgoing tuples; only incoming ones. NAT means they don't have a fixed mapping, @@ -129,6 +506,18 @@ We could keep a separate hash if this proves too slow. 
*/ struct ip_conntrack_tuple reply; +#ifdef CONFIG_IP_NF_NAT_NRES + struct ip_nat_reserved *res; + + /* check if the tuple is reserved if there are any reservations */ + if (atomic_read(&ip_nat_reserved_count)) { + res = __ip_nat_reserved_find_tuple(tuple, maniptype); + + /* If we may not allocate reserved ports, return */ + if ((flags & IP_NAT_RANGE_USE_RESERVED) == 0 && res) + return 1; + } +#endif invert_tuplepr(&reply, tuple); return ip_conntrack_tuple_taken(&reply, ignored_conntrack); @@ -433,7 +822,7 @@ if ((!(rptr->flags & IP_NAT_RANGE_PROTO_SPECIFIED) || proto->in_range(tuple, HOOK2MANIP(hooknum), &rptr->min, &rptr->max)) - && !ip_nat_used_tuple(tuple, conntrack)) { + && !ip_nat_used_tuple(tuple, conntrack, HOOK2MANIP(hooknum), rptr->flags)) { ret = 1; goto clear_fulls; } else { @@ -442,7 +831,7 @@ conntrack)) { /* Must be unique. */ IP_NF_ASSERT(!ip_nat_used_tuple(tuple, - conntrack)); + conntrack, HOOK2MANIP(hooknum), rptr->flags)); ret = 1; goto clear_fulls; } else if (HOOK2MANIP(hooknum) == IP_NAT_MANIP_DST) { @@ -459,7 +848,7 @@ conntrack)) { /* Must be unique. */ IP_NF_ASSERT(!ip_nat_used_tuple - (tuple, conntrack)); + (tuple, conntrack, IP_NAT_MANIP_DST, 0)); ret = 1; goto clear_fulls; } @@ -1055,12 +1444,26 @@ /* Leave them the same for the moment. 
*/
	ip_nat_htable_size = ip_conntrack_htable_size;
 
+#ifdef CONFIG_IP_NF_NAT_NRES
+	/* Create nat_reserved slab cache */
+	ip_nat_reserved_cachep = kmem_cache_create("ip_nat_reserved",
+						   sizeof(struct ip_nat_reserved), 0,
+						   SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!ip_nat_reserved_cachep) {
+		printk(KERN_ERR "Unable to create ip_nat_reserved slab cache\n");
+		return -ENOMEM;
+	}
+#endif
+
 	/* One vmalloc for both hash tables */
-	bysource = vmalloc(sizeof(struct list_head) * ip_nat_htable_size*2);
+	bysource = vmalloc(sizeof(struct list_head) * ip_nat_htable_size*3);
 	if (!bysource) {
-		return -ENOMEM;
+		goto free_reserved_slab;
 	}
 	byipsproto = bysource + ip_nat_htable_size;
+#ifdef CONFIG_IP_NF_NAT_NRES
+	natreserved = byipsproto + ip_nat_htable_size;
+#endif
 
 	/* Sew in builtin protocols. */
 	WRITE_LOCK(&ip_nat_lock);
@@ -1072,13 +1475,26 @@
 	for (i = 0; i < ip_nat_htable_size; i++) {
 		INIT_LIST_HEAD(&bysource[i]);
 		INIT_LIST_HEAD(&byipsproto[i]);
+#ifdef CONFIG_IP_NF_NAT_NRES
+		INIT_LIST_HEAD(&natreserved[i]);
+#endif
 	}
 
 	/* FIXME: Man, this is a hack. */
 	IP_NF_ASSERT(ip_conntrack_destroyed == NULL);
 	ip_conntrack_destroyed = &ip_nat_cleanup_conntrack;
+#ifdef CONFIG_IP_NF_NAT_NRES
+	IP_NF_ASSERT(ip_conntrack_expect_destroyed == NULL);
+	ip_conntrack_expect_destroyed = &ip_nat_reserved_cleanup_expect;
+#endif
 
 	return 0;
+
+free_reserved_slab:
+#ifdef CONFIG_IP_NF_NAT_NRES
+	kmem_cache_destroy(ip_nat_reserved_cachep);
+#endif
+	return -ENOMEM;
 }
 
 /* Clear NAT section of all conntracks, in case we're loaded again. 
*/ @@ -1093,5 +1509,9 @@ { ip_ct_iterate_cleanup(&clean_nat, NULL); ip_conntrack_destroyed = NULL; +#ifdef CONFIG_IP_NF_NAT_NRES + ip_conntrack_expect_destroyed = NULL; + kmem_cache_destroy(ip_nat_reserved_cachep); +#endif vfree(bysource); } diff -urN linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_nat_proto_icmp.c linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_nat_proto_icmp.c --- linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_nat_proto_icmp.c 2005-11-18 21:08:36 +0100 +++ linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_nat_proto_icmp.c 2006-10-29 07:49:15 +0100 @@ -37,7 +37,7 @@ for (i = 0; i < range_size; i++, id++) { tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + (id % range_size)); - if (!ip_nat_used_tuple(tuple, conntrack)) + if (!ip_nat_used_tuple(tuple, conntrack, maniptype, range->flags)) return 1; } return 0; diff -urN linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_nat_proto_tcp.c linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_nat_proto_tcp.c --- linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_nat_proto_tcp.c 2005-11-18 21:08:36 +0100 +++ linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_nat_proto_tcp.c 2006-10-29 07:49:15 +0100 @@ -67,7 +67,7 @@ for (i = 0; i < range_size; i++, port++) { *portptr = htons(min + port % range_size); - if (!ip_nat_used_tuple(tuple, conntrack)) { + if (!ip_nat_used_tuple(tuple, conntrack, maniptype, range->flags)) { return 1; } } diff -urN linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_nat_proto_udp.c linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_nat_proto_udp.c --- linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_nat_proto_udp.c 2005-11-18 21:08:36 +0100 +++ linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_nat_proto_udp.c 2006-10-29 07:49:15 +0100 @@ -67,7 +67,7 @@ for (i = 0; i < range_size; i++, port++) { *portptr = htons(min + port % range_size); - if (!ip_nat_used_tuple(tuple, conntrack)) + if (!ip_nat_used_tuple(tuple, conntrack, maniptype, 
range->flags)) return 1; } return 0; diff -urN linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_nat_standalone.c linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_nat_standalone.c --- linux-2.4.33-wt3-pre1/net/ipv4/netfilter/ip_nat_standalone.c 2006-10-28 18:06:46 +0200 +++ linux-2.4.33-wt3-pre1-cttproxy-01-natres/net/ipv4/netfilter/ip_nat_standalone.c 2006-10-29 07:49:15 +0100 @@ -379,4 +379,14 @@ EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); EXPORT_SYMBOL(ip_nat_mangle_udp_packet); EXPORT_SYMBOL(ip_nat_used_tuple); +#ifdef CONFIG_IP_NF_NAT_NRES +EXPORT_SYMBOL(ip_nat_reserved_register); +#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE) +EXPORT_SYMBOL_GPL(__ip_nat_reserved_new_hash); +EXPORT_SYMBOL_GPL(__ip_nat_reserved_unhash); +EXPORT_SYMBOL_GPL(__ip_nat_reserved_free); +#endif +EXPORT_SYMBOL(ip_nat_reserved_unregister); +EXPORT_SYMBOL(ip_nat_reserved_unregister_all); +#endif MODULE_LICENSE("GPL");