Commit 7ddb3fd5 authored by Lionel Gauthier

git-svn-id: http://svn.eurecom.fr/openair4G/trunk@6273 818b1a75-f10b-46b9-bf7c-635c3b92a50f
parent 00da7edc
@@ -50,14 +50,22 @@
#endif
enum {
PARAM_ACTION = 1 << 0,
PARAM_LADDR = 1 << 0,
PARAM_LTUN = 1 << 1,
PARAM_RADDR = 1 << 2,
PARAM_RTUN = 1 << 3,
PARAM_ACTION = 1 << 4,
};
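/* Each PARAM_* bit is OR'ed into *flags when the corresponding option is
 * parsed: GTPURH_parse uses the bits to reject duplicated options and
 * GTPURH_check verifies that every required option was supplied. */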
static void GTPURH_help(void)
{
printf(
"GTPURH target options\n"
" --action value Set action <value: remove>\n");
"GTPUAH target options\n"
" --action value Set action <value: remove>\n"
" --own-ip value Set own IP address\n"
" --own-tun value Set own tunnel id <value: 1-2^31>\n"
" --peer-ip value Set peer IP address\n"
" --peer-tun value Set peer tunnel id <value: 1-2^31>\n");
}
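/* Illustrative rule (an assumption for clarity, not part of this change): with
 * the GTPURH target registered for the "raw" table and the PREROUTING hook as
 * in the kernel module below, a decapsulation rule could look like
 *   iptables -t raw -A PREROUTING -p udp --dport 2152 -j GTPURH \
 *            --action remove --own-ip 10.0.0.1 --own-tun 1 \
 *            --peer-ip 10.0.0.2 --peer-tun 1
 * The UDP destination port 2152 (GTP-U) match is an assumption; the target
 * itself only matches the configured addresses and applies --action. */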
#if (IPTVERSION <= 135)
@@ -97,17 +105,63 @@ void gtpurh_param_act(unsigned int status, const char *p1, ...)
#endif
static void parse_gtpurh_addr(const char *s, struct xt_gtpurh_target_info *info, int flag)
{
in_addr_t addr;
if ((addr = inet_addr(s)) == -1) {
switch (flag) {
case PARAM_LADDR:
gtpurh_param_act(GTPURH_PARAM_BAD_VALUE, "GTPURH", "--own-ip", s);
break;
case PARAM_RADDR:
gtpurh_param_act(GTPURH_PARAM_BAD_VALUE, "GTPURH", "--peer-ip", s);
break;
}
}
switch (flag) {
case PARAM_LADDR:
info->laddr = addr;
break;
case PARAM_RADDR:
info->raddr = addr;
break;
}
}
static void parse_gtpurh_tunid(char *s, struct xt_gtpurh_target_info *info, int flag)
{
unsigned int value;
if (!gtpurh_strtoui(s, &value, 0, UINT32_MAX)) {
switch (flag) {
case PARAM_LTUN:
gtpurh_param_act(GTPURH_PARAM_BAD_VALUE, "GTPURH", "--own-tun", s);
break;
case PARAM_RTUN:
gtpurh_param_act(GTPURH_PARAM_BAD_VALUE, "GTPURH", "--peer-tun", s);
break;
}
}
switch (flag) {
case PARAM_LTUN:
info->ltun = value;
break;
case PARAM_RTUN:
info->rtun = value;
break;
}
}
static void parse_gtpurh_action(char *s, struct xt_gtpurh_target_info *info, unsigned int *flags)
{
if (!strcmp(s, "remove"))
{
if (!strcmp(s, "remove")) {
info->action = PARAM_GTPURH_ACTION_REM;
*flags |= PARAM_GTPURH_ACTION_REM;
}
else
{
} else {
gtpurh_param_act(GTPURH_PARAM_BAD_VALUE, "GTPURH", "--action", s);
}
}
@@ -128,10 +182,30 @@ GTPURH_parse(int c, char **argv, int invert, unsigned int *flags,
switch (c)
{
case '1':
gtpurh_param_act(GTPURH_PARAM_ONLY_ONCE, "GTPURH", "--action", *flags & PARAM_ACTION);
parse_gtpurh_action(optarg, info, flags);
*flags |= PARAM_ACTION;
return 1;
gtpurh_param_act(GTPURH_PARAM_ONLY_ONCE, "GTPURH", "--own-ip", *flags & PARAM_LADDR);
parse_gtpurh_addr(optarg, info, PARAM_LADDR);
*flags |= PARAM_LADDR;
return 1;
case '2':
gtpurh_param_act(GTPURH_PARAM_ONLY_ONCE, "GTPURH", "--own-tun", *flags & PARAM_LTUN);
parse_gtpurh_tunid(optarg, info, PARAM_LTUN);
*flags |= PARAM_LTUN;
return 1;
case '3':
gtpurh_param_act(GTPURH_PARAM_ONLY_ONCE, "GTPURH", "--peer-ip", *flags & PARAM_RADDR);
parse_gtpurh_addr(optarg, info, PARAM_RADDR);
*flags |= PARAM_RADDR;
return 1;
case '4':
gtpurh_param_act(GTPURH_PARAM_ONLY_ONCE, "GTPURH", "--peer-tun", *flags & PARAM_RTUN);
parse_gtpurh_tunid(optarg, info, PARAM_RTUN);
*flags |= PARAM_RTUN;
return 1;
case '5':
gtpurh_param_act(GTPURH_PARAM_ONLY_ONCE, "GTPURH", "--action", *flags & PARAM_ACTION);
parse_gtpurh_action(optarg, info, flags);
*flags |= PARAM_ACTION;
return 1;
}
return 1;
@@ -143,10 +217,21 @@ static void GTPURH_check(unsigned int flags)
{
gtpurh_exit_error(PARAMETER_PROBLEM, "GTPURH: You must specify action");
}
if (flags & PARAM_GTPURH_ACTION_REM)
if (!(flags & PARAM_LADDR))
{
return;
gtpurh_exit_error(PARAMETER_PROBLEM, "GTPURH: You must specify local addr");
}
if (!(flags & PARAM_LTUN))
{
gtpurh_exit_error(PARAMETER_PROBLEM, "GTPURH: You must specify local tunnel id");
}
if (!(flags & PARAM_RADDR))
{
gtpurh_exit_error(PARAMETER_PROBLEM, "GTPURH: You must specify remote addr");
}
if (!(flags & PARAM_RTUN))
{
gtpurh_exit_error(PARAMETER_PROBLEM, "GTPURH: You must specify remote tunnel id");
}
}
@@ -191,8 +276,12 @@ GTPURH_print(const void *ip,
}
static struct option GTPURH_opts[] = {
{ "action", 1, NULL, '1' },
{ .name = NULL }
{ "own-ip", 1, NULL, '1' },
{ "own-tun", 1, NULL, '2' },
{ "peer-ip", 1, NULL, '3' },
{ "peer-tun", 1, NULL, '4' },
{ "action", 1, NULL, '5' },
{ .name = NULL }
};
#if (IPTVERSION <= 135)
......
/*
* GTPu klm for Linux/iptables
*
@@ -28,6 +27,9 @@
#if 0
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#endif
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
# define WITH_IPV6 1
#endif
//#define ROUTE_PACKET 1
@@ -39,220 +41,50 @@
#error "Kernel version is not defined!!!! Exiting."
#endif
#define NIPADDR(addr) \
(uint8_t)(addr & 0x000000FF), \
(uint8_t)((addr & 0x0000FF00) >> 8), \
(uint8_t)((addr & 0x00FF0000) >> 16), \
(uint8_t)((addr & 0xFF000000) >> 24)
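/* NIPADDR expands a 32-bit IPv4 address into four comma-separated octet
 * values (low byte first), so it pairs with a "%u.%u.%u.%u" format string,
 * e.g. pr_info("src %u.%u.%u.%u\n", NIPADDR(iph_p->saddr)), as in the
 * logging calls further below. */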
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pradip Biswas <pradip_biswas@polarisnetworks.net>");
MODULE_DESCRIPTION("GTPu Data Path extension on netfilter");
static char* _gtpurh_nf_inet_hook_2_string(int nf_inet_hookP);
static inline bool _gtpurh_ip_is_fragment(const struct iphdr *iph_p);
static void _gtpurh_print_hex_octets(unsigned char* data_pP, unsigned short sizeP);
static unsigned int _gtpurh_tg4_rem(struct sk_buff *orig_skb_pP, const struct xt_action_param *par_pP);
#if defined(WITH_IPV6)
static unsigned int gtpurh_tg6(struct sk_buff *skb_pP, const struct xt_action_param *par_pP);
#endif
static unsigned int gtpurh_tg4(struct sk_buff *skb_pP, const struct xt_action_param *par_pP);
/*
* Statically sized hash table implementation
* (C) 2012 Sasha Levin <levinsasha928@gmail.com>
*/
#ifndef _LINUX_HASHTABLE_H
#define _LINUX_HASHTABLE_H
#include <linux/list.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/rculist.h>
#define DEFINE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
#define HASH_SIZE(name) (ARRAY_SIZE(name))
#define HASH_BITS(name) ilog2(HASH_SIZE(name))
/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
#define hash_min(val, bits) \
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
INIT_HLIST_HEAD(&ht[i]);
}
/**
* hash_init - initialize a hash table
* @hashtable: hashtable to be initialized
*
* Calculates the size of the hashtable from the given parameter, otherwise
* same as hash_init_size.
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
/**
* hash_add - add an object to a hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add(hashtable, node, key) \
hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_add_rcu - add an object to a rcu enabled hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add_rcu(hashtable, node, key) \
hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_hashed - check whether an object is in any hashtable
* @node: the &struct hlist_node of the object to be checked
*/
static inline bool hash_hashed(struct hlist_node *node)
{
return !hlist_unhashed(node);
}
static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
if (!hlist_empty(&ht[i]))
return false;
return true;
}
/**
* hash_empty - check whether a hashtable is empty
* @hashtable: hashtable to check
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
/**
* hash_del - remove an object from a hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del(struct hlist_node *node)
{
hlist_del_init(node);
}
/**
* hash_del_rcu - remove an object from a rcu enabled hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del_rcu(struct hlist_node *node)
{
hlist_del_init_rcu(node);
}
/**
* hash_for_each - iterate over a hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @pos: the &struct hlist_node to use as a loop cursor.
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each(name, bkt, obj, pos, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry(obj, pos, &name[bkt], member)
/**
* hash_for_each_rcu - iterate over a rcu enabled hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_rcu(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_rcu(obj, &name[bkt], member)
/**
* hash_for_each_safe - iterate over a hashtable safe against removal of
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @tmp: a &struct used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_safe(name, bkt, tmp, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
/**
* hash_for_each_possible - iterate over all possible objects hashing to the
* same bucket
* @name: hashtable to iterate
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible(name, tpos, pos, member, key) \
hlist_for_each_entry(tpos, pos, &name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_rcu - iterate over all possible objects hashing to the
* same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_rcu(name, obj, member, key) \
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
member)
/**
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
* to the same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*
* This is the same as hash_for_each_possible_rcu() except that it does
* not do any RCU debugging or tracing.
*/
#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
hlist_for_each_entry_rcu_notrace(obj, \
&name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @tmp: a &struct used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
hlist_for_each_entry_safe(obj, tmp,\
&name[hash_min(key, HASH_BITS(name))], member)
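/* Illustrative sketch (hypothetical names, not part of this change) showing
 * how the helpers above fit together, mirroring the commented-out fragment
 * bookkeeping further below; guarded out so it never affects the build. */
#if 0
struct frag_example_s {
    int               offset; /* example payload */
    struct hlist_node node;   /* links the entry into a hash bucket */
};

DEFINE_HASHTABLE(example_table, 7); /* 2^7 buckets */

static void frag_example_usage(uint16_t key)
{
    struct frag_example_s *entry_p  = kmalloc(sizeof(*entry_p), GFP_KERNEL);
    struct frag_example_s *cursor_p = NULL;
    struct hlist_node     *pos_p    = NULL;

    if (entry_p == NULL)
        return;
    entry_p->offset = 0;
    INIT_HLIST_NODE(&entry_p->node);
    hash_add(example_table, &entry_p->node, key);   /* insert under key */

    /* visit every entry that hashed to the same bucket as key */
    hash_for_each_possible(example_table, cursor_p, pos_p, node, key) {
        pr_info("GTPURH: example entry offset %d\n", cursor_p->offset);
    }
}
#endif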
static struct xt_target gtpurh_tg_reg[] __read_mostly = {
{
.name = "GTPURH",
.revision = 0,
.family = NFPROTO_IPV4,
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_OUT),
.proto = IPPROTO_UDP,
.table = "raw",
.target = gtpurh_tg4,
.targetsize = sizeof(struct xt_gtpurh_target_info),
.me = THIS_MODULE,
},
#if defined(WITH_IPV6)
{
.name = "GTPURH",
.revision = 0,
.family = NFPROTO_IPV6,
.proto = IPPROTO_UDP,
.table = "raw",
.target = gtpurh_tg6,
.me = THIS_MODULE,
},
#endif
};
struct gtpuhdr
{
@@ -276,28 +108,26 @@ struct gtpuhdr
static char _gtpurh_print_buffer[GTPURH_2_PRINT_BUFFER_LEN];
/*typedef struct ip_fragment_entry_s {
struct sk_buff *skb_p;
//struct timeval first_ts; // TO DO WITH IP_FRAG_TIME
int flags; //ip header field
int offset; //ip header field
int end;
struct hlist_node node;
}ip_fragment_entry_t;
typedef struct ip_reassembly_entry_s {
}ip_reassembly_entry_t;
*/
//-----------------------------------------------------------------------------
static char*
_gtpurh_nf_inet_hook_2_string(int nf_inet_hookP) {
//-----------------------------------------------------------------------------
switch (nf_inet_hookP) {
case NF_INET_PRE_ROUTING: return "NF_INET_PRE_ROUTING";break;
case NF_INET_LOCAL_IN: return "NF_INET_LOCAL_IN";break;
case NF_INET_FORWARD: return "NF_INET_FORWARD";break;
case NF_INET_LOCAL_OUT: return "NF_INET_LOCAL_OUT";break;
case NF_INET_POST_ROUTING: return "NF_INET_POST_ROUTING";break;
default: return "NF_INET_UNKNOWN";
}
}
static inline bool _gtpurh_ip_is_fragment(const struct iphdr *iph_p)
{
//-----------------------------------------------------------------------------
static inline bool
_gtpurh_ip_is_fragment(const struct iphdr *iph_p) {
//-----------------------------------------------------------------------------
return (iph_p->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}
#define MAX_IP_FRAGMENTS 16
#define GTPURH_HASHTABLE_BITS 7
DEFINE_HASHTABLE(ip_fragments, GTPURH_HASHTABLE_BITS);
//-----------------------------------------------------------------------------
void _gtpurh_print_hex_octets(unsigned char* data_pP, unsigned short sizeP)
@@ -356,7 +186,9 @@ void _gtpurh_print_hex_octets(unsigned char* data_pP, unsigned short sizeP)
#if defined(ROUTE_PACKET)
//-----------------------------------------------------------------------------
static bool _gtpurh_route_packet(struct sk_buff *skb_pP, const struct xt_gtpurh_target_info *info_pP)
//-----------------------------------------------------------------------------
{
int err = 0;
struct rtable *rt = NULL;
@@ -408,7 +240,7 @@ static bool _gtpurh_route_packet(struct sk_buff *skb_pP, const struct xt_gtpurh_
skb_dst_set(skb_pP, &rt->dst);
skb_pP->dev = skb_dst(skb_pP)->dev;
/* Send the GTPu message out */
// Send the GTPu message out
ip_local_out(skb_pP);
if (err == 0) {
@@ -418,140 +250,12 @@ static bool _gtpurh_route_packet(struct sk_buff *skb_pP, const struct xt_gtpurh_
}
}
#endif
/*
static void
_gtpurh_delete_collection_ip_fragments(void)
{
int bucket_loop_cursor = 0;
struct ip_fragment_entry_s *ip_fragment_p = NULL;
struct hlist_node *pos_p = NULL;
hash_for_each(ip_fragments, bucket_loop_cursor, ip_fragment_p, pos_p, node) {
pr_info("GTPURH: %s struct ip_fragment_entry_s* %p, struct hlist_node* %p\n",
__FUNCTION__,
ip_fragment_p,
pos_p);
}
}
static int
_gtpurh_target_reassembly(struct sk_buff *orig_skb_pP, const struct xt_gtpurh_target_info *tgi_pP)
{
struct iphdr *iph_p = ip_hdr(orig_skb_pP);
struct sk_buff *skb_p = NULL;
struct sk_buff *new_skb_p = NULL;
uint16_t key = 0;
int flags, offset, last_frag_offset;
int ihl, end;
struct ip_fragment_entry_s *ip_fragment_p = NULL;
struct hlist_node *pos_p = NULL;
int all_segments_availables = 1;
if ((orig_skb_pP) && (tgi_pP)) {
offset = ntohs(iph_p->frag_off);
flags = offset & ~IP_OFFSET;
offset &= IP_OFFSET; // offset is in 8-byte chunks
offset <<= 3;
ihl = ip_hdrlen(orig_skb_pP);
end = offset + orig_skb_pP->len - ihl;
key = iph_p->id;
pr_info("---------------GTPURH: Dump fragment:------------------------------------------------\n");
_gtpurh_print_hex_octets((unsigned char*)iph_p, orig_skb_pP->len);
// Is this the final fragment?
if ((flags & IP_MF) == 0) {
pr_info("GTPURH: try reassembly last segment id %u len %u offset %u end %u\n",
key,orig_skb_pP->len, offset, end);
// may be the last segment received
// normally iterator gives elements from head to tail, but elements
// have been inserted at the head, so iterator should retrieve fragments
// in reverse order.
last_frag_offset = offset;
hash_for_each_possible(ip_fragments, ip_fragment_p, pos_p, node, key) {
pr_info("GTPURH: try reassembly segment id %u len %u offset %u end %u\n",
key,ip_fragment_p->skb_p->len, ip_fragment_p->offset, ip_fragment_p->end);
if (ip_fragment_p->end == offset) {
offset = ip_fragment_p->offset;
} else {
pr_info("GTPURH: try reassembly segment id %u failed\n",
key);
all_segments_availables = 0;
break;
}
}
if (all_segments_availables) {
// not optimal, but optimize later
new_skb_p = skb_copy_expand(orig_skb_pP,
last_frag_offset + skb_headroom(orig_skb_pP),
skb_tailroom(orig_skb_pP),
GFP_ATOMIC);
if (new_skb_p != NULL)
{
hash_for_each_possible(ip_fragments, ip_fragment_p, pos_p, node, key) {
skb_p = ip_fragment_p->skb_p;
skb_pull(new_skb_p, ihl);
pr_info("GTPURH: reassembly segment id %u len %u offset %u end %u\n",
key, skb_p->len, ip_fragment_p->offset, ip_fragment_p->end);
iph_p = (struct iphdr*)skb_push(new_skb_p, skb_p->len);
ihl = ip_hdrlen(skb_p);
memcpy(iph_p, skb_network_header(skb_p), skb_p->len);
}
skb_set_network_header(new_skb_p, 0);
if (_gtpurh_route_packet(new_skb_p, tgi_pP) != GTPURH_SUCCESS)
{
kfree_skb(new_skb_p);
}
} else {
pr_info("GTPURH: Failed in skb_p allocation (%u bytes)\n",
last_frag_offset + skb_headroom(orig_skb_pP));
}
}
if (1) {
hash_for_each_possible(ip_fragments, ip_fragment_p, pos_p, node, key) {
kfree_skb(ip_fragment_p->skb_p);
hash_del(pos_p);
kfree(ip_fragment_p);
}
}
} else {
pr_info("GTPURH: but in buffer not last segment id %u len %u offset %u end %u\n",
key,orig_skb_pP->len, offset, end);
// not the last fragment received
// assuming no out of order
ip_fragment_p = kmalloc(sizeof(struct ip_fragment_entry_s), GFP_KERNEL);
if (NULL != ip_fragment_p) {
ip_fragment_p->skb_p = skb_copy(orig_skb_pP, GFP_ATOMIC); // may use skb_clone(), TODO
ip_fragment_p->flags = flags;
ip_fragment_p->offset = offset;
ip_fragment_p->end = end;
pr_info("GTPURH: adding in hashtable key %04x ip_fragment %p skbuff %p\n",
key, ip_fragment_p, ip_fragment_p->skb_p);
INIT_HLIST_NODE(&ip_fragment_p->node);
hash_add(ip_fragments, &ip_fragment_p->node, key);
return 0;
} else {
pr_info("GTPURH: ERROR could not allocate memory for buffering IP fragment\n");
}
}
} else {
pr_info("%s(skb_p=%p,tgi_pP=%p) parameter skb_p or tgi_pP is NULL\n",
__FUNCTION__, orig_skb_pP, tgi_pP);
}
return -1;
}*/
//-----------------------------------------------------------------------------
static unsigned int
_gtpurh_tg4_rem(struct sk_buff *orig_skb_pP, const struct xt_action_param *par_pP)
{
_gtpurh_tg4_rem(struct sk_buff *orig_skb_pP, const struct xt_action_param *par_pP) {
//-----------------------------------------------------------------------------
struct iphdr *iph_p = ip_hdr(orig_skb_pP);
struct iphdr *iph2_p = NULL;
struct udphdr *udph_p = NULL;
@@ -560,7 +264,6 @@ _gtpurh_tg4_rem(struct sk_buff *orig_skb_pP, const struct xt_action_param *par_p
#if defined(NEW_SKB)
struct sk_buff *new_skb_p = NULL;
struct iphdr *new_ip_p = NULL;
unsigned int addr_type = 0;
#endif
uint16_t gtp_payload_size = 0;
@@ -730,57 +433,78 @@ _gtpurh_tg4_rem(struct sk_buff *orig_skb_pP, const struct xt_action_param *par_p
skb_p->priority = rt_tos2priority(iph2_p->tos);
skb_p->pkt_type = PACKET_OTHERHOST;
skb_dst_drop(skb_p);
skb_dst_set(skb_p, &rt->dst);
skb_dst_set(skb_p, dst_clone(&rt->dst));
skb_p->dev = skb_dst(skb_p)->dev;
}
#if defined(NEW_SKB)
new_skb_p = alloc_skb(LL_MAX_HEADER + ntohs(iph2_p->tot_len), GFP_ATOMIC);
if (new_skb_p == NULL) {
return NF_DROP;
}
new_skb_p = alloc_skb(LL_MAX_HEADER + ntohs(iph2_p->tot_len), GFP_ATOMIC);
if (new_skb_p == NULL) {
return NF_DROP;
}
skb_reserve(new_skb_p, LL_MAX_HEADER);
new_skb_p->protocol = skb_p->protocol;
skb_reserve(new_skb_p, LL_MAX_HEADER);
new_skb_p->protocol = skb_p->protocol;
skb_reset_network_header(new_skb_p);
new_ip_p = (void *)skb_put(new_skb_p, iph2_p->ihl << 2);
skb_reset_transport_header(new_skb_p);
skb_put(new_skb_p, ntohs(iph2_p->tot_len) - (iph2_p->ihl << 2));
memcpy(new_ip_p, iph2_p, ntohs(iph2_p->tot_len));
skb_reset_network_header(new_skb_p);
new_ip_p = (void *)skb_put(new_skb_p, iph2_p->ihl << 2);
skb_reset_transport_header(new_skb_p);
skb_put(new_skb_p, ntohs(iph2_p->tot_len) - (iph2_p->ihl << 2));
memcpy(new_ip_p, iph2_p, ntohs(iph2_p->tot_len));
new_skb_p->mark = ntohl(gtpuh_p->tunid);
new_skb_p->mark = ntohl(gtpuh_p->tunid);
//new_skb_p->mark = skb_p->mark;
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set(new_skb_p, dst_clone(skb_dst(skb_p)));
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set(new_skb_p, dst_clone(skb_dst(skb_p)));
if (ip_route_me_harder(new_skb_p, RTN_UNSPEC) < 0)
goto free_new_skb;
/*if (ip_route_me_harder(new_skb_p, RTN_UNSPEC) < 0) {
pr_info("GTPURH: cannot route harder dest 0x%x\n", daddr);
goto free_skb;
}*/
new_ip_p->ttl = ip4_dst_hoplimit(skb_dst(new_skb_p));
new_skb_p->ip_summed = CHECKSUM_NONE;
new_ip_p->ttl = new_ip_p->ttl -1; // ip4_dst_hoplimit(skb_dst(new_skb_p));
new_skb_p->ip_summed = CHECKSUM_NONE;
/* "Never happens" (?) */
if (new_skb_p->len > dst_mtu(skb_dst(new_skb_p)))
goto free_new_skb;
if (new_skb_p->len > dst_mtu(skb_dst(new_skb_p))) {
pr_info("GTPURH: bad length\n");
goto free_skb;
}
nf_ct_attach(new_skb_p, skb_p);
ip_local_out(new_skb_p);
return NF_DROP;
nf_ct_attach(new_skb_p, skb_p);
pr_info("GTPURH: ip_local_out %s/%s dev %s src %u.%u.%u.%u dst %u.%u.%u.%u \n",
_gtpurh_nf_inet_hook_2_string(par_pP->hooknum),
gtpurh_tg_reg[0].table,
(new_skb_p->dev == NULL) ? "NULL" : new_skb_p->dev->name,
NIPADDR(new_ip_p->saddr),
NIPADDR(new_ip_p->daddr));
ip_local_out(new_skb_p);
return NF_DROP;
free_skb:
pr_info("GTPURH: Dropped skb ip_local_out %s/%s\n",
_gtpurh_nf_inet_hook_2_string(par_pP->hooknum),
gtpurh_tg_reg[0].table);
kfree_skb(new_skb_p);
return NF_DROP;
free_new_skb:
kfree_skb(new_skb_p);
return NF_DROP;
#else
return NF_ACCEPT;
return NF_ACCEPT;
free_skb:
pr_info("GTPURH: Dropped skb\n");
kfree_skb(skb_p);
return NF_DROP;
#endif
}
#endif
}
#if defined(WITH_IPV6)
//-----------------------------------------------------------------------------
static unsigned int
gtpurh_tg6(struct sk_buff *skb_pP, const struct xt_action_param *par_pP)
//-----------------------------------------------------------------------------
{
const struct xt_gtpurh_target_info *tgi_p = par_pP->targinfo;
int result = NF_DROP;
@@ -794,60 +518,70 @@ gtpurh_tg6(struct sk_buff *skb_pP, const struct xt_action_param *par_pP)
}
return result;
}
#endif
//-----------------------------------------------------------------------------
static unsigned int
gtpurh_tg4(struct sk_buff *skb_pP, const struct xt_action_param *par_pP)
//-----------------------------------------------------------------------------
{
const struct iphdr *iph_p = ip_hdr(skb_pP);
const struct rtable *rt_p = skb_rtable(skb_pP);
const struct xt_gtpurh_target_info *tgi_p = par_pP->targinfo;
int result = NF_ACCEPT;
if (tgi_p == NULL) {
return result;
return NF_ACCEPT;
}
if ((tgi_p->raddr != iph_p->saddr) || (tgi_p->laddr != iph_p->daddr)) {
pr_info("GTPURH: Not processed because of not matching saddr %d.%d.%d.%d raddr %d.%d.%d.%d laddr %d.%d.%d.%d daddr %d.%d.%d.%d\n",
NIPADDR(tgi_p->raddr),
NIPADDR(iph_p->saddr),
NIPADDR(tgi_p->laddr),
NIPADDR(iph_p->daddr));
return NF_ACCEPT;
}
if (ip_hdrlen(skb_pP) != sizeof(struct iphdr)) {
pr_info("GTPURH: Dropped because IP options\n");
return NF_DROP;
}
// Drop fragments
if (iph_p->frag_off & htons(IP_OFFSET)) {
pr_info("GTPURH: Dropped because is fragment\n");
return NF_DROP;
}
if (tgi_p->action == PARAM_GTPURH_ACTION_REM) {
result = _gtpurh_tg4_rem(skb_pP, par_pP);
return _gtpurh_tg4_rem(skb_pP, par_pP);
}
return result;
return NF_DROP;
}
static struct xt_target gtpurh_tg_reg[] __read_mostly = {
{
.name = "GTPURH",
.revision = 0,
.family = NFPROTO_IPV6,
.proto = IPPROTO_UDP,
.table = "raw",
.target = gtpurh_tg6,
.me = THIS_MODULE,
},
{
.name = "GTPURH",
.revision = 0,
.family = NFPROTO_IPV4,
.hooks = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_OUT),
.proto = IPPROTO_UDP,
.table = "raw",
.target = gtpurh_tg4,
.targetsize = sizeof(struct xt_gtpurh_target_info),
.me = THIS_MODULE,
},
};
static int __init gtpurh_tg_init(void)
{
//-----------------------------------------------------------------------------
static int
__init gtpurh_tg_init(void) {
//-----------------------------------------------------------------------------
pr_info("GTPURH: Initializing module (KVersion: %d)\n", KVERSION);
pr_info("GTPURH: Copyright Polaris Networks 2010-2011\n");
pr_info("GTPURH: Modified by EURECOM Lionel GAUTHIER 2014\n");
//hash_init(ip_fragments);
#if defined(WITH_IPV6)
pr_info("GTPURH: IPv4/IPv6 enabled\n");
#else
pr_info("GTPURH: IPv4 only enabled\n");
#endif
return xt_register_targets(gtpurh_tg_reg, ARRAY_SIZE(gtpurh_tg_reg));
}
static void __exit gtpurh_tg_exit(void)
{
//-----------------------------------------------------------------------------
static void
__exit gtpurh_tg_exit(void) {
//-----------------------------------------------------------------------------
xt_unregister_targets(gtpurh_tg_reg, ARRAY_SIZE(gtpurh_tg_reg));
//_gtpurh_delete_collection_ip_fragments();
pr_info("GTPURH: Unloading module\n");
}
......