Commit 02e73c1e authored May 08, 2011 by David S. Miller
Merge branch 'batman-adv/next' of git://git.open-mesh.org/ecsv/linux-merge

parents: c5216cc7 27aea212
Showing 20 changed files with 712 additions and 557 deletions (+712, -557)
Documentation/networking/batman-adv.txt    +6    -5
net/batman-adv/aggregation.c               +8    -8
net/batman-adv/aggregation.h               +2    -2
net/batman-adv/bat_debugfs.c               +2    -2
net/batman-adv/bat_sysfs.c                 +9    -7
net/batman-adv/hard-interface.c            +10   -26
net/batman-adv/main.c                      +12   -8
net/batman-adv/main.h                      +22   -20
net/batman-adv/originator.c                +4    -6
net/batman-adv/packet.h                    +2    -3
net/batman-adv/routing.c                   +71   -91
net/batman-adv/routing.h                   +3    -3
net/batman-adv/send.c                      +8    -8
net/batman-adv/send.h                      +1    -1
net/batman-adv/soft-interface.c            +292  -117
net/batman-adv/translation-table.c         +209  -208
net/batman-adv/translation-table.h         +12   -12
net/batman-adv/types.h                     +29   -20
net/batman-adv/unicast.c                   +1    -1
net/batman-adv/vis.c                       +9    -9
Documentation/networking/batman-adv.txt

-[state: 27-01-2011]
+[state: 17-04-2011]

 BATMAN-ADV
 ----------
...
@@ -19,6 +19,7 @@ duce the overhead to a minimum. It does not depend on any (other)
 network driver, and can be used on wifi as well as ethernet lan,
 vpn, etc ... (anything with ethernet-style layer 2).

 CONFIGURATION
 -------------
...
@@ -160,13 +161,13 @@ face. Each entry can/has to have the following values:
    -> "TQ mac value" - src mac's link quality towards mac address
                        of a neighbor originator's interface which
                        is being used for routing
-   -> "HNA mac" - HNA announced by source mac
+   -> "TT mac" - TT announced by source mac
    -> "PRIMARY" - this is a primary interface
    -> "SEC mac" - secondary mac address of source
                   (requires preceding PRIMARY)

 The TQ value has a range from 4 to 255 with 255 being the best.
-The HNA entries are showing which hosts are connected to the mesh
+The TT entries are showing which hosts are connected to the mesh
 via bat0 or being bridged into the mesh network. The PRIMARY/SEC
 values are only applied on primary interfaces
...
@@ -199,7 +200,7 @@ abled during run time. Following log_levels are defined:
 0 - All debug output disabled
 1 - Enable messages related to routing / flooding / broadcasting
-2 - Enable route or hna added / changed / deleted
+2 - Enable route or tt entry added / changed / deleted
 3 - Enable all messages

 The debug output can be changed at runtime using the file
...
@@ -207,7 +208,7 @@ The debug output can be changed at runtime using the file
 # echo 2 > /sys/class/net/bat0/mesh/log_level

-will enable debug messages for when routes or HNAs change.
+will enable debug messages for when routes or TTs change.

 BATCTL
...
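As a quick, hedged illustration of the renamed terminology above: the log_level path is taken from the documentation text itself, while the debugfs file names are only an assumption based on transtable_local_open()/transtable_global_open() in bat_debugfs.c further down and may differ on a given kernel.

# enable debug output for route / translation-table (tt) changes on bat0
echo 2 > /sys/class/net/bat0/mesh/log_level

# inspect the local and global translation tables via debugfs
# (paths assumed; adjust to wherever debugfs is mounted)
cat /sys/kernel/debug/batman_adv/bat0/transtable_local
cat /sys/kernel/debug/batman_adv/bat0/transtable_global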
net/batman-adv/aggregation.c
...
@@ -24,10 +24,10 @@
 #include "send.h"
 #include "routing.h"

-/* calculate the size of the hna information for a given packet */
-static int hna_len(struct batman_packet *batman_packet)
+/* calculate the size of the tt information for a given packet */
+static int tt_len(struct batman_packet *batman_packet)
 {
-        return batman_packet->num_hna * ETH_ALEN;
+        return batman_packet->num_tt * ETH_ALEN;
 }

 /* return true if new_packet can be aggregated with forw_packet */
...
@@ -250,7 +250,7 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
 {
         struct batman_packet *batman_packet;
         int buff_pos = 0;
-        unsigned char *hna_buff;
+        unsigned char *tt_buff;

         batman_packet = (struct batman_packet *)packet_buff;
...
@@ -259,14 +259,14 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
                    orig_interval. */
                 batman_packet->seqno = ntohl(batman_packet->seqno);

-                hna_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
+                tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;

                 receive_bat_packet(ethhdr, batman_packet,
-                                   hna_buff, hna_len(batman_packet),
+                                   tt_buff, tt_len(batman_packet),
                                    if_incoming);

-                buff_pos += BAT_PACKET_LEN + hna_len(batman_packet);
+                buff_pos += BAT_PACKET_LEN + tt_len(batman_packet);
                 batman_packet = (struct batman_packet *)(packet_buff + buff_pos);
         } while (aggregated_packet(buff_pos, packet_len,
-                                   batman_packet->num_hna));
+                                   batman_packet->num_tt));
 }
net/batman-adv/aggregation.h
...
@@ -25,9 +25,9 @@
 #include "main.h"

 /* is there another aggregated packet here? */
-static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
+static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt)
 {
-        int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_hna * ETH_ALEN);
+        int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_tt * ETH_ALEN);

         return (next_buff_pos <= packet_len) &&
                 (next_buff_pos <= MAX_AGGREGATION_BYTES);
...
net/batman-adv/bat_debugfs.c
...
@@ -241,13 +241,13 @@ static int softif_neigh_open(struct inode *inode, struct file *file)
 static int transtable_global_open(struct inode *inode, struct file *file)
 {
         struct net_device *net_dev = (struct net_device *)inode->i_private;
-        return single_open(file, hna_global_seq_print_text, net_dev);
+        return single_open(file, tt_global_seq_print_text, net_dev);
 }

 static int transtable_local_open(struct inode *inode, struct file *file)
 {
         struct net_device *net_dev = (struct net_device *)inode->i_private;
-        return single_open(file, hna_local_seq_print_text, net_dev);
+        return single_open(file, tt_local_seq_print_text, net_dev);
 }

 static int vis_data_open(struct inode *inode, struct file *file)
...
net/batman-adv/bat_sysfs.c
...
@@ -488,22 +488,24 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
             (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
                 goto out;

+        if (!rtnl_trylock()) {
+                ret = -ERESTARTSYS;
+                goto out;
+        }
+
         if (status_tmp == IF_NOT_IN_USE) {
-                rtnl_lock();
                 hardif_disable_interface(hard_iface);
-                rtnl_unlock();
-                goto out;
+                goto unlock;
         }

         /* if the interface already is in use */
-        if (hard_iface->if_status != IF_NOT_IN_USE) {
-                rtnl_lock();
+        if (hard_iface->if_status != IF_NOT_IN_USE)
                 hardif_disable_interface(hard_iface);
-                rtnl_unlock();
-        }

         ret = hardif_enable_interface(hard_iface, buff);

+unlock:
+        rtnl_unlock();
 out:
         hardif_free_ref(hard_iface);
         return ret;
...
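store_mesh_iface() above is what runs when an interface is claimed for, or released from, a mesh through sysfs. A minimal sketch of that flow from userspace, assuming the usual batman-adv sysfs layout (the exact paths are an assumption and not part of this diff):

# add eth0 to the bat0 mesh (ends up in hardif_enable_interface())
echo bat0 > /sys/class/net/eth0/batman_adv/mesh_iface

# release it again (hardif_disable_interface(), now taken under rtnl_trylock())
echo none > /sys/class/net/eth0/batman_adv/mesh_iface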
net/batman-adv/hard-interface.c
...
@@ -31,9 +31,6 @@
 #include <linux/if_arp.h>

-/* protect update critical side of hardif_list - but not the content */
-static DEFINE_SPINLOCK(hardif_list_lock);
-

 static int batman_skb_recv(struct sk_buff *skb,
                            struct net_device *dev,
...
@@ -136,7 +133,7 @@ static void primary_if_select(struct bat_priv *bat_priv,
         struct hard_iface *curr_hard_iface;
         struct batman_packet *batman_packet;

-        spin_lock_bh(&hardif_list_lock);
+        ASSERT_RTNL();

         if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
                 new_hard_iface = NULL;
...
@@ -148,7 +145,7 @@ static void primary_if_select(struct bat_priv *bat_priv,
                 hardif_free_ref(curr_hard_iface);

         if (!new_hard_iface)
-                goto out;
+                return;

         batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff);
         batman_packet->flags = PRIMARIES_FIRST_HOP;
...
@@ -157,13 +154,10 @@ static void primary_if_select(struct bat_priv *bat_priv,
         primary_if_update_addr(bat_priv);

         /***
-         * hacky trick to make sure that we send the HNA information via
+         * hacky trick to make sure that we send the TT information via
          * our new primary interface
          */
-        atomic_set(&bat_priv->hna_local_changed, 1);
-
-out:
-        spin_unlock_bh(&hardif_list_lock);
+        atomic_set(&bat_priv->tt_local_changed, 1);
 }

 static bool hardif_is_iface_up(struct hard_iface *hard_iface)
...
@@ -345,7 +339,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
         batman_packet->flags = 0;
         batman_packet->ttl = 2;
         batman_packet->tq = TQ_MAX_VALUE;
-        batman_packet->num_hna = 0;
+        batman_packet->num_tt = 0;

         hard_iface->if_num = bat_priv->num_ifaces;
         bat_priv->num_ifaces++;
...
@@ -456,6 +450,8 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
         struct hard_iface *hard_iface;
         int ret;

+        ASSERT_RTNL();
+
         ret = is_valid_iface(net_dev);
         if (ret != 1)
                 goto out;
...
@@ -482,10 +478,7 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
         atomic_set(&hard_iface->refcount, 2);

         check_known_mac_addr(hard_iface->net_dev);
-
-        spin_lock(&hardif_list_lock);
         list_add_tail_rcu(&hard_iface->list, &hardif_list);
-        spin_unlock(&hardif_list_lock);

         return hard_iface;
...
@@ -499,6 +492,8 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 static void hardif_remove_interface(struct hard_iface *hard_iface)
 {
+        ASSERT_RTNL();
+
         /* first deactivate interface */
         if (hard_iface->if_status != IF_NOT_IN_USE)
                 hardif_disable_interface(hard_iface);
...
@@ -514,20 +509,11 @@ static void hardif_remove_interface(struct hard_iface *hard_iface)
 void hardif_remove_interfaces(void)
 {
         struct hard_iface *hard_iface, *hard_iface_tmp;
-        struct list_head if_queue;
-
-        INIT_LIST_HEAD(&if_queue);

-        spin_lock(&hardif_list_lock);
+        rtnl_lock();
         list_for_each_entry_safe(hard_iface, hard_iface_tmp,
                                  &hardif_list, list) {
                 list_del_rcu(&hard_iface->list);
-                list_add_tail(&hard_iface->list, &if_queue);
-        }
-        spin_unlock(&hardif_list_lock);
-
-        rtnl_lock();
-        list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) {
                 hardif_remove_interface(hard_iface);
         }
         rtnl_unlock();
...
@@ -556,9 +542,7 @@ static int hard_if_event(struct notifier_block *this,
                 hardif_deactivate_interface(hard_iface);
                 break;
         case NETDEV_UNREGISTER:
-                spin_lock(&hardif_list_lock);
                 list_del_rcu(&hard_iface->list);
-                spin_unlock(&hardif_list_lock);

                 hardif_remove_interface(hard_iface);
                 break;
...
net/batman-adv/main.c
...
@@ -33,6 +33,9 @@
 #include "vis.h"
 #include "hash.h"

+
+/* List manipulations on hardif_list have to be rtnl_lock()'ed,
+ * list traversals just rcu-locked */
 struct list_head hardif_list;

 unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
...
@@ -81,28 +84,29 @@ int mesh_init(struct net_device *soft_iface)
         spin_lock_init(&bat_priv->forw_bat_list_lock);
         spin_lock_init(&bat_priv->forw_bcast_list_lock);
-        spin_lock_init(&bat_priv->hna_lhash_lock);
-        spin_lock_init(&bat_priv->hna_ghash_lock);
+        spin_lock_init(&bat_priv->tt_lhash_lock);
+        spin_lock_init(&bat_priv->tt_ghash_lock);
         spin_lock_init(&bat_priv->gw_list_lock);
         spin_lock_init(&bat_priv->vis_hash_lock);
         spin_lock_init(&bat_priv->vis_list_lock);
         spin_lock_init(&bat_priv->softif_neigh_lock);
+        spin_lock_init(&bat_priv->softif_neigh_vid_lock);

         INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
         INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
         INIT_HLIST_HEAD(&bat_priv->gw_list);
-        INIT_HLIST_HEAD(&bat_priv->softif_neigh_list);
+        INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);

         if (originator_init(bat_priv) < 1)
                 goto err;

-        if (hna_local_init(bat_priv) < 1)
+        if (tt_local_init(bat_priv) < 1)
                 goto err;

-        if (hna_global_init(bat_priv) < 1)
+        if (tt_global_init(bat_priv) < 1)
                 goto err;

-        hna_local_add(soft_iface, soft_iface->dev_addr);
+        tt_local_add(soft_iface, soft_iface->dev_addr);

         if (vis_init(bat_priv) < 1)
                 goto err;
...
@@ -133,8 +137,8 @@ void mesh_free(struct net_device *soft_iface)
         gw_node_purge(bat_priv);
         originator_free(bat_priv);

-        hna_local_free(bat_priv);
-        hna_global_free(bat_priv);
+        tt_local_free(bat_priv);
+        tt_global_free(bat_priv);

         softif_neigh_purge(bat_priv);
...
net/batman-adv/main.h
...
@@ -34,16 +34,18 @@
 #define TQ_MAX_VALUE 255
 #define JITTER 20
-#define TTL 50                  /* Time To Live of broadcast messages */
-#define PURGE_TIMEOUT 200       /* purge originators after time in seconds if no
-                                 * valid packet comes in -> TODO: check
-                                 * influence on TQ_LOCAL_WINDOW_SIZE */
-#define LOCAL_HNA_TIMEOUT 3600  /* in seconds */
-#define TQ_LOCAL_WINDOW_SIZE 64 /* sliding packet range of received originator
-                                 * messages in squence numbers (should be a
-                                 * multiple of our word size) */
+
+/* Time To Live of broadcast messages */
+#define TTL 50
+
+/* purge originators after time in seconds if no valid packet comes in
+ * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
+#define PURGE_TIMEOUT 200
+
+#define TT_LOCAL_TIMEOUT 3600 /* in seconds */
+
+/* sliding packet range of received originator messages in squence numbers
+ * (should be a multiple of our word size) */
+#define TQ_LOCAL_WINDOW_SIZE 64
 #define TQ_GLOBAL_WINDOW_SIZE 5
 #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
 #define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
...
@@ -55,21 +57,20 @@
 #define VIS_INTERVAL 5000       /* 5 seconds */

-/* how much worse secondary interfaces may be to
- * to be considered as bonding candidates */
+/* how much worse secondary interfaces may be to be considered as bonding
+ * candidates */
 #define BONDING_TQ_THRESHOLD 50

-#define MAX_AGGREGATION_BYTES 512 /* should not be bigger than 512 bytes or
-                                   * change the size of
-                                   * forw_packet->direct_link_flags */
+/* should not be bigger than 512 bytes or change the size of
+ * forw_packet->direct_link_flags */
+#define MAX_AGGREGATION_BYTES 512
 #define MAX_AGGREGATION_MS 100

 #define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */

+/* don't reset again within 30 seconds */
 #define RESET_PROTECTION_MS 30000
 #define EXPECTED_SEQNO_RANGE 65536
-/* don't reset again within 30 seconds */

 #define MESH_INACTIVE 0
 #define MESH_ACTIVE 1
...
@@ -84,12 +85,13 @@
 #ifdef pr_fmt
 #undef pr_fmt
 #endif
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Append 'batman-adv: ' before
-                                             * kernel messages */
+/* Append 'batman-adv: ' before kernel messages */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

-#define DBG_BATMAN 1 /* all messages related to routing / flooding /
-                      * broadcasting / etc */
-#define DBG_ROUTES 2 /* route or hna added / changed / deleted */
+/* all messages related to routing / flooding / broadcasting / etc */
+#define DBG_BATMAN 1
+/* route or tt entry added / changed / deleted */
+#define DBG_ROUTES 2
 #define DBG_ALL 3
...
net/batman-adv/originator.c
...
@@ -19,8 +19,6 @@
  *
  */

-/* increase the reference counter for this originator */
-
 #include "main.h"
 #include "originator.h"
 #include "hash.h"
...
@@ -144,7 +142,7 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
         spin_unlock_bh(&orig_node->neigh_list_lock);

         frag_list_free(&orig_node->frag_list);
-        hna_global_del_orig(orig_node->bat_priv, orig_node,
+        tt_global_del_orig(orig_node->bat_priv, orig_node,
                             "originator timed out");

         kfree(orig_node->bcast_own);
...
@@ -222,7 +220,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
         orig_node->bat_priv = bat_priv;
         memcpy(orig_node->orig, addr, ETH_ALEN);
         orig_node->router = NULL;
-        orig_node->hna_buff = NULL;
+        orig_node->tt_buff = NULL;
         orig_node->bcast_seqno_reset = jiffies - 1
                                         - msecs_to_jiffies(RESET_PROTECTION_MS);
         orig_node->batman_seqno_reset = jiffies - 1
...
@@ -333,8 +331,8 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
                                 &best_neigh_node)) {
                         update_routes(bat_priv, orig_node,
                                       best_neigh_node,
-                                      orig_node->hna_buff,
-                                      orig_node->hna_buff_len);
+                                      orig_node->tt_buff,
+                                      orig_node->tt_buff_len);
                 }
         }
...
net/batman-adv/packet.h
...
@@ -61,7 +61,7 @@ struct batman_packet {
         uint8_t  orig[6];
         uint8_t  prev_sender[6];
         uint8_t  ttl;
-        uint8_t  num_hna;
+        uint8_t  num_tt;
         uint8_t  gw_flags;  /* flags related to gateway class */
         uint8_t  align;
 } __packed;
...
@@ -128,8 +128,7 @@ struct vis_packet {
         uint8_t  entries;        /* number of entries behind this struct */
         uint32_t seqno;          /* sequence number */
         uint8_t  ttl;            /* TTL  */
-        uint8_t  vis_orig[6];    /* originator that informs about its
-                                  * neighbors */
+        uint8_t  vis_orig[6];    /* originator that announces its neighbors */
         uint8_t  target_orig[6]; /* who should receive this packet */
         uint8_t  sender_orig[6]; /* who sent or rebroadcasted this packet */
 } __packed;
...
...
net/batman-adv/routing.c
View file @
02e73c1e
...
...
@@ -64,28 +64,28 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
}
}
static
void
update_
HNA
(
struct
bat_priv
*
bat_priv
,
struct
orig_node
*
orig_node
,
unsigned
char
*
hna_buff
,
int
hna
_buff_len
)
static
void
update_
TT
(
struct
bat_priv
*
bat_priv
,
struct
orig_node
*
orig_node
,
unsigned
char
*
tt_buff
,
int
tt
_buff_len
)
{
if
((
hna_buff_len
!=
orig_node
->
hna
_buff_len
)
||
((
hna
_buff_len
>
0
)
&&
(
orig_node
->
hna
_buff_len
>
0
)
&&
(
memcmp
(
orig_node
->
hna_buff
,
hna_buff
,
hna
_buff_len
)
!=
0
)))
{
if
(
orig_node
->
hna
_buff_len
>
0
)
hna
_global_del_orig
(
bat_priv
,
orig_node
,
"originator changed
hna
"
);
if
((
hna_buff_len
>
0
)
&&
(
hna
_buff
))
hna
_global_add_orig
(
bat_priv
,
orig_node
,
hna_buff
,
hna
_buff_len
);
if
((
tt_buff_len
!=
orig_node
->
tt
_buff_len
)
||
((
tt
_buff_len
>
0
)
&&
(
orig_node
->
tt
_buff_len
>
0
)
&&
(
memcmp
(
orig_node
->
tt_buff
,
tt_buff
,
tt
_buff_len
)
!=
0
)))
{
if
(
orig_node
->
tt
_buff_len
>
0
)
tt
_global_del_orig
(
bat_priv
,
orig_node
,
"originator changed
tt
"
);
if
((
tt_buff_len
>
0
)
&&
(
tt
_buff
))
tt
_global_add_orig
(
bat_priv
,
orig_node
,
tt_buff
,
tt
_buff_len
);
}
}
static
void
update_route
(
struct
bat_priv
*
bat_priv
,
struct
orig_node
*
orig_node
,
struct
neigh_node
*
neigh_node
,
unsigned
char
*
hna_buff
,
int
hna
_buff_len
)
unsigned
char
*
tt_buff
,
int
tt
_buff_len
)
{
struct
neigh_node
*
curr_router
;
...
...
@@ -96,7 +96,7 @@ static void update_route(struct bat_priv *bat_priv,
bat_dbg
(
DBG_ROUTES
,
bat_priv
,
"Deleting route towards: %pM
\n
"
,
orig_node
->
orig
);
hna
_global_del_orig
(
bat_priv
,
orig_node
,
tt
_global_del_orig
(
bat_priv
,
orig_node
,
"originator timed out"
);
/* route added */
...
...
@@ -105,8 +105,8 @@ static void update_route(struct bat_priv *bat_priv,
bat_dbg
(
DBG_ROUTES
,
bat_priv
,
"Adding route towards: %pM (via %pM)
\n
"
,
orig_node
->
orig
,
neigh_node
->
addr
);
hna
_global_add_orig
(
bat_priv
,
orig_node
,
hna_buff
,
hna
_buff_len
);
tt
_global_add_orig
(
bat_priv
,
orig_node
,
tt_buff
,
tt
_buff_len
);
/* route changed */
}
else
{
...
...
@@ -135,8 +135,8 @@ static void update_route(struct bat_priv *bat_priv,
void
update_routes
(
struct
bat_priv
*
bat_priv
,
struct
orig_node
*
orig_node
,
struct
neigh_node
*
neigh_node
,
unsigned
char
*
hna
_buff
,
int
hna
_buff_len
)
struct
neigh_node
*
neigh_node
,
unsigned
char
*
tt
_buff
,
int
tt
_buff_len
)
{
struct
neigh_node
*
router
=
NULL
;
...
...
@@ -147,10 +147,10 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
if
(
router
!=
neigh_node
)
update_route
(
bat_priv
,
orig_node
,
neigh_node
,
hna_buff
,
hna
_buff_len
);
/* may be just
HNA
changed */
tt_buff
,
tt
_buff_len
);
/* may be just
TT
changed */
else
update_
HNA
(
bat_priv
,
orig_node
,
hna_buff
,
hna
_buff_len
);
update_
TT
(
bat_priv
,
orig_node
,
tt_buff
,
tt
_buff_len
);
out:
if
(
router
)
...
...
@@ -169,42 +169,12 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
uint8_t
orig_eq_count
,
neigh_rq_count
,
tq_own
;
int
tq_asym_penalty
,
ret
=
0
;
if
(
orig_node
==
orig_neigh_node
)
{
rcu_read_lock
();
hlist_for_each_entry_rcu
(
tmp_neigh_node
,
node
,
&
orig_node
->
neigh_list
,
list
)
{
if
(
!
compare_eth
(
tmp_neigh_node
->
addr
,
orig_neigh_node
->
orig
))
continue
;
if
(
tmp_neigh_node
->
if_incoming
!=
if_incoming
)
continue
;
if
(
!
atomic_inc_not_zero
(
&
tmp_neigh_node
->
refcount
))
continue
;
neigh_node
=
tmp_neigh_node
;
}
rcu_read_unlock
();
if
(
!
neigh_node
)
neigh_node
=
create_neighbor
(
orig_node
,
orig_neigh_node
,
orig_neigh_node
->
orig
,
if_incoming
);
if
(
!
neigh_node
)
goto
out
;
neigh_node
->
last_valid
=
jiffies
;
}
else
{
/* find packet count of corresponding one hop neighbor */
/* find corresponding one hop neighbor */
rcu_read_lock
();
hlist_for_each_entry_rcu
(
tmp_neigh_node
,
node
,
&
orig_neigh_node
->
neigh_list
,
list
)
{
if
(
!
compare_eth
(
tmp_neigh_node
->
addr
,
orig_neigh_node
->
orig
))
if
(
!
compare_eth
(
tmp_neigh_node
->
addr
,
orig_neigh_node
->
orig
))
continue
;
if
(
tmp_neigh_node
->
if_incoming
!=
if_incoming
)
...
...
@@ -214,6 +184,7 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
continue
;
neigh_node
=
tmp_neigh_node
;
break
;
}
rcu_read_unlock
();
...
...
@@ -222,12 +193,17 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
orig_neigh_node
,
orig_neigh_node
->
orig
,
if_incoming
);
if
(
!
neigh_node
)
goto
out
;
}
/* if orig_node is direct neighbour update neigh_node last_valid */
if
(
orig_node
==
orig_neigh_node
)
neigh_node
->
last_valid
=
jiffies
;
orig_node
->
last_valid
=
jiffies
;
/* find packet count of corresponding one hop neighbor */
spin_lock_bh
(
&
orig_node
->
ogm_cnt_lock
);
orig_eq_count
=
orig_neigh_node
->
bcast_own_sum
[
if_incoming
->
if_num
];
neigh_rq_count
=
neigh_node
->
real_packet_count
;
...
...
@@ -387,14 +363,14 @@ static void update_orig(struct bat_priv *bat_priv,
struct
ethhdr
*
ethhdr
,
struct
batman_packet
*
batman_packet
,
struct
hard_iface
*
if_incoming
,
unsigned
char
*
hna_buff
,
int
hna
_buff_len
,
unsigned
char
*
tt_buff
,
int
tt
_buff_len
,
char
is_duplicate
)
{
struct
neigh_node
*
neigh_node
=
NULL
,
*
tmp_neigh_node
=
NULL
;
struct
neigh_node
*
router
=
NULL
;
struct
orig_node
*
orig_node_tmp
;
struct
hlist_node
*
node
;
int
tmp_
hna
_buff_len
;
int
tmp_
tt
_buff_len
;
uint8_t
bcast_own_sum_orig
,
bcast_own_sum_neigh
;
bat_dbg
(
DBG_BATMAN
,
bat_priv
,
"update_originator(): "
...
...
@@ -459,18 +435,18 @@ static void update_orig(struct bat_priv *bat_priv,
bonding_candidate_add
(
orig_node
,
neigh_node
);
tmp_
hna_buff_len
=
(
hna_buff_len
>
batman_packet
->
num_hna
*
ETH_ALEN
?
batman_packet
->
num_
hna
*
ETH_ALEN
:
hna
_buff_len
);
tmp_
tt_buff_len
=
(
tt_buff_len
>
batman_packet
->
num_tt
*
ETH_ALEN
?
batman_packet
->
num_
tt
*
ETH_ALEN
:
tt
_buff_len
);
/* if this neighbor already is our next hop there is nothing
* to change */
router
=
orig_node_get_router
(
orig_node
);
if
(
router
==
neigh_node
)
goto
update_
hna
;
goto
update_
tt
;
/* if this neighbor does not offer a better TQ we won't consider it */
if
(
router
&&
(
router
->
tq_avg
>
neigh_node
->
tq_avg
))
goto
update_
hna
;
goto
update_
tt
;
/* if the TQ is the same and the link not more symetric we
* won't consider it either */
...
...
@@ -488,16 +464,16 @@ static void update_orig(struct bat_priv *bat_priv,
spin_unlock_bh
(
&
orig_node_tmp
->
ogm_cnt_lock
);
if
(
bcast_own_sum_orig
>=
bcast_own_sum_neigh
)
goto
update_
hna
;
goto
update_
tt
;
}
update_routes
(
bat_priv
,
orig_node
,
neigh_node
,
hna_buff
,
tmp_hna
_buff_len
);
tt_buff
,
tmp_tt
_buff_len
);
goto
update_gw
;
update_
hna
:
update_
tt
:
update_routes
(
bat_priv
,
orig_node
,
router
,
hna_buff
,
tmp_hna
_buff_len
);
tt_buff
,
tmp_tt
_buff_len
);
update_gw:
if
(
orig_node
->
gw_flags
!=
batman_packet
->
gw_flags
)
...
...
@@ -621,7 +597,7 @@ static char count_real_packets(struct ethhdr *ethhdr,
void
receive_bat_packet
(
struct
ethhdr
*
ethhdr
,
struct
batman_packet
*
batman_packet
,
unsigned
char
*
hna_buff
,
int
hna
_buff_len
,
unsigned
char
*
tt_buff
,
int
tt
_buff_len
,
struct
hard_iface
*
if_incoming
)
{
struct
bat_priv
*
bat_priv
=
netdev_priv
(
if_incoming
->
soft_iface
);
...
...
@@ -818,14 +794,14 @@ void receive_bat_packet(struct ethhdr *ethhdr,
((
orig_node
->
last_real_seqno
==
batman_packet
->
seqno
)
&&
(
orig_node
->
last_ttl
-
3
<=
batman_packet
->
ttl
))))
update_orig
(
bat_priv
,
orig_node
,
ethhdr
,
batman_packet
,
if_incoming
,
hna_buff
,
hna
_buff_len
,
is_duplicate
);
if_incoming
,
tt_buff
,
tt
_buff_len
,
is_duplicate
);
/* is single hop (direct) neighbor */
if
(
is_single_hop_neigh
)
{
/* mark direct link on incoming interface */
schedule_forward_packet
(
orig_node
,
ethhdr
,
batman_packet
,
1
,
hna
_buff_len
,
if_incoming
);
1
,
tt
_buff_len
,
if_incoming
);
bat_dbg
(
DBG_BATMAN
,
bat_priv
,
"Forwarding packet: "
"rebroadcast neighbor packet with direct link flag
\n
"
);
...
...
@@ -848,7 +824,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
bat_dbg
(
DBG_BATMAN
,
bat_priv
,
"Forwarding packet: rebroadcast originator packet
\n
"
);
schedule_forward_packet
(
orig_node
,
ethhdr
,
batman_packet
,
0
,
hna
_buff_len
,
if_incoming
);
0
,
tt
_buff_len
,
if_incoming
);
out_neigh:
if
((
orig_neigh_node
)
&&
(
!
is_single_hop_neigh
))
...
...
@@ -1213,7 +1189,7 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
router
=
orig_node_get_router
(
orig_node
);
if
(
!
router
)
return
NULL
;
goto
err
;
/* without bonding, the first node should
* always choose the default router. */
...
...
@@ -1222,10 +1198,8 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
rcu_read_lock
();
/* select default router to output */
router_orig
=
router
->
orig_node
;
if
(
!
router_orig
)
{
rcu_read_unlock
();
return
NULL
;
}
if
(
!
router_orig
)
goto
err_unlock
;
if
((
!
recv_if
)
&&
(
!
bonding_enabled
))
goto
return_router
;
...
...
@@ -1268,6 +1242,12 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
return_router:
rcu_read_unlock
();
return
router
;
err_unlock:
rcu_read_unlock
();
err:
if
(
router
)
neigh_node_free_ref
(
router
);
return
NULL
;
}
static
int
check_unicast_packet
(
struct
sk_buff
*
skb
,
int
hdr_size
)
...
...
net/batman-adv/routing.h
...
@@ -25,11 +25,11 @@
 void slide_own_bcast_window(struct hard_iface *hard_iface);
 void receive_bat_packet(struct ethhdr *ethhdr,
                         struct batman_packet *batman_packet,
-                        unsigned char *hna_buff, int hna_buff_len,
+                        unsigned char *tt_buff, int tt_buff_len,
                         struct hard_iface *if_incoming);
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
-                   struct neigh_node *neigh_node, unsigned char *hna_buff,
-                   int hna_buff_len);
+                   struct neigh_node *neigh_node, unsigned char *tt_buff,
+                   int tt_buff_len);
 int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
...
net/batman-adv/send.c
...
@@ -121,7 +121,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
         /* adjust all flags and log packets */
         while (aggregated_packet(buff_pos,
                                  forw_packet->packet_len,
-                                 batman_packet->num_hna)) {
+                                 batman_packet->num_tt)) {

                 /* we might have aggregated direct link packets with an
                  * ordinary base packet */
...
@@ -146,7 +146,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
                             hard_iface->net_dev->dev_addr);

                 buff_pos += sizeof(struct batman_packet) +
-                        (batman_packet->num_hna * ETH_ALEN);
+                        (batman_packet->num_tt * ETH_ALEN);
                 packet_num++;
                 batman_packet = (struct batman_packet *)
                         (forw_packet->skb->data + buff_pos);
...
@@ -222,7 +222,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
         struct batman_packet *batman_packet;

         new_len = sizeof(struct batman_packet) +
-                        (bat_priv->num_local_hna * ETH_ALEN);
+                        (bat_priv->num_local_tt * ETH_ALEN);
         new_buff = kmalloc(new_len, GFP_ATOMIC);

         /* keep old buffer if kmalloc should fail */
...
@@ -231,7 +231,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
                        sizeof(struct batman_packet));
                 batman_packet = (struct batman_packet *)new_buff;

-                batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
+                batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
                                 new_buff + sizeof(struct batman_packet),
                                 new_len - sizeof(struct batman_packet));
...
@@ -266,8 +266,8 @@ void schedule_own_packet(struct hard_iface *hard_iface)
         if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
                 hard_iface->if_status = IF_ACTIVE;

-        /* if local hna has changed and interface is a primary interface */
-        if ((atomic_read(&bat_priv->hna_local_changed)) &&
+        /* if local tt has changed and interface is a primary interface */
+        if ((atomic_read(&bat_priv->tt_local_changed)) &&
             (hard_iface == primary_if))
                 rebuild_batman_packet(bat_priv, hard_iface);
...
@@ -309,7 +309,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 void schedule_forward_packet(struct orig_node *orig_node,
                              struct ethhdr *ethhdr,
                              struct batman_packet *batman_packet,
-                             uint8_t directlink, int hna_buff_len,
+                             uint8_t directlink, int tt_buff_len,
                              struct hard_iface *if_incoming)
 {
         struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
...
@@ -369,7 +369,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
         send_time = forward_send_time();
         add_bat_packet_to_list(bat_priv,
                                (unsigned char *)batman_packet,
-                               sizeof(struct batman_packet) + hna_buff_len,
+                               sizeof(struct batman_packet) + tt_buff_len,
                                if_incoming, 0, send_time);
 }
...
net/batman-adv/send.h
...
@@ -29,7 +29,7 @@ void schedule_own_packet(struct hard_iface *hard_iface);
 void schedule_forward_packet(struct orig_node *orig_node,
                              struct ethhdr *ethhdr,
                              struct batman_packet *batman_packet,
-                             uint8_t directlink, int hna_buff_len,
+                             uint8_t directlink, int tt_buff_len,
                              struct hard_iface *if_outgoing);
 int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
 void send_outstanding_bat_packet(struct work_struct *work);
...
net/batman-adv/soft-interface.c
...
@@ -86,135 +86,251 @@ static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
                 call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
 }

-static struct softif_neigh *softif_neigh_get_selected(struct bat_priv *bat_priv)
+static void softif_neigh_vid_free_rcu(struct rcu_head *rcu)
 {
-        struct softif_neigh *neigh;
-
-        rcu_read_lock();
-        neigh = rcu_dereference(bat_priv->softif_neigh);
-
-        if (neigh && !atomic_inc_not_zero(&neigh->refcount))
-                neigh = NULL;
-
-        rcu_read_unlock();
-        return neigh;
-}
-
-static void softif_neigh_select(struct bat_priv *bat_priv,
-                                struct softif_neigh *new_neigh)
-{
-        struct softif_neigh *curr_neigh;
+        struct softif_neigh_vid *softif_neigh_vid;
+        struct softif_neigh *softif_neigh;
+        struct hlist_node *node, *node_tmp;
+        struct bat_priv *bat_priv;
+
+        softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu);
+        bat_priv = softif_neigh_vid->bat_priv;

         spin_lock_bh(&bat_priv->softif_neigh_lock);
-
-        if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
-                new_neigh = NULL;
-
-        curr_neigh = bat_priv->softif_neigh;
-        rcu_assign_pointer(bat_priv->softif_neigh, new_neigh);
-
-        if (curr_neigh)
-                softif_neigh_free_ref(curr_neigh);
-
+        hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
+                                  &softif_neigh_vid->softif_neigh_list, list) {
+                hlist_del_rcu(&softif_neigh->list);
+                softif_neigh_free_ref(softif_neigh);
+        }
         spin_unlock_bh(&bat_priv->softif_neigh_lock);
+
+        kfree(softif_neigh_vid);
 }

-static void softif_neigh_deselect(struct bat_priv *bat_priv)
+static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid)
 {
-        softif_neigh_select(bat_priv, NULL);
+        if (atomic_dec_and_test(&softif_neigh_vid->refcount))
+                call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu);
 }

-void softif_neigh_purge(struct bat_priv *bat_priv)
+static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
+                                                     short vid)
 {
-        struct softif_neigh *softif_neigh, *curr_softif_neigh;
-        struct hlist_node *node, *node_tmp;
-        char do_deselect = 0;
-
-        curr_softif_neigh = softif_neigh_get_selected(bat_priv);
-
-        spin_lock_bh(&bat_priv->softif_neigh_lock);
-
-        hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
-                                  &bat_priv->softif_neigh_list, list) {
-
-                if ((!time_after(jiffies, softif_neigh->last_seen +
-                                msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) &&
-                    (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
-                        continue;
-
-                if (curr_softif_neigh == softif_neigh) {
-                        bat_dbg(DBG_ROUTES, bat_priv,
-                                "Current mesh exit point '%pM' vanished "
-                                "(vid: %d).\n",
-                                softif_neigh->addr, softif_neigh->vid);
-                        do_deselect = 1;
-                }
-
-                hlist_del_rcu(&softif_neigh->list);
-                softif_neigh_free_ref(softif_neigh);
-        }
-
-        spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-        /* soft_neigh_deselect() needs to acquire the softif_neigh_lock */
-        if (do_deselect)
-                softif_neigh_deselect(bat_priv);
-
-        if (curr_softif_neigh)
-                softif_neigh_free_ref(curr_softif_neigh);
+        struct softif_neigh_vid *softif_neigh_vid;
+        struct hlist_node *node;
+
+        rcu_read_lock();
+        hlist_for_each_entry_rcu(softif_neigh_vid, node,
+                                 &bat_priv->softif_neigh_vids, list) {
+                if (softif_neigh_vid->vid != vid)
+                        continue;
+
+                if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
+                        continue;
+
+                goto out;
+        }
+
+        softif_neigh_vid = kzalloc(sizeof(struct softif_neigh_vid),
+                                   GFP_ATOMIC);
+        if (!softif_neigh_vid)
+                goto out;
+
+        softif_neigh_vid->vid = vid;
+        softif_neigh_vid->bat_priv = bat_priv;
+
+        /* initialize with 2 - caller decrements counter by one */
+        atomic_set(&softif_neigh_vid->refcount, 2);
+        INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list);
+        INIT_HLIST_NODE(&softif_neigh_vid->list);
+        spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
+        hlist_add_head_rcu(&softif_neigh_vid->list,
+                           &bat_priv->softif_neigh_vids);
+        spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
+
+out:
+        rcu_read_unlock();
+        return softif_neigh_vid;
 }

 static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
                                              uint8_t *addr, short vid)
 {
-        struct softif_neigh *softif_neigh;
+        struct softif_neigh_vid *softif_neigh_vid;
+        struct softif_neigh *softif_neigh = NULL;
         struct hlist_node *node;

+        softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
+        if (!softif_neigh_vid)
+                goto out;
+
         rcu_read_lock();
         hlist_for_each_entry_rcu(softif_neigh, node,
-                                 &bat_priv->softif_neigh_list, list) {
+                                 &softif_neigh_vid->softif_neigh_list, list) {
                 if (!compare_eth(softif_neigh->addr, addr))
                         continue;

-                if (softif_neigh->vid != vid)
-                        continue;
-
                 if (!atomic_inc_not_zero(&softif_neigh->refcount))
                         continue;

                 softif_neigh->last_seen = jiffies;
-                goto out;
+                goto unlock;
         }

         softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
         if (!softif_neigh)
-                goto out;
+                goto unlock;

         memcpy(softif_neigh->addr, addr, ETH_ALEN);
-        softif_neigh->vid = vid;
         softif_neigh->last_seen = jiffies;
         /* initialize with 2 - caller decrements counter by one */
         atomic_set(&softif_neigh->refcount, 2);

         INIT_HLIST_NODE(&softif_neigh->list);
         spin_lock_bh(&bat_priv->softif_neigh_lock);
-        hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
+        hlist_add_head_rcu(&softif_neigh->list,
+                           &softif_neigh_vid->softif_neigh_list);
         spin_unlock_bh(&bat_priv->softif_neigh_lock);

+unlock:
+        rcu_read_unlock();
 out:
-        rcu_read_unlock();
+        if (softif_neigh_vid)
+                softif_neigh_vid_free_ref(softif_neigh_vid);
         return softif_neigh;
 }

+static struct softif_neigh *softif_neigh_get_selected(
+                                struct softif_neigh_vid *softif_neigh_vid)
+{
+        struct softif_neigh *softif_neigh;
+
+        rcu_read_lock();
+        softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
+
+        if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount))
+                softif_neigh = NULL;
+
+        rcu_read_unlock();
+        return softif_neigh;
+}
+
+static struct softif_neigh *softif_neigh_vid_get_selected(
+                                                struct bat_priv *bat_priv,
+                                                short vid)
+{
+        struct softif_neigh_vid *softif_neigh_vid;
+        struct softif_neigh *softif_neigh = NULL;
+
+        softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
+        if (!softif_neigh_vid)
+                goto out;
+
+        softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
+out:
+        if (softif_neigh_vid)
+                softif_neigh_vid_free_ref(softif_neigh_vid);
+        return softif_neigh;
+}
+
+static void softif_neigh_vid_select(struct bat_priv *bat_priv,
+                                    struct softif_neigh *new_neigh,
+                                    short vid)
+{
+        struct softif_neigh_vid *softif_neigh_vid;
+        struct softif_neigh *curr_neigh;
+
+        softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
+        if (!softif_neigh_vid)
+                goto out;
+
+        spin_lock_bh(&bat_priv->softif_neigh_lock);
+
+        if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
+                new_neigh = NULL;
+
+        curr_neigh = softif_neigh_vid->softif_neigh;
+        rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
+
+        if ((curr_neigh) && (!new_neigh))
+                bat_dbg(DBG_ROUTES, bat_priv,
+                        "Removing mesh exit point on vid: %d (prev: %pM).\n",
+                        vid, curr_neigh->addr);
+        else if ((curr_neigh) && (new_neigh))
+                bat_dbg(DBG_ROUTES, bat_priv,
+                        "Changing mesh exit point on vid: %d from %pM "
+                        "to %pM.\n", vid, curr_neigh->addr, new_neigh->addr);
+        else if ((!curr_neigh) && (new_neigh))
+                bat_dbg(DBG_ROUTES, bat_priv,
+                        "Setting mesh exit point on vid: %d to %pM.\n",
+                        vid, new_neigh->addr);
+
+        if (curr_neigh)
+                softif_neigh_free_ref(curr_neigh);
+
+        spin_unlock_bh(&bat_priv->softif_neigh_lock);
+
+out:
+        if (softif_neigh_vid)
+                softif_neigh_vid_free_ref(softif_neigh_vid);
+}
+
+static void softif_neigh_vid_deselect(struct bat_priv *bat_priv,
+                                      struct softif_neigh_vid *softif_neigh_vid)
+{
+        struct softif_neigh *curr_neigh;
+        struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp;
+        struct hard_iface *primary_if = NULL;
+        struct hlist_node *node;
+
+        primary_if = primary_if_get_selected(bat_priv);
+        if (!primary_if)
+                goto out;
+
+        /* find new softif_neigh immediately to avoid temporary loops */
+        rcu_read_lock();
+        curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
+
+        hlist_for_each_entry_rcu(softif_neigh_tmp, node,
+                                 &softif_neigh_vid->softif_neigh_list,
+                                 list) {
+                if (softif_neigh_tmp == curr_neigh)
+                        continue;
+
+                /* we got a neighbor but its mac is 'bigger' than ours  */
+                if (memcmp(primary_if->net_dev->dev_addr,
+                           softif_neigh_tmp->addr, ETH_ALEN) < 0)
+                        continue;
+
+                if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount))
+                        continue;
+
+                softif_neigh = softif_neigh_tmp;
+                goto unlock;
+        }
+
+unlock:
+        rcu_read_unlock();
+out:
+        softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid);
+
+        if (primary_if)
+                hardif_free_ref(primary_if);
+        if (softif_neigh)
+                softif_neigh_free_ref(softif_neigh);
+}
+
 int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 {
         struct net_device *net_dev = (struct net_device *)seq->private;
         struct bat_priv *bat_priv = netdev_priv(net_dev);
+        struct softif_neigh_vid *softif_neigh_vid;
         struct softif_neigh *softif_neigh;
         struct hard_iface *primary_if;
-        struct hlist_node *node;
+        struct hlist_node *node, *node_tmp;
         struct softif_neigh *curr_softif_neigh;
-        int ret = 0;
+        int ret = 0, last_seen_secs, last_seen_msecs;

         primary_if = primary_if_get_selected(bat_priv);
         if (!primary_if) {
...
@@ -233,24 +349,104 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
         seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);

-        curr_softif_neigh = softif_neigh_get_selected(bat_priv);
-
         rcu_read_lock();
-        hlist_for_each_entry_rcu(softif_neigh, node,
-                                 &bat_priv->softif_neigh_list, list)
-                seq_printf(seq, "%s %pM (vid: %d)\n",
-                           curr_softif_neigh == softif_neigh ? "=>" : "  ",
-                           softif_neigh->addr, softif_neigh->vid);
-        rcu_read_unlock();
+        hlist_for_each_entry_rcu(softif_neigh_vid, node,
+                                 &bat_priv->softif_neigh_vids, list) {
+                seq_printf(seq, "     %-15s %s on vid: %d\n",
+                           "Originator", "last-seen", softif_neigh_vid->vid);
+
+                curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
+
+                hlist_for_each_entry_rcu(softif_neigh, node_tmp,
+                                         &softif_neigh_vid->softif_neigh_list,
+                                         list) {
+                        last_seen_secs = jiffies_to_msecs(jiffies -
+                                        softif_neigh->last_seen) / 1000;
+                        last_seen_msecs = jiffies_to_msecs(jiffies -
+                                        softif_neigh->last_seen) % 1000;
+
+                        seq_printf(seq, "%s %pM  %3i.%03is\n",
+                                   curr_softif_neigh == softif_neigh
+                                   ? "=>" : "  ", softif_neigh->addr,
+                                   last_seen_secs, last_seen_msecs);
+                }
+
+                if (curr_softif_neigh)
+                        softif_neigh_free_ref(curr_softif_neigh);
+
+                seq_printf(seq, "\n");
+        }
+        rcu_read_unlock();

-        if (curr_softif_neigh)
-                softif_neigh_free_ref(curr_softif_neigh);
 out:
         if (primary_if)
                 hardif_free_ref(primary_if);
         return ret;
 }

+void softif_neigh_purge(struct bat_priv *bat_priv)
+{
+        struct softif_neigh *softif_neigh, *curr_softif_neigh;
+        struct softif_neigh_vid *softif_neigh_vid;
+        struct hlist_node *node, *node_tmp, *node_tmp2;
+        char do_deselect;
+
+        rcu_read_lock();
+        hlist_for_each_entry_rcu(softif_neigh_vid, node,
+                                 &bat_priv->softif_neigh_vids, list) {
+                if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
+                        continue;
+
+                curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
+                do_deselect = 0;
+
+                spin_lock_bh(&bat_priv->softif_neigh_lock);
+                hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
+                                          &softif_neigh_vid->softif_neigh_list,
+                                          list) {
+                        if ((!time_after(jiffies, softif_neigh->last_seen +
+                                msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) &&
+                            (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
+                                continue;
+
+                        if (curr_softif_neigh == softif_neigh) {
+                                bat_dbg(DBG_ROUTES, bat_priv,
+                                        "Current mesh exit point on vid: %d "
+                                        "'%pM' vanished.\n",
+                                        softif_neigh_vid->vid,
+                                        softif_neigh->addr);
+                                do_deselect = 1;
+                        }
+
+                        hlist_del_rcu(&softif_neigh->list);
+                        softif_neigh_free_ref(softif_neigh);
+                }
+                spin_unlock_bh(&bat_priv->softif_neigh_lock);
+
+                /* soft_neigh_vid_deselect() needs to acquire the
+                 * softif_neigh_lock */
+                if (do_deselect)
+                        softif_neigh_vid_deselect(bat_priv, softif_neigh_vid);
+
+                if (curr_softif_neigh)
+                        softif_neigh_free_ref(curr_softif_neigh);
+
+                softif_neigh_vid_free_ref(softif_neigh_vid);
+        }
+        rcu_read_unlock();
+
+        spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
+        hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp,
+                                  &bat_priv->softif_neigh_vids, list) {
+                if (!hlist_empty(&softif_neigh_vid->softif_neigh_list))
+                        continue;
+
+                hlist_del_rcu(&softif_neigh_vid->list);
+                softif_neigh_vid_free_ref(softif_neigh_vid);
+        }
+        spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
+}
+
 static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
                                short vid)
 {
...
@@ -283,10 +479,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
         if (!softif_neigh)
                 goto out;

-        curr_softif_neigh = softif_neigh_get_selected(bat_priv);
-        if (!curr_softif_neigh)
-                goto out;
-
+        curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
         if (curr_softif_neigh == softif_neigh)
                 goto out;
...
@@ -299,33 +492,16 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
                    softif_neigh->addr, ETH_ALEN) < 0)
                 goto out;

-        /* switch to new 'smallest neighbor' */
-        if ((curr_softif_neigh) &&
-            (memcmp(softif_neigh->addr, curr_softif_neigh->addr,
-                    ETH_ALEN) < 0)) {
-                bat_dbg(DBG_ROUTES, bat_priv,
-                        "Changing mesh exit point from %pM (vid: %d) "
-                        "to %pM (vid: %d).\n",
-                         curr_softif_neigh->addr, curr_softif_neigh->vid,
-                         softif_neigh->addr, softif_neigh->vid);
-
-                softif_neigh_select(bat_priv, softif_neigh);
-                goto out;
-        }
-
         /* close own batX device and use softif_neigh as exit node */
-        if ((!curr_softif_neigh) &&
-            (memcmp(softif_neigh->addr,
-                    primary_if->net_dev->dev_addr, ETH_ALEN) < 0)) {
-                bat_dbg(DBG_ROUTES, bat_priv,
-                        "Setting mesh exit point to %pM (vid: %d).\n",
-                        softif_neigh->addr, softif_neigh->vid);
-
-                softif_neigh_select(bat_priv, softif_neigh);
+        if (!curr_softif_neigh) {
+                softif_neigh_vid_select(bat_priv, softif_neigh, vid);
+                goto out;
         }

+        /* switch to new 'smallest neighbor' */
+        if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0)
+                softif_neigh_vid_select(bat_priv, softif_neigh, vid);
+
 out:
         kfree_skb(skb);
         if (softif_neigh)
...
@@ -363,11 +539,11 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
         if (!is_valid_ether_addr(addr->sa_data))
                 return -EADDRNOTAVAIL;

-        /* only modify hna-table if it has been initialised before */
+        /* only modify transtable if it has been initialised before */
         if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
-                hna_local_remove(bat_priv, dev->dev_addr,
+                tt_local_remove(bat_priv, dev->dev_addr,
                                  "mac address changed");
-                hna_local_add(dev, addr->sa_data);
+                tt_local_add(dev, addr->sa_data);
         }

         memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
...
@@ -420,12 +596,12 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
          * if we have a another chosen mesh exit node in range
          * it will transport the packets to the mesh
          */
-        curr_softif_neigh = softif_neigh_get_selected(bat_priv);
-        if ((curr_softif_neigh) && (curr_softif_neigh->vid == vid))
+        curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
+        if (curr_softif_neigh)
                 goto dropped;

         /* TODO: check this for locks */
-        hna_local_add(soft_iface, ethhdr->h_source);
+        tt_local_add(soft_iface, ethhdr->h_source);

         if (is_multicast_ether_addr(ethhdr->h_dest)) {
                 ret = gw_is_target(bat_priv, skb);
...
@@ -529,8 +705,8 @@ void interface_rx(struct net_device *soft_iface,
          * if we have a another chosen mesh exit node in range
          * it will transport the packets to the non-mesh network
          */
-        curr_softif_neigh = softif_neigh_get_selected(bat_priv);
-        if (curr_softif_neigh && (curr_softif_neigh->vid == vid)) {
+        curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
+        if (curr_softif_neigh) {
                 skb_push(skb, hdr_size);
                 unicast_packet = (struct unicast_packet *)skb->data;
...
@@ -613,8 +789,8 @@ static void interface_setup(struct net_device *dev)
          * have not been initialized yet
          */
         dev->mtu = ETH_DATA_LEN;
-        dev->hard_header_len = BAT_HEADER_LEN; /* reserve more space in the
-                                                * skbuff for our header */
+        /* reserve more space in the skbuff for our header */
+        dev->hard_header_len = BAT_HEADER_LEN;

         /* generate random address */
         random_ether_addr(dev_addr);
...
@@ -639,7 +815,7 @@ struct net_device *softif_create(char *name)
                 goto out;
         }

-        ret = register_netdev(soft_iface);
+        ret = register_netdevice(soft_iface);
         if (ret < 0) {
                 pr_err("Unable to register the batman interface '%s': %i\n",
                        name, ret);
...
@@ -663,11 +839,10 @@ struct net_device *softif_create(char *name)
         atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
         atomic_set(&bat_priv->bcast_seqno, 1);
-        atomic_set(&bat_priv->hna_local_changed, 0);
+        atomic_set(&bat_priv->tt_local_changed, 0);

         bat_priv->primary_if = NULL;
         bat_priv->num_ifaces = 0;
-        bat_priv->softif_neigh = NULL;

         ret = sysfs_add_meshif(soft_iface);
         if (ret < 0)
...
net/batman-adv/translation-table.c
View file @
02e73c1e
...
...
@@ -26,40 +26,40 @@
#include "hash.h"
#include "originator.h"
static
void
hna
_local_purge
(
struct
work_struct
*
work
);
static
void
_
hna
_global_del_orig
(
struct
bat_priv
*
bat_priv
,
struct
hna_global_entry
*
hna
_global_entry
,
static
void
tt
_local_purge
(
struct
work_struct
*
work
);
static
void
_
tt
_global_del_orig
(
struct
bat_priv
*
bat_priv
,
struct
tt_global_entry
*
tt
_global_entry
,
char
*
message
);
/* returns 1 if they are the same mac addr */
static
int
compare_l
hna
(
struct
hlist_node
*
node
,
void
*
data2
)
static
int
compare_l
tt
(
struct
hlist_node
*
node
,
void
*
data2
)
{
void
*
data1
=
container_of
(
node
,
struct
hna
_local_entry
,
hash_entry
);
void
*
data1
=
container_of
(
node
,
struct
tt
_local_entry
,
hash_entry
);
return
(
memcmp
(
data1
,
data2
,
ETH_ALEN
)
==
0
?
1
:
0
);
}
/* returns 1 if they are the same mac addr */
static
int
compare_g
hna
(
struct
hlist_node
*
node
,
void
*
data2
)
static
int
compare_g
tt
(
struct
hlist_node
*
node
,
void
*
data2
)
{
void
*
data1
=
container_of
(
node
,
struct
hna
_global_entry
,
hash_entry
);
void
*
data1
=
container_of
(
node
,
struct
tt
_global_entry
,
hash_entry
);
return
(
memcmp
(
data1
,
data2
,
ETH_ALEN
)
==
0
?
1
:
0
);
}
static
void
hna
_local_start_timer
(
struct
bat_priv
*
bat_priv
)
static
void
tt
_local_start_timer
(
struct
bat_priv
*
bat_priv
)
{
INIT_DELAYED_WORK
(
&
bat_priv
->
hna_work
,
hna
_local_purge
);
queue_delayed_work
(
bat_event_workqueue
,
&
bat_priv
->
hna
_work
,
10
*
HZ
);
INIT_DELAYED_WORK
(
&
bat_priv
->
tt_work
,
tt
_local_purge
);
queue_delayed_work
(
bat_event_workqueue
,
&
bat_priv
->
tt
_work
,
10
*
HZ
);
}
static
struct
hna_local_entry
*
hna
_local_hash_find
(
struct
bat_priv
*
bat_priv
,
static
struct
tt_local_entry
*
tt
_local_hash_find
(
struct
bat_priv
*
bat_priv
,
void
*
data
)
{
struct
hashtable_t
*
hash
=
bat_priv
->
hna
_local_hash
;
struct
hashtable_t
*
hash
=
bat_priv
->
tt
_local_hash
;
struct
hlist_head
*
head
;
struct
hlist_node
*
node
;
struct
hna_local_entry
*
hna_local_entry
,
*
hna
_local_entry_tmp
=
NULL
;
struct
tt_local_entry
*
tt_local_entry
,
*
tt
_local_entry_tmp
=
NULL
;
int
index
;
if
(
!
hash
)
...
...
@@ -69,26 +69,26 @@ static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
head
=
&
hash
->
table
[
index
];
rcu_read_lock
();
hlist_for_each_entry_rcu
(
hna
_local_entry
,
node
,
head
,
hash_entry
)
{
if
(
!
compare_eth
(
hna
_local_entry
,
data
))
hlist_for_each_entry_rcu
(
tt
_local_entry
,
node
,
head
,
hash_entry
)
{
if
(
!
compare_eth
(
tt
_local_entry
,
data
))
continue
;
hna_local_entry_tmp
=
hna
_local_entry
;
tt_local_entry_tmp
=
tt
_local_entry
;
break
;
}
rcu_read_unlock
();
return
hna
_local_entry_tmp
;
return
tt
_local_entry_tmp
;
}
static
struct
hna_global_entry
*
hna
_global_hash_find
(
struct
bat_priv
*
bat_priv
,
static
struct
tt_global_entry
*
tt
_global_hash_find
(
struct
bat_priv
*
bat_priv
,
void
*
data
)
{
struct
hashtable_t
*
hash
=
bat_priv
->
hna
_global_hash
;
struct
hashtable_t
*
hash
=
bat_priv
->
tt
_global_hash
;
struct
hlist_head
*
head
;
struct
hlist_node
*
node
;
struct
hna_global_entry
*
hna
_global_entry
;
struct
hna_global_entry
*
hna
_global_entry_tmp
=
NULL
;
struct
tt_global_entry
*
tt
_global_entry
;
struct
tt_global_entry
*
tt
_global_entry_tmp
=
NULL
;
int
index
;
if
(
!
hash
)
...
...
@@ -98,125 +98,125 @@ static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
head
=
&
hash
->
table
[
index
];
rcu_read_lock
();
hlist_for_each_entry_rcu
(
hna
_global_entry
,
node
,
head
,
hash_entry
)
{
if
(
!
compare_eth
(
hna
_global_entry
,
data
))
hlist_for_each_entry_rcu
(
tt
_global_entry
,
node
,
head
,
hash_entry
)
{
if
(
!
compare_eth
(
tt
_global_entry
,
data
))
continue
;
hna_global_entry_tmp
=
hna
_global_entry
;
tt_global_entry_tmp
=
tt
_global_entry
;
break
;
}
rcu_read_unlock
();
return
hna
_global_entry_tmp
;
return
tt
_global_entry_tmp
;
}
int
hna
_local_init
(
struct
bat_priv
*
bat_priv
)
int
tt
_local_init
(
struct
bat_priv
*
bat_priv
)
{
if
(
bat_priv
->
hna
_local_hash
)
if
(
bat_priv
->
tt
_local_hash
)
return
1
;
bat_priv
->
hna
_local_hash
=
hash_new
(
1024
);
bat_priv
->
tt
_local_hash
=
hash_new
(
1024
);
if
(
!
bat_priv
->
hna
_local_hash
)
if
(
!
bat_priv
->
tt
_local_hash
)
return
0
;
atomic_set
(
&
bat_priv
->
hna
_local_changed
,
0
);
hna
_local_start_timer
(
bat_priv
);
atomic_set
(
&
bat_priv
->
tt
_local_changed
,
0
);
tt
_local_start_timer
(
bat_priv
);
return
1
;
}
void
hna
_local_add
(
struct
net_device
*
soft_iface
,
uint8_t
*
addr
)
void
tt
_local_add
(
struct
net_device
*
soft_iface
,
uint8_t
*
addr
)
{
struct
bat_priv
*
bat_priv
=
netdev_priv
(
soft_iface
);
struct
hna_local_entry
*
hna
_local_entry
;
struct
hna_global_entry
*
hna
_global_entry
;
struct
tt_local_entry
*
tt
_local_entry
;
struct
tt_global_entry
*
tt
_global_entry
;
int
required_bytes
;
spin_lock_bh
(
&
bat_priv
->
hna
_lhash_lock
);
hna_local_entry
=
hna
_local_hash_find
(
bat_priv
,
addr
);
spin_unlock_bh
(
&
bat_priv
->
hna
_lhash_lock
);
spin_lock_bh
(
&
bat_priv
->
tt
_lhash_lock
);
tt_local_entry
=
tt
_local_hash_find
(
bat_priv
,
addr
);
spin_unlock_bh
(
&
bat_priv
->
tt
_lhash_lock
);
if
(
hna
_local_entry
)
{
hna
_local_entry
->
last_seen
=
jiffies
;
if
(
tt
_local_entry
)
{
tt
_local_entry
->
last_seen
=
jiffies
;
return
;
}
/* only announce as many hosts as possible in the batman-packet and
space in batman_packet->num_
hna
That also should give a limit to
space in batman_packet->num_
tt
That also should give a limit to
MAC-flooding. */
required_bytes
=
(
bat_priv
->
num_local_
hna
+
1
)
*
ETH_ALEN
;
required_bytes
=
(
bat_priv
->
num_local_
tt
+
1
)
*
ETH_ALEN
;
required_bytes
+=
BAT_PACKET_LEN
;
if
((
required_bytes
>
ETH_DATA_LEN
)
||
(
atomic_read
(
&
bat_priv
->
aggregated_ogms
)
&&
required_bytes
>
MAX_AGGREGATION_BYTES
)
||
(
bat_priv
->
num_local_
hna
+
1
>
255
))
{
(
bat_priv
->
num_local_
tt
+
1
>
255
))
{
bat_dbg
(
DBG_ROUTES
,
bat_priv
,
"Can't add new local
hna
entry (%pM): "
"number of local
hna
entries exceeds packet size
\n
"
,
"Can't add new local
tt
entry (%pM): "
"number of local
tt
entries exceeds packet size
\n
"
,
addr
);
return
;
}
bat_dbg
(
DBG_ROUTES
,
bat_priv
,
"Creating new local
hna
entry: %pM
\n
"
,
addr
);
"Creating new local
tt
entry: %pM
\n
"
,
addr
);
hna_local_entry
=
kmalloc
(
sizeof
(
struct
hna
_local_entry
),
GFP_ATOMIC
);
if
(
!
hna
_local_entry
)
tt_local_entry
=
kmalloc
(
sizeof
(
struct
tt
_local_entry
),
GFP_ATOMIC
);
if
(
!
tt
_local_entry
)
return
;
memcpy
(
hna
_local_entry
->
addr
,
addr
,
ETH_ALEN
);
hna
_local_entry
->
last_seen
=
jiffies
;
memcpy
(
tt
_local_entry
->
addr
,
addr
,
ETH_ALEN
);
tt
_local_entry
->
last_seen
=
jiffies
;
/* the batman interface mac address should never be purged */
if
(
compare_eth
(
addr
,
soft_iface
->
dev_addr
))
hna
_local_entry
->
never_purge
=
1
;
tt
_local_entry
->
never_purge
=
1
;
else
hna
_local_entry
->
never_purge
=
0
;
tt
_local_entry
->
never_purge
=
0
;
spin_lock_bh
(
&
bat_priv
->
hna
_lhash_lock
);
spin_lock_bh
(
&
bat_priv
->
tt
_lhash_lock
);
hash_add
(
bat_priv
->
hna_local_hash
,
compare_lhna
,
choose_orig
,
hna_local_entry
,
&
hna
_local_entry
->
hash_entry
);
bat_priv
->
num_local_
hna
++
;
atomic_set
(
&
bat_priv
->
hna
_local_changed
,
1
);
hash_add
(
bat_priv
->
tt_local_hash
,
compare_ltt
,
choose_orig
,
tt_local_entry
,
&
tt
_local_entry
->
hash_entry
);
bat_priv
->
num_local_
tt
++
;
atomic_set
(
&
bat_priv
->
tt
_local_changed
,
1
);
spin_unlock_bh
(
&
bat_priv
->
hna
_lhash_lock
);
spin_unlock_bh
(
&
bat_priv
->
tt
_lhash_lock
);
/* remove address from global hash if present */
spin_lock_bh
(
&
bat_priv
->
hna
_ghash_lock
);
spin_lock_bh
(
&
bat_priv
->
tt
_ghash_lock
);
hna_global_entry
=
hna
_global_hash_find
(
bat_priv
,
addr
);
tt_global_entry
=
tt
_global_hash_find
(
bat_priv
,
addr
);
if
(
hna
_global_entry
)
_
hna_global_del_orig
(
bat_priv
,
hna
_global_entry
,
"local
hna
received"
);
if
(
tt
_global_entry
)
_
tt_global_del_orig
(
bat_priv
,
tt
_global_entry
,
"local
tt
received"
);
spin_unlock_bh
(
&
bat_priv
->
hna
_ghash_lock
);
spin_unlock_bh
(
&
bat_priv
->
tt
_ghash_lock
);
}
-int hna_local_fill_buffer(struct bat_priv *bat_priv,
+int tt_local_fill_buffer(struct bat_priv *bat_priv,
 			  unsigned char *buff, int buff_len)
 {
-	struct hashtable_t *hash = bat_priv->hna_local_hash;
-	struct hna_local_entry *hna_local_entry;
+	struct hashtable_t *hash = bat_priv->tt_local_hash;
+	struct tt_local_entry *tt_local_entry;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	int i, count = 0;

-	spin_lock_bh(&bat_priv->hna_lhash_lock);
+	spin_lock_bh(&bat_priv->tt_lhash_lock);

 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(hna_local_entry, node,
+		hlist_for_each_entry_rcu(tt_local_entry, node,
 					 head, hash_entry) {
 			if (buff_len < (count + 1) * ETH_ALEN)
 				break;

-			memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
+			memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
 			       ETH_ALEN);

 			count++;
...
@@ -224,20 +224,20 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv,
 		rcu_read_unlock();
 	}

-	/* if we did not get all new local hnas see you next time ;-) */
-	if (count == bat_priv->num_local_hna)
-		atomic_set(&bat_priv->hna_local_changed, 0);
+	/* if we did not get all new local tts see you next time ;-) */
+	if (count == bat_priv->num_local_tt)
+		atomic_set(&bat_priv->tt_local_changed, 0);

-	spin_unlock_bh(&bat_priv->hna_lhash_lock);
+	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 	return count;
 }
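tt_local_fill_buffer() packs the local client MACs into a flat buffer, ETH_ALEN bytes per entry, which is what later travels with the OGM. A small user-space sketch of walking such a buffer, using the same stride arithmetic that tt_global_add_orig() applies further down:

#include <stdio.h>

#define ETH_ALEN 6

/* Walk a packed buffer of MAC addresses, ETH_ALEN bytes per entry, the way
 * the fill/parse helpers in this file do (illustration only). */
static void print_tt_buffer(const unsigned char *buff, int buff_len)
{
	int count = 0;

	while ((count + 1) * ETH_ALEN <= buff_len) {
		const unsigned char *mac = buff + count * ETH_ALEN;

		printf(" * %02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		count++;
	}
}

int main(void)
{
	unsigned char buff[2 * ETH_ALEN] = {
		0x02, 0x11, 0x22, 0x33, 0x44, 0x55,
		0x02, 0x66, 0x77, 0x88, 0x99, 0xaa,
	};

	print_tt_buffer(buff, sizeof(buff));
	return 0;
}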
-int hna_local_seq_print_text(struct seq_file *seq, void *offset)
+int tt_local_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct hashtable_t *hash = bat_priv->hna_local_hash;
-	struct hna_local_entry *hna_local_entry;
+	struct hashtable_t *hash = bat_priv->tt_local_hash;
+	struct tt_local_entry *tt_local_entry;
 	struct hard_iface *primary_if;
 	struct hlist_node *node;
 	struct hlist_head *head;
...
@@ -261,10 +261,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 	}

 	seq_printf(seq, "Locally retrieved addresses (from %s) "
-		   "announced via HNA:\n",
+		   "announced via TT:\n",
 		   net_dev->name);

-	spin_lock_bh(&bat_priv->hna_lhash_lock);
+	spin_lock_bh(&bat_priv->tt_lhash_lock);

 	buf_size = 1;
 	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
...
@@ -279,7 +279,7 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 	buff = kmalloc(buf_size, GFP_ATOMIC);
 	if (!buff) {
-		spin_unlock_bh(&bat_priv->hna_lhash_lock);
+		spin_unlock_bh(&bat_priv->tt_lhash_lock);
 		ret = -ENOMEM;
 		goto out;
 	}
...
@@ -291,15 +291,15 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(hna_local_entry, node,
+		hlist_for_each_entry_rcu(tt_local_entry, node,
 					 head, hash_entry) {
 			pos += snprintf(buff + pos, 22, " * %pM\n",
-					hna_local_entry->addr);
+					tt_local_entry->addr);
 		}
 		rcu_read_unlock();
 	}

-	spin_unlock_bh(&bat_priv->hna_lhash_lock);
+	spin_unlock_bh(&bat_priv->tt_lhash_lock);

 	seq_printf(seq, "%s", buff);
 	kfree(buff);
...
@@ -309,180 +309,180 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
 	return ret;
 }
-static void _hna_local_del(struct hlist_node *node, void *arg)
+static void _tt_local_del(struct hlist_node *node, void *arg)
 {
 	struct bat_priv *bat_priv = (struct bat_priv *)arg;
-	void *data = container_of(node, struct hna_local_entry, hash_entry);
+	void *data = container_of(node, struct tt_local_entry, hash_entry);

 	kfree(data);
-	bat_priv->num_local_hna--;
-	atomic_set(&bat_priv->hna_local_changed, 1);
+	bat_priv->num_local_tt--;
+	atomic_set(&bat_priv->tt_local_changed, 1);
 }
-static void hna_local_del(struct bat_priv *bat_priv,
-			  struct hna_local_entry *hna_local_entry,
+static void tt_local_del(struct bat_priv *bat_priv,
+			 struct tt_local_entry *tt_local_entry,
 			  char *message)
 {
-	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
-		hna_local_entry->addr, message);
+	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n",
+		tt_local_entry->addr, message);

-	hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig,
-		    hna_local_entry->addr);
-	_hna_local_del(&hna_local_entry->hash_entry, bat_priv);
+	hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
+		    tt_local_entry->addr);
+	_tt_local_del(&tt_local_entry->hash_entry, bat_priv);
 }
-void hna_local_remove(struct bat_priv *bat_priv,
+void tt_local_remove(struct bat_priv *bat_priv,
 		      uint8_t *addr, char *message)
 {
-	struct hna_local_entry *hna_local_entry;
+	struct tt_local_entry *tt_local_entry;

-	spin_lock_bh(&bat_priv->hna_lhash_lock);
+	spin_lock_bh(&bat_priv->tt_lhash_lock);

-	hna_local_entry = hna_local_hash_find(bat_priv, addr);
+	tt_local_entry = tt_local_hash_find(bat_priv, addr);

-	if (hna_local_entry)
-		hna_local_del(bat_priv, hna_local_entry, message);
+	if (tt_local_entry)
+		tt_local_del(bat_priv, tt_local_entry, message);

-	spin_unlock_bh(&bat_priv->hna_lhash_lock);
+	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 }
-static void hna_local_purge(struct work_struct *work)
+static void tt_local_purge(struct work_struct *work)
 {
 	struct delayed_work *delayed_work =
 		container_of(work, struct delayed_work, work);
 	struct bat_priv *bat_priv =
-		container_of(delayed_work, struct bat_priv, hna_work);
-	struct hashtable_t *hash = bat_priv->hna_local_hash;
-	struct hna_local_entry *hna_local_entry;
+		container_of(delayed_work, struct bat_priv, tt_work);
+	struct hashtable_t *hash = bat_priv->tt_local_hash;
+	struct tt_local_entry *tt_local_entry;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
 	unsigned long timeout;
 	int i;

-	spin_lock_bh(&bat_priv->hna_lhash_lock);
+	spin_lock_bh(&bat_priv->tt_lhash_lock);

 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
+		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
 					  head, hash_entry) {
-			if (hna_local_entry->never_purge)
+			if (tt_local_entry->never_purge)
 				continue;

-			timeout = hna_local_entry->last_seen;
-			timeout += LOCAL_HNA_TIMEOUT * HZ;
+			timeout = tt_local_entry->last_seen;
+			timeout += TT_LOCAL_TIMEOUT * HZ;

 			if (time_before(jiffies, timeout))
 				continue;

-			hna_local_del(bat_priv, hna_local_entry,
+			tt_local_del(bat_priv, tt_local_entry,
 				     "address timed out");
 		}
 	}

-	spin_unlock_bh(&bat_priv->hna_lhash_lock);
-	hna_local_start_timer(bat_priv);
+	spin_unlock_bh(&bat_priv->tt_lhash_lock);
+	tt_local_start_timer(bat_priv);
 }
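tt_local_purge() re-arms itself via tt_local_start_timer() and drops every entry whose last_seen is older than TT_LOCAL_TIMEOUT, except the never_purge entry for the soft interface's own MAC. A user-space sketch of that expiry test; the timeout value is an assumption for illustration, and the kernel code works in jiffies rather than seconds:

#include <stdbool.h>

#define TT_LOCAL_TIMEOUT 3600	/* seconds; value assumed for this sketch */

struct tt_entry {
	long last_seen;		/* seconds on a monotonic clock */
	bool never_purge;	/* set for the soft interface's own MAC */
};

/* An entry expires once TT_LOCAL_TIMEOUT has elapsed since last_seen,
 * unless it is marked never_purge. */
static bool tt_entry_expired(const struct tt_entry *entry, long now)
{
	if (entry->never_purge)
		return false;
	return now >= entry->last_seen + TT_LOCAL_TIMEOUT;
}

int main(void)
{
	struct tt_entry entry = { .last_seen = 0, .never_purge = false };

	return tt_entry_expired(&entry, TT_LOCAL_TIMEOUT + 1) ? 0 : 1;
}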
-void hna_local_free(struct bat_priv *bat_priv)
+void tt_local_free(struct bat_priv *bat_priv)
 {
-	if (!bat_priv->hna_local_hash)
+	if (!bat_priv->tt_local_hash)
 		return;

-	cancel_delayed_work_sync(&bat_priv->hna_work);
-	hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
-	bat_priv->hna_local_hash = NULL;
+	cancel_delayed_work_sync(&bat_priv->tt_work);
+	hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv);
+	bat_priv->tt_local_hash = NULL;
 }
-int hna_global_init(struct bat_priv *bat_priv)
+int tt_global_init(struct bat_priv *bat_priv)
 {
-	if (bat_priv->hna_global_hash)
+	if (bat_priv->tt_global_hash)
 		return 1;

-	bat_priv->hna_global_hash = hash_new(1024);
+	bat_priv->tt_global_hash = hash_new(1024);

-	if (!bat_priv->hna_global_hash)
+	if (!bat_priv->tt_global_hash)
 		return 0;

 	return 1;
 }
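tt_global_init() (like its local counterpart) is idempotent and returns 1 on success, 0 on allocation failure. A stand-alone sketch of the same pattern, with a simplified hash_new() standing in for the kernel hash helper:

#include <stdlib.h>

struct hashtable {
	void **table;
	int size;
};

/* Simplified stand-in for the kernel hash_new() helper. */
static struct hashtable *hash_new(int size)
{
	struct hashtable *hash = malloc(sizeof(*hash));

	if (!hash)
		return NULL;
	hash->table = calloc(size, sizeof(void *));
	if (!hash->table) {
		free(hash);
		return NULL;
	}
	hash->size = size;
	return hash;
}

/* Idempotent init: a no-op success if the table already exists,
 * 0 only when allocation fails. */
static int tt_global_init_sketch(struct hashtable **tt_global_hash)
{
	if (*tt_global_hash)
		return 1;

	*tt_global_hash = hash_new(1024);
	if (!*tt_global_hash)
		return 0;

	return 1;
}

int main(void)
{
	struct hashtable *tt_global_hash = NULL;

	return tt_global_init_sketch(&tt_global_hash) ? 0 : 1;
}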
-void hna_global_add_orig(struct bat_priv *bat_priv,
+void tt_global_add_orig(struct bat_priv *bat_priv,
 			 struct orig_node *orig_node,
-			 unsigned char *hna_buff, int hna_buff_len)
+			 unsigned char *tt_buff, int tt_buff_len)
 {
-	struct hna_global_entry *hna_global_entry;
-	struct hna_local_entry *hna_local_entry;
-	int hna_buff_count = 0;
-	unsigned char *hna_ptr;
+	struct tt_global_entry *tt_global_entry;
+	struct tt_local_entry *tt_local_entry;
+	int tt_buff_count = 0;
+	unsigned char *tt_ptr;

-	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
-		spin_lock_bh(&bat_priv->hna_ghash_lock);
+	while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
+		spin_lock_bh(&bat_priv->tt_ghash_lock);

-		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
+		tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
+		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);

-		if (!hna_global_entry) {
-			spin_unlock_bh(&bat_priv->hna_ghash_lock);
+		if (!tt_global_entry) {
+			spin_unlock_bh(&bat_priv->tt_ghash_lock);

-			hna_global_entry = kmalloc(sizeof(struct hna_global_entry),
+			tt_global_entry = kmalloc(sizeof(struct tt_global_entry),
 						   GFP_ATOMIC);

-			if (!hna_global_entry)
+			if (!tt_global_entry)
 				break;

-			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
+			memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN);

 			bat_dbg(DBG_ROUTES, bat_priv,
-				"Creating new global hna entry: "
+				"Creating new global tt entry: "
 				"%pM (via %pM)\n",
-				hna_global_entry->addr, orig_node->orig);
+				tt_global_entry->addr, orig_node->orig);

-			spin_lock_bh(&bat_priv->hna_ghash_lock);
-			hash_add(bat_priv->hna_global_hash, compare_ghna,
-				 choose_orig, hna_global_entry,
-				 &hna_global_entry->hash_entry);
+			spin_lock_bh(&bat_priv->tt_ghash_lock);
+			hash_add(bat_priv->tt_global_hash, compare_gtt,
				 choose_orig, tt_global_entry,
+				 &tt_global_entry->hash_entry);
 		}

-		hna_global_entry->orig_node = orig_node;
-		spin_unlock_bh(&bat_priv->hna_ghash_lock);
+		tt_global_entry->orig_node = orig_node;
+		spin_unlock_bh(&bat_priv->tt_ghash_lock);

 		/* remove address from local hash if present */
-		spin_lock_bh(&bat_priv->hna_lhash_lock);
+		spin_lock_bh(&bat_priv->tt_lhash_lock);

-		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);
+		tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
+		tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr);

-		if (hna_local_entry)
-			hna_local_del(bat_priv, hna_local_entry,
-				      "global hna received");
+		if (tt_local_entry)
+			tt_local_del(bat_priv, tt_local_entry,
+				     "global tt received");

-		spin_unlock_bh(&bat_priv->hna_lhash_lock);
+		spin_unlock_bh(&bat_priv->tt_lhash_lock);

-		hna_buff_count++;
+		tt_buff_count++;
 	}

 	/* initialize, and overwrite if malloc succeeds */
-	orig_node->hna_buff = NULL;
-	orig_node->hna_buff_len = 0;
+	orig_node->tt_buff = NULL;
+	orig_node->tt_buff_len = 0;

-	if (hna_buff_len > 0) {
-		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
-		if (orig_node->hna_buff) {
-			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
-			orig_node->hna_buff_len = hna_buff_len;
+	if (tt_buff_len > 0) {
+		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
+		if (orig_node->tt_buff) {
+			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
+			orig_node->tt_buff_len = tt_buff_len;
 		}
 	}
 }
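Besides populating the global table, tt_global_add_orig() keeps a private copy of the announced buffer in orig_node->tt_buff so that tt_global_del_orig() can later remove exactly those entries again. A user-space sketch of that snapshot step; struct orig_snapshot is a stand-in for the relevant orig_node fields, not kernel code:

#include <stdlib.h>
#include <string.h>

struct orig_snapshot {
	unsigned char *tt_buff;
	int tt_buff_len;
};

/* Keep a private copy of the announced MAC buffer; on allocation failure
 * the snapshot simply stays empty. */
static void snapshot_tt_buff(struct orig_snapshot *orig,
			     const unsigned char *tt_buff, int tt_buff_len)
{
	/* initialize, and overwrite if malloc succeeds */
	orig->tt_buff = NULL;
	orig->tt_buff_len = 0;

	if (tt_buff_len <= 0)
		return;

	orig->tt_buff = malloc(tt_buff_len);
	if (!orig->tt_buff)
		return;

	memcpy(orig->tt_buff, tt_buff, tt_buff_len);
	orig->tt_buff_len = tt_buff_len;
}

int main(void)
{
	struct orig_snapshot orig;
	unsigned char buff[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	snapshot_tt_buff(&orig, buff, sizeof(buff));
	free(orig.tt_buff);
	return 0;
}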
-int hna_global_seq_print_text(struct seq_file *seq, void *offset)
+int tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct hashtable_t *hash = bat_priv->hna_global_hash;
-	struct hna_global_entry *hna_global_entry;
+	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct tt_global_entry *tt_global_entry;
 	struct hard_iface *primary_if;
 	struct hlist_node *node;
 	struct hlist_head *head;
...
@@ -505,10 +505,11 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 		goto out;
 	}

-	seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
+	seq_printf(seq,
+		   "Globally announced TT entries received via the mesh %s\n",
 		   net_dev->name);

-	spin_lock_bh(&bat_priv->hna_ghash_lock);
+	spin_lock_bh(&bat_priv->tt_ghash_lock);

 	buf_size = 1;
 	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
...
@@ -523,7 +524,7 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 	buff = kmalloc(buf_size, GFP_ATOMIC);
 	if (!buff) {
-		spin_unlock_bh(&bat_priv->hna_ghash_lock);
+		spin_unlock_bh(&bat_priv->tt_ghash_lock);
 		ret = -ENOMEM;
 		goto out;
 	}
...
@@ -534,17 +535,17 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(hna_global_entry, node,
+		hlist_for_each_entry_rcu(tt_global_entry, node,
 					 head, hash_entry) {
 			pos += snprintf(buff + pos, 44, " * %pM via %pM\n",
-					hna_global_entry->addr,
-					hna_global_entry->orig_node->orig);
+					tt_global_entry->addr,
+					tt_global_entry->orig_node->orig);
 		}
 		rcu_read_unlock();
 	}

-	spin_unlock_bh(&bat_priv->hna_ghash_lock);
+	spin_unlock_bh(&bat_priv->tt_ghash_lock);

 	seq_printf(seq, "%s", buff);
 	kfree(buff);
...
@@ -554,84 +555,84 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 	return ret;
 }
-static void _hna_global_del_orig(struct bat_priv *bat_priv,
-				 struct hna_global_entry *hna_global_entry,
+static void _tt_global_del_orig(struct bat_priv *bat_priv,
+				struct tt_global_entry *tt_global_entry,
 				 char *message)
 {
 	bat_dbg(DBG_ROUTES, bat_priv,
-		"Deleting global hna entry %pM (via %pM): %s\n",
-		hna_global_entry->addr, hna_global_entry->orig_node->orig,
+		"Deleting global tt entry %pM (via %pM): %s\n",
+		tt_global_entry->addr, tt_global_entry->orig_node->orig,
 		message);

-	hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
-		    hna_global_entry->addr);
-	kfree(hna_global_entry);
+	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
+		    tt_global_entry->addr);
+	kfree(tt_global_entry);
 }
-void hna_global_del_orig(struct bat_priv *bat_priv,
+void tt_global_del_orig(struct bat_priv *bat_priv,
 			 struct orig_node *orig_node,
 			 char *message)
 {
-	struct hna_global_entry *hna_global_entry;
-	int hna_buff_count = 0;
-	unsigned char *hna_ptr;
+	struct tt_global_entry *tt_global_entry;
+	int tt_buff_count = 0;
+	unsigned char *tt_ptr;

-	if (orig_node->hna_buff_len == 0)
+	if (orig_node->tt_buff_len == 0)
 		return;

-	spin_lock_bh(&bat_priv->hna_ghash_lock);
+	spin_lock_bh(&bat_priv->tt_ghash_lock);

-	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
-		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
+	while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) {
+		tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN);
+		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);

-		if ((hna_global_entry) &&
-		    (hna_global_entry->orig_node == orig_node))
-			_hna_global_del_orig(bat_priv, hna_global_entry,
+		if ((tt_global_entry) &&
+		    (tt_global_entry->orig_node == orig_node))
+			_tt_global_del_orig(bat_priv, tt_global_entry,
 					    message);

-		hna_buff_count++;
+		tt_buff_count++;
 	}

-	spin_unlock_bh(&bat_priv->hna_ghash_lock);
+	spin_unlock_bh(&bat_priv->tt_ghash_lock);

-	orig_node->hna_buff_len = 0;
-	kfree(orig_node->hna_buff);
-	orig_node->hna_buff = NULL;
+	orig_node->tt_buff_len = 0;
+	kfree(orig_node->tt_buff);
+	orig_node->tt_buff = NULL;
 }
-static void hna_global_del(struct hlist_node *node, void *arg)
+static void tt_global_del(struct hlist_node *node, void *arg)
 {
-	void *data = container_of(node, struct hna_global_entry, hash_entry);
+	void *data = container_of(node, struct tt_global_entry, hash_entry);

 	kfree(data);
 }
-void hna_global_free(struct bat_priv *bat_priv)
+void tt_global_free(struct bat_priv *bat_priv)
 {
-	if (!bat_priv->hna_global_hash)
+	if (!bat_priv->tt_global_hash)
 		return;

-	hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
-	bat_priv->hna_global_hash = NULL;
+	hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL);
+	bat_priv->tt_global_hash = NULL;
 }
 struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
 {
-	struct hna_global_entry *hna_global_entry;
+	struct tt_global_entry *tt_global_entry;
 	struct orig_node *orig_node = NULL;

-	spin_lock_bh(&bat_priv->hna_ghash_lock);
-	hna_global_entry = hna_global_hash_find(bat_priv, addr);
+	spin_lock_bh(&bat_priv->tt_ghash_lock);
+	tt_global_entry = tt_global_hash_find(bat_priv, addr);

-	if (!hna_global_entry)
+	if (!tt_global_entry)
 		goto out;

-	if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
+	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
 		goto out;

-	orig_node = hna_global_entry->orig_node;
+	orig_node = tt_global_entry->orig_node;

 out:
-	spin_unlock_bh(&bat_priv->hna_ghash_lock);
+	spin_unlock_bh(&bat_priv->tt_ghash_lock);
 	return orig_node;
 }
net/batman-adv/translation-table.h
...
@@ -22,22 +22,22 @@
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_

-int hna_local_init(struct bat_priv *bat_priv);
-void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
-void hna_local_remove(struct bat_priv *bat_priv,
+int tt_local_init(struct bat_priv *bat_priv);
+void tt_local_add(struct net_device *soft_iface, uint8_t *addr);
+void tt_local_remove(struct bat_priv *bat_priv,
 		      uint8_t *addr, char *message);
-int hna_local_fill_buffer(struct bat_priv *bat_priv,
+int tt_local_fill_buffer(struct bat_priv *bat_priv,
 			  unsigned char *buff, int buff_len);
-int hna_local_seq_print_text(struct seq_file *seq, void *offset);
-void hna_local_free(struct bat_priv *bat_priv);
-int hna_global_init(struct bat_priv *bat_priv);
-void hna_global_add_orig(struct bat_priv *bat_priv,
+int tt_local_seq_print_text(struct seq_file *seq, void *offset);
+void tt_local_free(struct bat_priv *bat_priv);
+int tt_global_init(struct bat_priv *bat_priv);
+void tt_global_add_orig(struct bat_priv *bat_priv,
 			 struct orig_node *orig_node,
-			 unsigned char *hna_buff, int hna_buff_len);
-int hna_global_seq_print_text(struct seq_file *seq, void *offset);
-void hna_global_del_orig(struct bat_priv *bat_priv,
+			 unsigned char *tt_buff, int tt_buff_len);
+int tt_global_seq_print_text(struct seq_file *seq, void *offset);
+void tt_global_del_orig(struct bat_priv *bat_priv,
 			 struct orig_node *orig_node,
 			 char *message);
-void hna_global_free(struct bat_priv *bat_priv);
+void tt_global_free(struct bat_priv *bat_priv);
 struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr);

 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
net/batman-adv/types.h
...
@@ -75,8 +75,8 @@ struct orig_node {
 	unsigned long batman_seqno_reset;
 	uint8_t gw_flags;
 	uint8_t flags;
-	unsigned char *hna_buff;
-	int16_t hna_buff_len;
+	unsigned char *tt_buff;
+	int16_t tt_buff_len;
 	uint32_t last_real_seqno;
 	uint8_t last_ttl;
 	unsigned long bcast_bits[NUM_WORDS];
...
@@ -89,11 +89,11 @@ struct orig_node {
 	struct hlist_node hash_entry;
 	struct bat_priv *bat_priv;
 	unsigned long last_frag_packet;
-	spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum,
-				  * neigh_node->real_bits,
-				  * neigh_node->real_packet_count */
-	spinlock_t bcast_seqno_lock; /* protects bcast_bits,
-				      * last_bcast_seqno */
+	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+	 * neigh_node->real_bits, neigh_node->real_packet_count */
+	spinlock_t ogm_cnt_lock;
+	/* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
+	spinlock_t bcast_seqno_lock;
 	atomic_t bond_candidates;
 	struct list_head bond_list;
 };
...
@@ -146,30 +146,30 @@ struct bat_priv {
 	atomic_t bcast_queue_left;
 	atomic_t batman_queue_left;
 	char num_ifaces;
-	struct hlist_head softif_neigh_list;
-	struct softif_neigh __rcu *softif_neigh;
 	struct debug_log *debug_log;
 	struct kobject *mesh_obj;
 	struct dentry *debug_dir;
 	struct hlist_head forw_bat_list;
 	struct hlist_head forw_bcast_list;
 	struct hlist_head gw_list;
+	struct hlist_head softif_neigh_vids;
 	struct list_head vis_send_list;
 	struct hashtable_t *orig_hash;
-	struct hashtable_t *hna_local_hash;
-	struct hashtable_t *hna_global_hash;
+	struct hashtable_t *tt_local_hash;
+	struct hashtable_t *tt_global_hash;
 	struct hashtable_t *vis_hash;
 	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 	spinlock_t forw_bcast_list_lock; /* protects */
-	spinlock_t hna_lhash_lock; /* protects hna_local_hash */
-	spinlock_t hna_ghash_lock; /* protects hna_global_hash */
+	spinlock_t tt_lhash_lock; /* protects tt_local_hash */
+	spinlock_t tt_ghash_lock; /* protects tt_global_hash */
 	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
 	spinlock_t vis_hash_lock; /* protects vis_hash */
 	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
 	spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
-	int16_t num_local_hna;
-	atomic_t hna_local_changed;
-	struct delayed_work hna_work;
+	spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
+	int16_t num_local_tt;
+	atomic_t tt_local_changed;
+	struct delayed_work tt_work;
 	struct delayed_work orig_work;
 	struct delayed_work vis_work;
 	struct gw_node __rcu *curr_gw;  /* rcu protected pointer */
...
@@ -192,14 +192,14 @@ struct socket_packet {
 	struct icmp_packet_rr icmp_packet;
 };

-struct hna_local_entry {
+struct tt_local_entry {
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
 	char never_purge;
 	struct hlist_node hash_entry;
 };

-struct hna_global_entry {
+struct tt_global_entry {
 	uint8_t addr[ETH_ALEN];
 	struct orig_node *orig_node;
 	struct hlist_node hash_entry;
...
@@ -262,7 +262,7 @@ struct vis_info {
 struct vis_info_entry {
 	uint8_t src[ETH_ALEN];
 	uint8_t dest[ETH_ALEN];
-	uint8_t quality;	/* quality = 0 means HNA */
+	uint8_t quality;	/* quality = 0 client */
 } __packed;

 struct recvlist_node {
...
@@ -270,11 +270,20 @@ struct recvlist_node {
 	uint8_t mac[ETH_ALEN];
 };

+struct softif_neigh_vid {
+	struct hlist_node list;
+	struct bat_priv *bat_priv;
+	short vid;
+	atomic_t refcount;
+	struct softif_neigh __rcu *softif_neigh;
+	struct rcu_head rcu;
+	struct hlist_head softif_neigh_list;
+};
+
 struct softif_neigh {
 	struct hlist_node list;
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
-	short vid;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
...
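The new softif_neigh_vid structure groups the soft-interface neighbours per VLAN id: each vid gets its own neighbour list, refcount and currently selected neighbour, while the vid field disappears from softif_neigh itself. A simplified user-space sketch of that per-vid lookup, where plain pointers replace the kernel hlist/RCU machinery:

#include <stddef.h>
#include <stdio.h>

#define ETH_ALEN 6

struct softif_neigh {
	unsigned char addr[ETH_ALEN];
	unsigned long last_seen;
	struct softif_neigh *next;
};

/* One bucket per VLAN id, owning its neighbour list and the currently
 * chosen neighbour (stand-in for the __rcu pointer). */
struct softif_neigh_vid {
	short vid;
	struct softif_neigh *chosen;
	struct softif_neigh *neigh_list;
	struct softif_neigh_vid *next;
};

static struct softif_neigh_vid *
softif_neigh_vid_get(struct softif_neigh_vid *vids, short vid)
{
	for (; vids; vids = vids->next)
		if (vids->vid == vid)
			return vids;
	return NULL;
}

int main(void)
{
	struct softif_neigh_vid untagged = { .vid = -1 };
	struct softif_neigh_vid vlan5 = { .vid = 5, .next = &untagged };

	printf("%d\n", softif_neigh_vid_get(&vlan5, -1) ? 1 : 0); /* 1: found */
	return 0;
}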
net/batman-adv/unicast.c
...
@@ -300,7 +300,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
 		goto find_router;
 	}

-	/* check for hna host - increases orig_node refcount */
+	/* check for tt host - increases orig_node refcount */
 	orig_node = transtable_search(bat_priv, ethhdr->h_dest);

 find_router:
...
net/batman-adv/vis.c
...
@@ -194,7 +194,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
 {
 	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
 	if (primary && entry->quality == 0)
-		return sprintf(buff, "HNA %pM, ", entry->dest);
+		return sprintf(buff, "TT %pM, ", entry->dest);
 	else if (compare_eth(entry->src, src))
 		return sprintf(buff, "TQ %pM %d, ", entry->dest,
 			       entry->quality);
...
@@ -622,7 +622,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
 	struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
 	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
 	struct vis_info_entry *entry;
-	struct hna_local_entry *hna_local_entry;
+	struct tt_local_entry *tt_local_entry;
 	int best_tq = -1, i;

 	info->first_seen = jiffies;
...
@@ -678,29 +678,29 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
 		rcu_read_unlock();
 	}

-	hash = bat_priv->hna_local_hash;
+	hash = bat_priv->tt_local_hash;

-	spin_lock_bh(&bat_priv->hna_lhash_lock);
+	spin_lock_bh(&bat_priv->tt_lhash_lock);
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry(hna_local_entry, node, head, hash_entry) {
+		hlist_for_each_entry(tt_local_entry, node, head, hash_entry) {
 			entry = (struct vis_info_entry *)
 				skb_put(info->skb_packet, sizeof(*entry));
 			memset(entry->src, 0, ETH_ALEN);
-			memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN);
-			entry->quality = 0; /* 0 means HNA */
+			memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
+			entry->quality = 0; /* 0 means TT */
 			packet->entries++;

 			if (vis_packet_full(info)) {
-				spin_unlock_bh(&bat_priv->hna_lhash_lock);
+				spin_unlock_bh(&bat_priv->tt_lhash_lock);
 				return 0;
 			}
 		}
 	}

-	spin_unlock_bh(&bat_priv->hna_lhash_lock);
+	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 	return 0;

 unlock:
...
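In the vis output the quality field doubles as a type tag: 0 marks a TT (client) announcement, anything else is the TQ of a link towards another originator. A small self-contained sketch of that classification:

#include <stdio.h>

#define ETH_ALEN 6

struct vis_info_entry {
	unsigned char src[ETH_ALEN];
	unsigned char dest[ETH_ALEN];
	unsigned char quality;	/* 0 = TT (client), otherwise TQ of a link */
};

static const char *vis_entry_type(const struct vis_info_entry *entry)
{
	return entry->quality == 0 ? "TT" : "TQ";
}

int main(void)
{
	struct vis_info_entry client = { .quality = 0 };
	struct vis_info_entry link   = { .quality = 200 };

	printf("%s %s\n", vis_entry_type(&client), vis_entry_type(&link)); /* TT TQ */
	return 0;
}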