Commit d401c1d1 authored by David S. Miller

Merge tag 'batadv-next-for-davem-20161108-v2' of git://git.open-mesh.org/linux-merge

Simon Wunderlich says:

====================
pull request for net-next: batman-adv 2016-11-08 v2

This feature and cleanup patchset includes the following changes:

 - netlink and code cleanups by Sven Eckelmann (3 patches)

 - Cleanup and minor fixes by Linus Luessing (3 patches)

 - Speed up multicast update intervals, by Linus Luessing

 - Avoid (re)broadcast in meshes for some easy cases,
   by Linus Luessing

 - Clean up tx return state handling, by Sven Eckelmann (6 patches)

 - Fix some special mac address handling cases, by Sven Eckelmann
   (3 patches)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a6dfdb4e 93bbaab4
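The "clean up tx return state handling" series above converges on one skb-ownership rule: a receive handler always frees the skb itself, calling consume_skb() for packets that were handled and kfree_skb() for packets that were dropped, so the dispatcher in batadv_batman_skb_recv() no longer frees on NET_RX_DROP. A minimal sketch of that contract follows; example_packet_recv() is an illustrative name rather than a function from the patchset, and the pskb_may_pull() check merely stands in for the real sanity checks (batman-adv internal headers assumed).

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Sketch only: mirrors the reworked handlers in the diff below, where
 * every exit path either consumes or drops the skb before returning.
 */
static int example_packet_recv(struct sk_buff *skb,
                               struct batadv_hard_iface *recv_if)
{
        int ret = NET_RX_DROP;

        if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
                goto free_skb;

        /* ... validate and process the packet ... */

        ret = NET_RX_SUCCESS;

free_skb:
        if (ret == NET_RX_SUCCESS)
                consume_skb(skb);       /* handled: not reported as a drop */
        else
                kfree_skb(skb);         /* dropped: visible to drop monitors */

        return ret;
}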
......@@ -698,7 +698,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size);
if (!forw_packet_aggr->skb) {
batadv_forw_packet_free(forw_packet_aggr);
batadv_forw_packet_free(forw_packet_aggr, true);
return;
}
......@@ -1611,7 +1611,7 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
if (hardif_neigh)
batadv_hardif_neigh_put(hardif_neigh);
kfree_skb(skb_priv);
consume_skb(skb_priv);
}
/**
......@@ -1783,6 +1783,7 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
struct delayed_work *delayed_work;
struct batadv_forw_packet *forw_packet;
struct batadv_priv *bat_priv;
bool dropped = false;
delayed_work = to_delayed_work(work);
forw_packet = container_of(delayed_work, struct batadv_forw_packet,
......@@ -1792,8 +1793,10 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
hlist_del(&forw_packet->list);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
dropped = true;
goto out;
}
batadv_iv_ogm_emit(forw_packet);
......@@ -1810,7 +1813,7 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
batadv_iv_ogm_schedule(forw_packet->if_incoming);
out:
batadv_forw_packet_free(forw_packet);
batadv_forw_packet_free(forw_packet, dropped);
}
static int batadv_iv_ogm_receive(struct sk_buff *skb,
......@@ -1820,17 +1823,18 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
struct batadv_ogm_packet *ogm_packet;
u8 *packet_pos;
int ogm_offset;
bool ret;
bool res;
int ret = NET_RX_DROP;
ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
if (!ret)
return NET_RX_DROP;
res = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
if (!res)
goto free_skb;
/* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
* that does not have B.A.T.M.A.N. IV enabled ?
*/
if (bat_priv->algo_ops->iface.enable != batadv_iv_ogm_iface_enable)
return NET_RX_DROP;
goto free_skb;
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
......@@ -1851,8 +1855,15 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
ogm_packet = (struct batadv_ogm_packet *)packet_pos;
}
ret = NET_RX_SUCCESS;
free_skb:
if (ret == NET_RX_SUCCESS)
consume_skb(skb);
else
kfree_skb(skb);
return NET_RX_SUCCESS;
return ret;
}
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
......
......@@ -492,20 +492,21 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
struct batadv_elp_packet *elp_packet;
struct batadv_hard_iface *primary_if;
struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
bool ret;
bool res;
int ret = NET_RX_DROP;
ret = batadv_check_management_packet(skb, if_incoming, BATADV_ELP_HLEN);
if (!ret)
return NET_RX_DROP;
res = batadv_check_management_packet(skb, if_incoming, BATADV_ELP_HLEN);
if (!res)
goto free_skb;
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
return NET_RX_DROP;
goto free_skb;
/* did we receive a B.A.T.M.A.N. V ELP packet on an interface
* that does not have B.A.T.M.A.N. V ELP enabled ?
*/
if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
return NET_RX_DROP;
goto free_skb;
elp_packet = (struct batadv_elp_packet *)skb->data;
......@@ -516,14 +517,19 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
goto free_skb;
batadv_v_elp_neigh_update(bat_priv, ethhdr->h_source, if_incoming,
elp_packet);
out:
if (primary_if)
ret = NET_RX_SUCCESS;
batadv_hardif_put(primary_if);
free_skb:
if (ret == NET_RX_SUCCESS)
consume_skb(skb);
else
kfree_skb(skb);
return NET_RX_SUCCESS;
return ret;
}
......@@ -140,6 +140,7 @@ static void batadv_v_ogm_send(struct work_struct *work)
unsigned char *ogm_buff, *pkt_buff;
int ogm_buff_len;
u16 tvlv_len = 0;
int ret;
bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
......@@ -182,6 +183,31 @@ static void batadv_v_ogm_send(struct work_struct *work)
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
ret = batadv_hardif_no_broadcast(hard_iface, NULL, NULL);
if (ret) {
char *type;
switch (ret) {
case BATADV_HARDIF_BCAST_NORECIPIENT:
type = "no neighbor";
break;
case BATADV_HARDIF_BCAST_DUPFWD:
type = "single neighbor is source";
break;
case BATADV_HARDIF_BCAST_DUPORIG:
type = "single neighbor is originator";
break;
default:
type = "unknown";
}
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 from ourselve on %s surpressed: %s\n",
hard_iface->net_dev->name, type);
batadv_hardif_put(hard_iface);
continue;
}
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n",
ogm_packet->orig, ntohl(ogm_packet->seqno),
......@@ -651,6 +677,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
struct batadv_hard_iface *hard_iface;
struct batadv_ogm2_packet *ogm_packet;
u32 ogm_throughput, link_throughput, path_throughput;
int ret;
ethhdr = eth_hdr(skb);
ogm_packet = (struct batadv_ogm2_packet *)(skb->data + ogm_offset);
......@@ -716,6 +743,35 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
ret = batadv_hardif_no_broadcast(hard_iface,
ogm_packet->orig,
hardif_neigh->orig);
if (ret) {
char *type;
switch (ret) {
case BATADV_HARDIF_BCAST_NORECIPIENT:
type = "no neighbor";
break;
case BATADV_HARDIF_BCAST_DUPFWD:
type = "single neighbor is source";
break;
case BATADV_HARDIF_BCAST_DUPORIG:
type = "single neighbor is originator";
break;
default:
type = "unknown";
}
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 packet from %pM on %s surpressed: %s\n",
ogm_packet->orig, hard_iface->net_dev->name,
type);
batadv_hardif_put(hard_iface);
continue;
}
batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet,
orig_node, neigh_node,
if_incoming, hard_iface);
......@@ -754,18 +810,18 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
* B.A.T.M.A.N. V enabled ?
*/
if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
return NET_RX_DROP;
goto free_skb;
if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
return NET_RX_DROP;
goto free_skb;
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
return NET_RX_DROP;
goto free_skb;
ogm_packet = (struct batadv_ogm2_packet *)skb->data;
if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
return NET_RX_DROP;
goto free_skb;
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
......@@ -786,7 +842,12 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
}
ret = NET_RX_SUCCESS;
free_skb:
if (ret == NET_RX_SUCCESS)
consume_skb(skb);
else
kfree_skb(skb);
return ret;
}
......
......@@ -948,6 +948,41 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
return vid;
}
/**
* batadv_dat_arp_create_reply - create an ARP Reply
* @bat_priv: the bat priv with all the soft interface information
* @ip_src: ARP sender IP
* @ip_dst: ARP target IP
* @hw_src: Ethernet source and ARP sender MAC
* @hw_dst: Ethernet destination and ARP target MAC
* @vid: VLAN identifier (optional, set to zero otherwise)
*
* Creates an ARP Reply from the given values, optionally encapsulated in a
* VLAN header.
*
* Return: An skb containing an ARP Reply.
*/
static struct sk_buff *
batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src,
__be32 ip_dst, u8 *hw_src, u8 *hw_dst,
unsigned short vid)
{
struct sk_buff *skb;
skb = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_dst, bat_priv->soft_iface,
ip_src, hw_dst, hw_src, hw_dst);
if (!skb)
return NULL;
skb_reset_mac_header(skb);
if (vid & BATADV_VLAN_HAS_TAG)
skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
return skb;
}
/**
* batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
* answer using DAT
......@@ -1005,20 +1040,12 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
goto out;
}
skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
bat_priv->soft_iface, ip_dst, hw_src,
dat_entry->mac_addr, hw_src);
skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
dat_entry->mac_addr,
hw_src, vid);
if (!skb_new)
goto out;
if (vid & BATADV_VLAN_HAS_TAG) {
skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
if (!skb_new)
goto out;
}
skb_reset_mac_header(skb_new);
skb_new->protocol = eth_type_trans(skb_new,
bat_priv->soft_iface);
bat_priv->stats.rx_packets++;
......@@ -1081,24 +1108,10 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
if (!dat_entry)
goto out;
skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
bat_priv->soft_iface, ip_dst, hw_src,
dat_entry->mac_addr, hw_src);
if (!skb_new)
goto out;
/* the rest of the TX path assumes that the mac_header offset pointing
* to the inner Ethernet header has been set, therefore reset it now.
*/
skb_reset_mac_header(skb_new);
if (vid & BATADV_VLAN_HAS_TAG) {
skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
dat_entry->mac_addr, hw_src, vid);
if (!skb_new)
goto out;
}
/* To preserve backwards compatibility, the node has to choose the outgoing
* format based on the incoming request packet type. The assumption is
......
......@@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
......@@ -42,17 +43,23 @@
/**
* batadv_frag_clear_chain - delete entries in the fragment buffer chain
* @head: head of chain with entries.
* @dropped: whether the chain is cleared because all fragments are dropped
*
* Free fragments in the passed hlist. Should be called with appropriate lock.
*/
static void batadv_frag_clear_chain(struct hlist_head *head)
static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
{
struct batadv_frag_list_entry *entry;
struct hlist_node *node;
hlist_for_each_entry_safe(entry, node, head, list) {
hlist_del(&entry->list);
if (dropped)
kfree_skb(entry->skb);
else
consume_skb(entry->skb);
kfree(entry);
}
}
......@@ -73,7 +80,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
spin_lock_bh(&chain->lock);
if (!check_cb || check_cb(chain)) {
batadv_frag_clear_chain(&chain->fragment_list);
batadv_frag_clear_chain(&chain->fragment_list, true);
chain->size = 0;
}
......@@ -118,7 +125,7 @@ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
return false;
if (!hlist_empty(&chain->fragment_list))
batadv_frag_clear_chain(&chain->fragment_list);
batadv_frag_clear_chain(&chain->fragment_list, true);
chain->size = 0;
chain->seqno = seqno;
......@@ -220,7 +227,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
* exceeds the maximum size of one merged packet. Don't allow
* packets to have different total_size.
*/
batadv_frag_clear_chain(&chain->fragment_list);
batadv_frag_clear_chain(&chain->fragment_list, true);
chain->size = 0;
} else if (ntohs(frag_packet->total_size) == chain->size) {
/* All fragments received. Hand over chain to caller. */
......@@ -254,6 +261,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
struct batadv_frag_list_entry *entry;
struct sk_buff *skb_out;
int size, hdr_size = sizeof(struct batadv_frag_packet);
bool dropped = false;
/* Remove first entry, as this is the destination for the rest of the
* fragments.
......@@ -270,6 +278,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
kfree_skb(skb_out);
skb_out = NULL;
dropped = true;
goto free;
}
......@@ -291,7 +300,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
free:
/* Locking is not needed, because 'chain' is not part of any orig. */
batadv_frag_clear_chain(chain);
batadv_frag_clear_chain(chain, dropped);
return skb_out;
}
......@@ -433,8 +442,7 @@ static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
* @orig_node: final destination of the created fragments
* @neigh_node: next-hop of the created fragments
*
* Return: the netdev tx status or -1 in case of error.
* When -1 is returned the skb is not consumed.
* Return: the netdev tx status or a negative errno code on a failure
*/
int batadv_frag_send_packet(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
......@@ -447,7 +455,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
unsigned int header_size = sizeof(frag_header);
unsigned int max_fragment_size, max_packet_size;
int ret = -1;
int ret;
/* To avoid merge and refragmentation at next-hops we never send
* fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
......@@ -457,13 +465,17 @@ int batadv_frag_send_packet(struct sk_buff *skb,
max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
/* Don't even try to fragment, if we need more than 16 fragments */
if (skb->len > max_packet_size)
goto out;
if (skb->len > max_packet_size) {
ret = -EAGAIN;
goto free_skb;
}
bat_priv = orig_node->bat_priv;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
if (!primary_if) {
ret = -EINVAL;
goto put_primary_if;
}
/* Create one header to be copied to all fragments */
frag_header.packet_type = BATADV_UNICAST_FRAG;
......@@ -488,34 +500,35 @@ int batadv_frag_send_packet(struct sk_buff *skb,
/* Eat and send fragments from the tail of skb */
while (skb->len > max_fragment_size) {
skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
if (!skb_fragment)
goto out;
if (!skb_fragment) {
ret = -ENOMEM;
goto free_skb;
}
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
skb_fragment->len + ETH_HLEN);
ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
if (ret != NET_XMIT_SUCCESS) {
/* return -1 so that the caller can free the original
* skb
*/
ret = -1;
goto out;
ret = NET_XMIT_DROP;
goto free_skb;
}
frag_header.no++;
/* The initial check in this function should cover this case */
if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
ret = -1;
goto out;
ret = -EINVAL;
goto free_skb;
}
}
/* Make room for the fragment header. */
if (batadv_skb_head_push(skb, header_size) < 0 ||
pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
goto out;
pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
ret = -ENOMEM;
goto free_skb;
}
memcpy(skb->data, &frag_header, header_size);
......@@ -524,10 +537,13 @@ int batadv_frag_send_packet(struct sk_buff *skb,
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
skb->len + ETH_HLEN);
ret = batadv_send_unicast_skb(skb, neigh_node);
/* skb was consumed */
skb = NULL;
out:
if (primary_if)
put_primary_if:
batadv_hardif_put(primary_if);
free_skb:
kfree_skb(skb);
return ret;
}
......@@ -228,6 +228,58 @@ bool batadv_is_wifi_netdev(struct net_device *net_device)
return false;
}
/**
* batadv_hardif_no_broadcast - check whether (re)broadcast is necessary
* @if_outgoing: the outgoing interface checked and considered for (re)broadcast
* @orig_addr: the originator of this packet
* @orig_neigh: originator address of the forwarder we just got the packet from
* (NULL if we originated)
*
* Checks whether a packet needs to be (re)broadcasted on the given interface.
*
* Return:
* BATADV_HARDIF_BCAST_NORECIPIENT: No neighbor on interface
* BATADV_HARDIF_BCAST_DUPFWD: Just one neighbor, but it is the forwarder
* BATADV_HARDIF_BCAST_DUPORIG: Just one neighbor, but it is the originator
* BATADV_HARDIF_BCAST_OK: Several neighbors, must broadcast
*/
int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
u8 *orig_addr, u8 *orig_neigh)
{
struct batadv_hardif_neigh_node *hardif_neigh;
struct hlist_node *first;
int ret = BATADV_HARDIF_BCAST_OK;
rcu_read_lock();
/* 0 neighbors -> no (re)broadcast */
first = rcu_dereference(hlist_first_rcu(&if_outgoing->neigh_list));
if (!first) {
ret = BATADV_HARDIF_BCAST_NORECIPIENT;
goto out;
}
/* >1 neighbors -> (re)broadcast */
if (rcu_dereference(hlist_next_rcu(first)))
goto out;
hardif_neigh = hlist_entry(first, struct batadv_hardif_neigh_node,
list);
/* 1 neighbor, is the originator -> no rebroadcast */
if (orig_addr && batadv_compare_eth(hardif_neigh->orig, orig_addr)) {
ret = BATADV_HARDIF_BCAST_DUPORIG;
/* 1 neighbor, is the one we received from -> no rebroadcast */
} else if (orig_neigh &&
batadv_compare_eth(hardif_neigh->orig, orig_neigh)) {
ret = BATADV_HARDIF_BCAST_DUPFWD;
}
out:
rcu_read_unlock();
return ret;
}
static struct batadv_hard_iface *
batadv_hardif_get_active(const struct net_device *soft_iface)
{
......
......@@ -39,6 +39,20 @@ enum batadv_hard_if_state {
BATADV_IF_I_WANT_YOU,
};
/**
* enum batadv_hard_if_bcast - broadcast avoidance options
* @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface
* @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no recipient
* @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it from
* @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator
*/
enum batadv_hard_if_bcast {
BATADV_HARDIF_BCAST_OK = 0,
BATADV_HARDIF_BCAST_NORECIPIENT,
BATADV_HARDIF_BCAST_DUPFWD,
BATADV_HARDIF_BCAST_DUPORIG,
};
/**
* enum batadv_hard_if_cleanup - Cleanup modi for soft_iface after slave removal
* @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
......@@ -63,6 +77,8 @@ void batadv_hardif_remove_interfaces(void);
int batadv_hardif_min_mtu(struct net_device *soft_iface);
void batadv_update_min_mtu(struct net_device *soft_iface);
void batadv_hardif_release(struct kref *ref);
int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
u8 *orig_addr, u8 *orig_neigh);
/**
* batadv_hardif_put - decrement the hard interface refcounter and possibly
......
......@@ -402,6 +402,8 @@ void batadv_skb_set_priority(struct sk_buff *skb, int offset)
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
kfree_skb(skb);
return NET_RX_DROP;
}
......@@ -416,7 +418,6 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct batadv_ogm_packet *batadv_ogm_packet;
struct batadv_hard_iface *hard_iface;
u8 idx;
int ret;
hard_iface = container_of(ptype, struct batadv_hard_iface,
batman_adv_ptype);
......@@ -466,14 +467,8 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
/* reset control block to avoid left overs from previous users */
memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
/* all receive handlers return whether they received or reused
* the supplied skb. if not, we have to free the skb.
*/
idx = batadv_ogm_packet->packet_type;
ret = (*batadv_rx_handler[idx])(skb, hard_iface);
if (ret == NET_RX_DROP)
kfree_skb(skb);
(*batadv_rx_handler[idx])(skb, hard_iface);
batadv_hardif_put(hard_iface);
......
......@@ -48,6 +48,7 @@
#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
#define BATADV_TT_WORK_PERIOD 5000 /* 5 seconds */
#define BATADV_ORIG_WORK_PERIOD 1000 /* 1 second */
#define BATADV_MCAST_WORK_PERIOD 500 /* 0.5 seconds */
#define BATADV_DAT_ENTRY_TIMEOUT (5 * 60000) /* 5 mins in milliseconds */
/* sliding packet range of received originator messages in sequence numbers
* (should be a multiple of our word size)
......
......@@ -33,6 +33,7 @@
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
......@@ -48,6 +49,7 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/addrconf.h>
#include <net/if_inet6.h>
#include <net/ip.h>
......@@ -60,6 +62,18 @@
#include "translation-table.h"
#include "tvlv.h"
static void batadv_mcast_mla_update(struct work_struct *work);
/**
* batadv_mcast_start_timer - schedule the multicast periodic worker
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}
/**
* batadv_mcast_get_bridge - get the bridge on top of the softif if it exists
* @soft_iface: netdev struct of the mesh interface
......@@ -231,19 +245,15 @@ static int batadv_mcast_mla_bridge_get(struct net_device *dev,
/**
* batadv_mcast_mla_list_free - free a list of multicast addresses
* @bat_priv: the bat priv with all the soft interface information
* @mcast_list: the list to free
*
* Removes and frees all items in the given mcast_list.
*/
static void batadv_mcast_mla_list_free(struct batadv_priv *bat_priv,
struct hlist_head *mcast_list)
static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
{
struct batadv_hw_addr *mcast_entry;
struct hlist_node *tmp;
lockdep_assert_held(&bat_priv->tt.commit_lock);
hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
hlist_del(&mcast_entry->list);
kfree(mcast_entry);
......@@ -259,6 +269,8 @@ static void batadv_mcast_mla_list_free(struct batadv_priv *bat_priv,
* translation table except the ones listed in the given mcast_list.
*
* If mcast_list is NULL then all are retracted.
*
* Do not call outside of the mcast worker! (or cancel mcast worker first)
*/
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
struct hlist_head *mcast_list)
......@@ -266,7 +278,7 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
struct batadv_hw_addr *mcast_entry;
struct hlist_node *tmp;
lockdep_assert_held(&bat_priv->tt.commit_lock);
WARN_ON(delayed_work_pending(&bat_priv->mcast.work));
hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
list) {
......@@ -291,6 +303,8 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
*
* Adds multicast listener announcements from the given mcast_list to the
* translation table if they have not been added yet.
*
* Do not call outside of the mcast worker! (or cancel mcast worker first)
*/
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
struct hlist_head *mcast_list)
......@@ -298,7 +312,7 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
struct batadv_hw_addr *mcast_entry;
struct hlist_node *tmp;
lockdep_assert_held(&bat_priv->tt.commit_lock);
WARN_ON(delayed_work_pending(&bat_priv->mcast.work));
if (!mcast_list)
return;
......@@ -532,13 +546,18 @@ static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
}
/**
* batadv_mcast_mla_update - update the own MLAs
* __batadv_mcast_mla_update - update the own MLAs
* @bat_priv: the bat priv with all the soft interface information
*
* Updates the own multicast listener announcements in the translation
* table as well as the own, announced multicast tvlv container.
*
* Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
* in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
* ensured by the non-parallel execution of the worker this function
* belongs to.
*/
void batadv_mcast_mla_update(struct batadv_priv *bat_priv)
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
struct net_device *soft_iface = bat_priv->soft_iface;
struct hlist_head mcast_list = HLIST_HEAD_INIT;
......@@ -560,7 +579,30 @@ void batadv_mcast_mla_update(struct batadv_priv *bat_priv)
batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
out:
batadv_mcast_mla_list_free(bat_priv, &mcast_list);
batadv_mcast_mla_list_free(&mcast_list);
}
/**
* batadv_mcast_mla_update - update the own MLAs
* @work: kernel work struct
*
* Updates the own multicast listener announcements in the translation
* table as well as the own, announced multicast tvlv container.
*
* In the end, reschedules the work timer.
*/
static void batadv_mcast_mla_update(struct work_struct *work)
{
struct delayed_work *delayed_work;
struct batadv_priv_mcast *priv_mcast;
struct batadv_priv *bat_priv;
delayed_work = to_delayed_work(work);
priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
__batadv_mcast_mla_update(bat_priv);
batadv_mcast_start_timer(bat_priv);
}
/**
......@@ -1132,6 +1174,9 @@ void batadv_mcast_init(struct batadv_priv *bat_priv)
batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
NULL, BATADV_TVLV_MCAST, 2,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
batadv_mcast_start_timer(bat_priv);
}
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
......@@ -1243,12 +1288,13 @@ int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
*/
void batadv_mcast_free(struct batadv_priv *bat_priv)
{
cancel_delayed_work_sync(&bat_priv->mcast.work);
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
spin_lock_bh(&bat_priv->tt.commit_lock);
/* safely calling outside of worker, as worker was canceled above */
batadv_mcast_mla_tt_retract(bat_priv, NULL);
spin_unlock_bh(&bat_priv->tt.commit_lock);
}
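Taken together, the multicast hunks above move the MLA update out of the TT commit path and into a self-rescheduling delayed worker; the unlocked accesses to bat_priv->mcast.mla_list stay safe because the worker never runs concurrently with itself and is cancelled synchronously before the final retract. A condensed view of that lifecycle, assembled from the hunks above (not a drop-in replacement for multicast.c; elided parts are marked):

static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
        queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
                           msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}

/* worker: update the own MLAs, then reschedule itself */
static void batadv_mcast_mla_update(struct work_struct *work)
{
        struct batadv_priv_mcast *priv_mcast;
        struct batadv_priv *bat_priv;

        priv_mcast = container_of(to_delayed_work(work),
                                  struct batadv_priv_mcast, work);
        bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);

        __batadv_mcast_mla_update(bat_priv);    /* touches mcast.mla_list */
        batadv_mcast_start_timer(bat_priv);     /* runs again after 500 ms */
}

void batadv_mcast_init(struct batadv_priv *bat_priv)
{
        /* tvlv handler registration elided */
        INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
        batadv_mcast_start_timer(bat_priv);
}

void batadv_mcast_free(struct batadv_priv *bat_priv)
{
        /* tvlv container/handler unregistration elided */
        cancel_delayed_work_sync(&bat_priv->mcast.work);

        /* safe without tt.commit_lock: no worker can run any more */
        batadv_mcast_mla_tt_retract(bat_priv, NULL);
}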
/**
......
......@@ -39,8 +39,6 @@ enum batadv_forw_mode {
#ifdef CONFIG_BATMAN_ADV_MCAST
void batadv_mcast_mla_update(struct batadv_priv *bat_priv);
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_orig_node **mcast_single_orig);
......@@ -55,10 +53,6 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
#else
static inline void batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
}
static inline enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_orig_node **mcast_single_orig)
......
......@@ -20,11 +20,14 @@
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/genetlink.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
......@@ -527,7 +530,7 @@ batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb)
return msg->len;
}
static struct genl_ops batadv_netlink_ops[] = {
static const struct genl_ops batadv_netlink_ops[] = {
{
.cmd = BATADV_CMD_GET_MESH_INFO,
.flags = GENL_ADMIN_PERM,
......
......@@ -260,10 +260,16 @@ static void batadv_nc_path_put(struct batadv_nc_path *nc_path)
/**
* batadv_nc_packet_free - frees nc packet
* @nc_packet: the nc packet to free
* @dropped: whether the packet is freed because it is dropped
*/
static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet)
static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet,
bool dropped)
{
if (dropped)
kfree_skb(nc_packet->skb);
else
consume_skb(nc_packet->skb);
batadv_nc_path_put(nc_packet->nc_path);
kfree(nc_packet);
}
......@@ -576,7 +582,7 @@ static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
{
batadv_send_unicast_skb(nc_packet->skb, nc_packet->neigh_node);
nc_packet->skb = NULL;
batadv_nc_packet_free(nc_packet);
batadv_nc_packet_free(nc_packet, false);
}
/**
......@@ -610,7 +616,7 @@ static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv,
/* purge nc packet */
list_del(&nc_packet->list);
batadv_nc_packet_free(nc_packet);
batadv_nc_packet_free(nc_packet, true);
res = true;
......@@ -1208,11 +1214,11 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
}
/* skb_src is now coded into skb_dest, so free it */
kfree_skb(skb_src);
consume_skb(skb_src);
/* avoid duplicate free of skb from nc_packet */
nc_packet->skb = NULL;
batadv_nc_packet_free(nc_packet);
batadv_nc_packet_free(nc_packet, false);
/* Send the coded packet and return true */
batadv_send_unicast_skb(skb_dest, first_dest);
......@@ -1399,7 +1405,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
/* batadv_nc_skb_store_for_decoding() clones the skb, so we must free
* our ref
*/
kfree_skb(skb);
consume_skb(skb);
}
/**
......@@ -1723,7 +1729,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
ether_addr_copy(unicast_packet->dest, orig_dest);
unicast_packet->ttvn = ttvn;
batadv_nc_packet_free(nc_packet);
batadv_nc_packet_free(nc_packet, false);
return unicast_packet;
}
......@@ -1813,11 +1819,11 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
/* Check if network coding is enabled */
if (!atomic_read(&bat_priv->network_coding))
return NET_RX_DROP;
goto free_skb;
/* Make sure we can access (and remove) header */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
return NET_RX_DROP;
goto free_skb;
coded_packet = (struct batadv_coded_packet *)skb->data;
ethhdr = eth_hdr(skb);
......@@ -1825,7 +1831,7 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
/* Verify frame is destined for us */
if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest) &&
!batadv_is_my_mac(bat_priv, coded_packet->second_dest))
return NET_RX_DROP;
goto free_skb;
/* Update stat counter */
if (batadv_is_my_mac(bat_priv, coded_packet->second_dest))
......@@ -1835,7 +1841,7 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
coded_packet);
if (!nc_packet) {
batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
return NET_RX_DROP;
goto free_skb;
}
/* Make skb's linear, because decoding accesses the entire buffer */
......@@ -1860,7 +1866,10 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
return batadv_recv_unicast_packet(skb, recv_if);
free_nc_packet:
batadv_nc_packet_free(nc_packet);
batadv_nc_packet_free(nc_packet, true);
free_skb:
kfree_skb(skb);
return NET_RX_DROP;
}
......
......@@ -512,12 +512,14 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
* batadv_hardif_neigh_create - create a hardif neighbour node
* @hard_iface: the interface this neighbour is connected to
* @neigh_addr: the interface address of the neighbour to retrieve
* @orig_node: originator object representing the neighbour
*
* Return: the hardif neighbour node if found or created or NULL otherwise.
*/
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
const u8 *neigh_addr)
const u8 *neigh_addr,
struct batadv_orig_node *orig_node)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hardif_neigh_node *hardif_neigh;
......@@ -536,6 +538,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
kref_get(&hard_iface->refcount);
INIT_HLIST_NODE(&hardif_neigh->list);
ether_addr_copy(hardif_neigh->addr, neigh_addr);
ether_addr_copy(hardif_neigh->orig, orig_node->orig);
hardif_neigh->if_incoming = hard_iface;
hardif_neigh->last_seen = jiffies;
......@@ -556,12 +559,14 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
* node
* @hard_iface: the interface this neighbour is connected to
* @neigh_addr: the interface address of the neighbour to retrieve
* @orig_node: originator object representing the neighbour
*
* Return: the hardif neighbour node if found or created or NULL otherwise.
*/
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
const u8 *neigh_addr)
const u8 *neigh_addr,
struct batadv_orig_node *orig_node)
{
struct batadv_hardif_neigh_node *hardif_neigh;
......@@ -570,7 +575,7 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
if (hardif_neigh)
return hardif_neigh;
return batadv_hardif_neigh_create(hard_iface, neigh_addr);
return batadv_hardif_neigh_create(hard_iface, neigh_addr, orig_node);
}
/**
......@@ -630,7 +635,7 @@ batadv_neigh_node_create(struct batadv_orig_node *orig_node,
goto out;
hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
neigh_addr);
neigh_addr, orig_node);
if (!hardif_neigh)
goto out;
......
......@@ -64,8 +64,11 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
* If neigh_node is NULL, then the packet is broadcasted using hard_iface,
* otherwise it is sent as unicast to the given neighbor.
*
* Return: NET_TX_DROP in case of error or the result of dev_queue_xmit(skb)
* otherwise
* Regardless of the return value, the skb is consumed.
*
* Return: A negative errno code is returned on a failure. A success does not
* guarantee the frame will be transmitted as it may be dropped due
* to congestion or traffic shaping.
*/
int batadv_send_skb_packet(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface,
......@@ -73,7 +76,6 @@ int batadv_send_skb_packet(struct sk_buff *skb,
{
struct batadv_priv *bat_priv;
struct ethhdr *ethhdr;
int ret;
bat_priv = netdev_priv(hard_iface->soft_iface);
......@@ -111,15 +113,8 @@ int batadv_send_skb_packet(struct sk_buff *skb,
/* dev_queue_xmit() returns a negative result on error. However on
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
* (which is > 0). This will not be treated as an error.
*
* a negative value cannot be returned because it could be interpreted
* as not consumed skb by callers of batadv_send_skb_to_orig.
*/
ret = dev_queue_xmit(skb);
if (ret < 0)
ret = NET_XMIT_DROP;
return ret;
return dev_queue_xmit(skb);
send_skb_err:
kfree_skb(skb);
return NET_XMIT_DROP;
......@@ -165,11 +160,9 @@ int batadv_send_unicast_skb(struct sk_buff *skb,
* host, NULL can be passed as recv_if and no interface alternating is
* attempted.
*
* Return: -1 on failure (and the skb is not consumed), -EINPROGRESS if the
* skb is buffered for later transmit or the NET_XMIT status returned by the
* Return: negative errno code on a failure, -EINPROGRESS if the skb is
* buffered for later transmit or the NET_XMIT status returned by the
* lower routine if the packet has been passed down.
*
* If the returning value is not -1 the skb has been consumed.
*/
int batadv_send_skb_to_orig(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
......@@ -177,12 +170,14 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
{
struct batadv_priv *bat_priv = orig_node->bat_priv;
struct batadv_neigh_node *neigh_node;
int ret = -1;
int ret;
/* batadv_find_router() increases neigh_nodes refcount if found. */
neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
if (!neigh_node)
goto out;
if (!neigh_node) {
ret = -EINVAL;
goto free_skb;
}
/* Check if the skb is too large to send in one piece and fragment
* it if needed.
......@@ -191,8 +186,10 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
skb->len > neigh_node->if_incoming->net_dev->mtu) {
/* Fragment and send packet. */
ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
/* skb was consumed */
skb = NULL;
goto out;
goto put_neigh_node;
}
/* try to network code the packet, if it is received on an interface
......@@ -204,9 +201,13 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
else
ret = batadv_send_unicast_skb(skb, neigh_node);
out:
if (neigh_node)
/* skb was consumed */
skb = NULL;
put_neigh_node:
batadv_neigh_node_put(neigh_node);
free_skb:
kfree_skb(skb);
return ret;
}
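A consequence of the reworked batadv_send_skb_to_orig() above: the function now consumes the skb on every path, returns a negative errno on failure, and may return -EINPROGRESS when the packet is buffered (for example by network coding) for later transmit. A hedged caller sketch follows; example_unicast_send() is an illustrative name, not part of the patchset, and batman-adv's own headers are assumed:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch of a caller honouring the new contract: never free the skb
 * after the call, and do not treat -EINPROGRESS as a failure.
 */
static int example_unicast_send(struct sk_buff *skb,
                                struct batadv_orig_node *orig_node)
{
        int ret;

        ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
        /* skb is consumed above in all cases; the old "-1 means the
         * caller must free it" rule no longer exists.
         */
        if (ret == -EINPROGRESS)
                return NET_XMIT_SUCCESS;        /* buffered for later transmit */

        if (ret < 0 || ret == NET_XMIT_DROP)
                return NET_XMIT_DROP;

        return NET_XMIT_SUCCESS;
}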
......@@ -327,7 +328,7 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
{
struct batadv_unicast_packet *unicast_packet;
struct ethhdr *ethhdr;
int res, ret = NET_XMIT_DROP;
int ret = NET_XMIT_DROP;
if (!orig_node)
goto out;
......@@ -364,12 +365,11 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
unicast_packet->ttvn = unicast_packet->ttvn - 1;
res = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (res != -1)
ret = NET_XMIT_SUCCESS;
ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
/* skb was consumed */
skb = NULL;
out:
if (ret == NET_XMIT_DROP)
kfree_skb(skb);
return ret;
}
......@@ -451,13 +451,19 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
/**
* batadv_forw_packet_free - free a forwarding packet
* @forw_packet: The packet to free
* @dropped: whether the packet is freed because it is dropped
*
* This frees a forwarding packet and releases any resources it might
* have claimed.
*/
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
bool dropped)
{
if (dropped)
kfree_skb(forw_packet->skb);
else
consume_skb(forw_packet->skb);
if (forw_packet->if_incoming)
batadv_hardif_put(forw_packet->if_incoming);
if (forw_packet->if_outgoing)
......@@ -549,6 +555,7 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
* @bat_priv: the bat priv with all the soft interface information
* @skb: broadcast packet to add
* @delay: number of jiffies to wait before sending
* @own_packet: true if it is a self-generated broadcast packet
*
* add a broadcast packet to the queue and setup timers. broadcast packets
* are sent multiple times to increase probability for being received.
......@@ -560,7 +567,8 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
*/
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
const struct sk_buff *skb,
unsigned long delay)
unsigned long delay,
bool own_packet)
{
struct batadv_hard_iface *primary_if;
struct batadv_forw_packet *forw_packet;
......@@ -586,9 +594,8 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
bcast_packet = (struct batadv_bcast_packet *)newskb->data;
bcast_packet->ttl--;
skb_reset_mac_header(newskb);
forw_packet->skb = newskb;
forw_packet->own = own_packet;
INIT_DELAYED_WORK(&forw_packet->delayed_work,
batadv_send_outstanding_bcast_packet);
......@@ -597,7 +604,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
return NETDEV_TX_OK;
err_packet_free:
batadv_forw_packet_free(forw_packet);
batadv_forw_packet_free(forw_packet, true);
err:
return NETDEV_TX_BUSY;
}
......@@ -605,11 +612,17 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
struct batadv_hard_iface *hard_iface;
struct batadv_hardif_neigh_node *neigh_node;
struct delayed_work *delayed_work;
struct batadv_forw_packet *forw_packet;
struct batadv_bcast_packet *bcast_packet;
struct sk_buff *skb1;
struct net_device *soft_iface;
struct batadv_priv *bat_priv;
bool dropped = false;
u8 *neigh_addr;
u8 *orig_neigh;
int ret = 0;
delayed_work = to_delayed_work(work);
forw_packet = container_of(delayed_work, struct batadv_forw_packet,
......@@ -621,11 +634,17 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
hlist_del(&forw_packet->list);
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
dropped = true;
goto out;
}
if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
dropped = true;
goto out;
}
bcast_packet = (struct batadv_bcast_packet *)forw_packet->skb->data;
/* rebroadcast packet */
rcu_read_lock();
......@@ -636,6 +655,49 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
if (forw_packet->num_packets >= hard_iface->num_bcasts)
continue;
if (forw_packet->own) {
neigh_node = NULL;
} else {
neigh_addr = eth_hdr(forw_packet->skb)->h_source;
neigh_node = batadv_hardif_neigh_get(hard_iface,
neigh_addr);
}
orig_neigh = neigh_node ? neigh_node->orig : NULL;
ret = batadv_hardif_no_broadcast(hard_iface, bcast_packet->orig,
orig_neigh);
if (ret) {
char *type;
switch (ret) {
case BATADV_HARDIF_BCAST_NORECIPIENT:
type = "no neighbor";
break;
case BATADV_HARDIF_BCAST_DUPFWD:
type = "single neighbor is source";
break;
case BATADV_HARDIF_BCAST_DUPORIG:
type = "single neighbor is originator";
break;
default:
type = "unknown";
}
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s surpressed: %s\n",
bcast_packet->orig,
hard_iface->net_dev->name, type);
if (neigh_node)
batadv_hardif_neigh_put(neigh_node);
continue;
}
if (neigh_node)
batadv_hardif_neigh_put(neigh_node);
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
......@@ -658,7 +720,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
}
out:
batadv_forw_packet_free(forw_packet);
batadv_forw_packet_free(forw_packet, dropped);
}
void
......@@ -699,7 +761,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
batadv_forw_packet_free(forw_packet);
batadv_forw_packet_free(forw_packet, true);
}
}
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
......@@ -726,7 +788,7 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
batadv_forw_packet_free(forw_packet);
batadv_forw_packet_free(forw_packet, true);
}
}
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
......
......@@ -27,7 +27,8 @@
struct sk_buff;
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet);
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
bool dropped);
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing,
......@@ -46,7 +47,8 @@ int batadv_send_unicast_skb(struct sk_buff *skb,
struct batadv_neigh_node *neigh_node);
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
const struct sk_buff *skb,
unsigned long delay);
unsigned long delay,
bool own_packet);
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
const struct batadv_hard_iface *hard_iface);
......
......@@ -357,12 +357,12 @@ static int batadv_interface_tx(struct sk_buff *skb,
seqno = atomic_inc_return(&bat_priv->bcast_seqno);
bcast_packet->seqno = htonl(seqno);
batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay, true);
/* a copy is stored in the bcast list, therefore removing
* the original skb.
*/
kfree_skb(skb);
consume_skb(skb);
/* unicast packet */
} else {
......@@ -386,7 +386,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
vid);
}
if (ret == NET_XMIT_DROP)
if (ret != NET_XMIT_SUCCESS)
goto dropped_freed;
}
......
......@@ -615,9 +615,6 @@ static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
batadv_tp_fill_prerandom(tp_vars, data, data_len);
r = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (r == -1)
kfree_skb(skb);
if (r == NET_XMIT_SUCCESS)
return 0;
......@@ -1206,9 +1203,6 @@ static int batadv_tp_send_ack(struct batadv_priv *bat_priv, const u8 *dst,
/* send the ack */
r = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (r == -1)
kfree_skb(skb);
if (unlikely(r < 0) || (r == NET_XMIT_DROP)) {
ret = BATADV_TP_REASON_DST_UNREACHABLE;
goto out;
......
......@@ -56,7 +56,6 @@
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "multicast.h"
#include "netlink.h"
#include "originator.h"
#include "packet.h"
......@@ -3795,9 +3794,6 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
{
lockdep_assert_held(&bat_priv->tt.commit_lock);
/* Update multicast addresses in local translation table */
batadv_mcast_mla_update(bat_priv);
if (atomic_read(&bat_priv->tt.local_changes) < 1) {
if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
batadv_tt_tvlv_container_update(bat_priv);
......
......@@ -600,7 +600,6 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
unsigned char *tvlv_buff;
unsigned int tvlv_len;
ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
int res;
orig_node = batadv_orig_hash_find(bat_priv, dst);
if (!orig_node)
......@@ -633,9 +632,7 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
tvlv_buff += sizeof(*tvlv_hdr);
memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
res = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (res == -1)
kfree_skb(skb);
batadv_send_skb_to_orig(skb, orig_node, NULL);
out:
batadv_orig_node_put(orig_node);
}
......@@ -123,8 +123,8 @@ struct batadv_hard_iface_bat_v {
* @list: list node for batadv_hardif_list
* @if_num: identificator of the interface
* @if_status: status of the interface for batman-adv
* @net_dev: pointer to the net_device
* @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
* @net_dev: pointer to the net_device
* @hardif_obj: kobject of the per interface sysfs "mesh" directory
* @refcount: number of contexts the object is used
* @batman_adv_ptype: packet type describing packets that should be processed by
......@@ -141,8 +141,8 @@ struct batadv_hard_iface {
struct list_head list;
s16 if_num;
char if_status;
struct net_device *net_dev;
u8 num_bcasts;
struct net_device *net_dev;
struct kobject *hardif_obj;
struct kref refcount;
struct packet_type batman_adv_ptype;
......@@ -408,6 +408,7 @@ struct batadv_hardif_neigh_node_bat_v {
* struct batadv_hardif_neigh_node - unique neighbor per hard-interface
* @list: list node for batadv_hard_iface::neigh_list
* @addr: the MAC address of the neighboring interface
* @orig: the address of the originator this neighbor node belongs to
* @if_incoming: pointer to incoming hard-interface
* @last_seen: when last packet via this neighbor was received
* @bat_v: B.A.T.M.A.N. V private data
......@@ -417,6 +418,7 @@ struct batadv_hardif_neigh_node_bat_v {
struct batadv_hardif_neigh_node {
struct hlist_node list;
u8 addr[ETH_ALEN];
u8 orig[ETH_ALEN];
struct batadv_hard_iface *if_incoming;
unsigned long last_seen;
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
......@@ -785,9 +787,10 @@ struct batadv_mcast_querier_state {
* @num_want_all_ipv6: counter for items in want_all_ipv6_list
* @want_lists_lock: lock for protecting modifications to mcast want lists
* (traversals are rcu-locked)
* @work: work queue callback item for multicast TT and TVLV updates
*/
struct batadv_priv_mcast {
struct hlist_head mla_list;
struct hlist_head mla_list; /* see __batadv_mcast_mla_update() */
struct hlist_head want_all_unsnoopables_list;
struct hlist_head want_all_ipv4_list;
struct hlist_head want_all_ipv6_list;
......@@ -802,6 +805,7 @@ struct batadv_priv_mcast {
atomic_t num_want_all_ipv6;
/* protects want_all_{unsnoopables,ipv4,ipv6}_list */
spinlock_t want_lists_lock;
struct delayed_work work;
};
#endif
......