/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
|
2011-06-26 09:37:18 +08:00
|
|
|
#include "distributed-arp-table.h"
|
2010-12-13 19:19:28 +08:00
|
|
|
#include "send.h"
|
|
|
|
#include "routing.h"
|
|
|
|
#include "translation-table.h"
|
|
|
|
#include "soft-interface.h"
|
|
|
|
#include "hard-interface.h"
|
|
|
|
#include "vis.h"
|
|
|
|
#include "gateway_common.h"
|
|
|
|
#include "originator.h"
|
|
|
|
|
2012-11-26 07:38:50 +08:00
|
|
|
#include <linux/if_ether.h>
|
|
|
|
|
2012-05-17 02:23:14 +08:00
|
|
|
static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
                           struct batadv_hard_iface *hard_iface,
                           const uint8_t *dst_addr)
{
        struct ethhdr *ethhdr;

        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warn("Interface %s is not up - can't send packet via that interface!\n",
                        hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header. */
        if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* dev_queue_xmit() returns a negative result on error. However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error.
         */
        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns TRUE on success; FALSE otherwise.
 */
bool batadv_send_skb_to_orig(struct sk_buff *skb,
                             struct batadv_orig_node *orig_node,
                             struct batadv_hard_iface *recv_if)
{
        struct batadv_priv *bat_priv = orig_node->bat_priv;
        struct batadv_neigh_node *neigh_node;

        /* batadv_find_router() increases neigh_node's refcount if found. */
        neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
        if (!neigh_node)
                return false;

        /* route it */
        batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);

        batadv_neigh_node_free_ref(neigh_node);

        return true;
}

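/* schedule the next OGM on the given interface: promote an interface that is
 * still marked BATADV_IF_TO_BE_ACTIVATED to BATADV_IF_ACTIVE, then hand the
 * actual scheduling over to the routing algorithm's bat_ogm_schedule() hook
 */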
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

        if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
            (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
                return;

        /* the interface gets activated here to avoid race conditions between
         * the moment the interface is activated in
         * hardif_activate_interface() (where the originator mac is set) and
         * outdated packets (especially ones with uninitialized mac addresses)
         * still sitting in the packet queue
         */
        if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
                hard_iface->if_status = BATADV_IF_ACTIVE;

        bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

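/* release the skb and the reference on the incoming hard interface held by a
 * forwarding packet, then free the structure itself
 */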
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                batadv_hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
}

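/* enqueue the broadcast forw_packet on the bcast list and arm its delayed
 * work so that it is handed to the workqueue once send_time has elapsed
 */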
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                 struct batadv_forw_packet *forw_packet,
                                 unsigned long send_time)
{
        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                    const struct sk_buff *skb,
                                    unsigned long delay)
{
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_forw_packet *forw_packet;
        struct batadv_bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "bcast packet queue full\n");
                goto out;
        }

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
        bcast_packet->header.ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;

        /* how often did we send the bcast packet? */
        forw_packet->num_packets = 0;

        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          batadv_send_outstanding_bcast_packet);

        _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
}

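/* delayed work callback for queued broadcasts: take the packet off the bcast
 * list, send a clone on every hard interface belonging to this mesh and
 * re-queue it until three copies have been transmitted (unless the mesh is
 * shutting down or DAT decides the broadcast can be dropped)
 */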
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
        struct batadv_hard_iface *hard_iface;
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct sk_buff *skb1;
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
        bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        batadv_send_skb_packet(skb1, hard_iface,
                                               batadv_broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < 3) {
                _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
                                                 msecs_to_jiffies(5));
                return;
        }

out:
        batadv_forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

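/* delayed work callback for queued OGMs: take the packet off the batman list,
 * let the routing algorithm emit it and, for our own OGMs, schedule the next
 * transmission
 */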
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

        /* we have to have at least one packet in the queue
         * to determine the queue's wake up time unless we are
         * shutting down
         */
        if (forw_packet->own)
                batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        batadv_forw_packet_free(forw_packet);
}

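/* cancel the delayed work of all queued broadcast and OGM packets and free
 * the ones whose transmission was still pending; if hard_iface is given, only
 * packets queued on that interface are purged
 */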
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                                 const struct batadv_hard_iface *hard_iface)
{
        struct batadv_forw_packet *forw_packet;
        struct hlist_node *safe_tmp_node;
        bool pending;

        if (hard_iface)
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets(): %s\n",
                           hard_iface->net_dev->name);
        else
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /* batadv_send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /* batadv_send_outstanding_bat_ogm_packet() will lock the list
                 * to delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}