Diffstat (limited to 'net/batman-adv')
40 files changed, 12898 insertions, 0 deletions
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig new file mode 100644 index 00000000..2b68d068 --- /dev/null +++ b/net/batman-adv/Kconfig @@ -0,0 +1,26 @@ +# +# B.A.T.M.A.N meshing protocol +# + +config BATMAN_ADV + tristate "B.A.T.M.A.N. Advanced Meshing Protocol" + depends on NET + select CRC16 + default n + ---help--- + + B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is + a routing protocol for multi-hop ad-hoc mesh networks. The + networks may be wired or wireless. See + http://www.open-mesh.org/ for more information and user space + tools. + +config BATMAN_ADV_DEBUG + bool "B.A.T.M.A.N. debugging" + depends on BATMAN_ADV != n + ---help--- + + This is an option for use by developers; most people should + say N here. This enables compilation of support for + outputting debugging information to the kernel log. The + output is controlled via the module parameter debug. diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile new file mode 100644 index 00000000..4e392ebe --- /dev/null +++ b/net/batman-adv/Makefile @@ -0,0 +1,39 @@ +# +# Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +# +# Marek Lindner, Simon Wunderlich +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of version 2 of the GNU General Public +# License as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA +# + +obj-$(CONFIG_BATMAN_ADV) += batman-adv.o +batman-adv-y += bat_debugfs.o +batman-adv-y += bat_iv_ogm.o +batman-adv-y += bat_sysfs.o +batman-adv-y += bitarray.o +batman-adv-y += gateway_client.o +batman-adv-y += gateway_common.o +batman-adv-y += hard-interface.o +batman-adv-y += hash.o +batman-adv-y += icmp_socket.o +batman-adv-y += main.o +batman-adv-y += originator.o +batman-adv-y += ring_buffer.o +batman-adv-y += routing.o +batman-adv-y += send.o +batman-adv-y += soft-interface.o +batman-adv-y += translation-table.o +batman-adv-y += unicast.o +batman-adv-y += vis.o diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h new file mode 100644 index 00000000..9852a688 --- /dev/null +++ b/net/batman-adv/bat_algo.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_ +#define _NET_BATMAN_ADV_BAT_ALGO_H_ + +int bat_iv_init(void); + +#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */ diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c new file mode 100644 index 00000000..c3b0548b --- /dev/null +++ b/net/batman-adv/bat_debugfs.c @@ -0,0 +1,381 @@ +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" + +#include <linux/debugfs.h> + +#include "bat_debugfs.h" +#include "translation-table.h" +#include "originator.h" +#include "hard-interface.h" +#include "gateway_common.h" +#include "gateway_client.h" +#include "soft-interface.h" +#include "vis.h" +#include "icmp_socket.h" + +static struct dentry *bat_debugfs; + +#ifdef CONFIG_BATMAN_ADV_DEBUG +#define LOG_BUFF_MASK (log_buff_len-1) +#define LOG_BUFF(idx) (debug_log->log_buff[(idx) & LOG_BUFF_MASK]) + +static int log_buff_len = LOG_BUF_LEN; + +static void emit_log_char(struct debug_log *debug_log, char c) +{ + LOG_BUFF(debug_log->log_end) = c; + debug_log->log_end++; + + if (debug_log->log_end - debug_log->log_start > log_buff_len) + debug_log->log_start = debug_log->log_end - log_buff_len; +} + +__printf(2, 3) +static int fdebug_log(struct debug_log *debug_log, const char *fmt, ...) +{ + va_list args; + static char debug_log_buf[256]; + char *p; + + if (!debug_log) + return 0; + + spin_lock_bh(&debug_log->lock); + va_start(args, fmt); + vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args); + va_end(args); + + for (p = debug_log_buf; *p != 0; p++) + emit_log_char(debug_log, *p); + + spin_unlock_bh(&debug_log->lock); + + wake_up(&debug_log->queue_wait); + + return 0; +} + +int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) 
+{ + va_list args; + char tmp_log_buf[256]; + + va_start(args, fmt); + vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args); + fdebug_log(bat_priv->debug_log, "[%10lu] %s", + (jiffies / HZ), tmp_log_buf); + va_end(args); + + return 0; +} + +static int log_open(struct inode *inode, struct file *file) +{ + nonseekable_open(inode, file); + file->private_data = inode->i_private; + inc_module_count(); + return 0; +} + +static int log_release(struct inode *inode, struct file *file) +{ + dec_module_count(); + return 0; +} + +static ssize_t log_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct bat_priv *bat_priv = file->private_data; + struct debug_log *debug_log = bat_priv->debug_log; + int error, i = 0; + char c; + + if ((file->f_flags & O_NONBLOCK) && + !(debug_log->log_end - debug_log->log_start)) + return -EAGAIN; + + if (!buf) + return -EINVAL; + + if (count == 0) + return 0; + + if (!access_ok(VERIFY_WRITE, buf, count)) + return -EFAULT; + + error = wait_event_interruptible(debug_log->queue_wait, + (debug_log->log_start - debug_log->log_end)); + + if (error) + return error; + + spin_lock_bh(&debug_log->lock); + + while ((!error) && (i < count) && + (debug_log->log_start != debug_log->log_end)) { + c = LOG_BUFF(debug_log->log_start); + + debug_log->log_start++; + + spin_unlock_bh(&debug_log->lock); + + error = __put_user(c, buf); + + spin_lock_bh(&debug_log->lock); + + buf++; + i++; + + } + + spin_unlock_bh(&debug_log->lock); + + if (!error) + return i; + + return error; +} + +static unsigned int log_poll(struct file *file, poll_table *wait) +{ + struct bat_priv *bat_priv = file->private_data; + struct debug_log *debug_log = bat_priv->debug_log; + + poll_wait(file, &debug_log->queue_wait, wait); + + if (debug_log->log_end - debug_log->log_start) + return POLLIN | POLLRDNORM; + + return 0; +} + +static const struct file_operations log_fops = { + .open = log_open, + .release = log_release, + .read = log_read, + .poll = log_poll, + .llseek = no_llseek, +}; + +static int debug_log_setup(struct bat_priv *bat_priv) +{ + struct dentry *d; + + if (!bat_priv->debug_dir) + goto err; + + bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC); + if (!bat_priv->debug_log) + goto err; + + spin_lock_init(&bat_priv->debug_log->lock); + init_waitqueue_head(&bat_priv->debug_log->queue_wait); + + d = debugfs_create_file("log", S_IFREG | S_IRUSR, + bat_priv->debug_dir, bat_priv, &log_fops); + if (!d) + goto err; + + return 0; + +err: + return 1; +} + +static void debug_log_cleanup(struct bat_priv *bat_priv) +{ + kfree(bat_priv->debug_log); + bat_priv->debug_log = NULL; +} +#else /* CONFIG_BATMAN_ADV_DEBUG */ +static int debug_log_setup(struct bat_priv *bat_priv) +{ + bat_priv->debug_log = NULL; + return 0; +} + +static void debug_log_cleanup(struct bat_priv *bat_priv) +{ + return; +} +#endif + +static int bat_algorithms_open(struct inode *inode, struct file *file) +{ + return single_open(file, bat_algo_seq_print_text, NULL); +} + +static int originators_open(struct inode *inode, struct file *file) +{ + struct net_device *net_dev = (struct net_device *)inode->i_private; + return single_open(file, orig_seq_print_text, net_dev); +} + +static int gateways_open(struct inode *inode, struct file *file) +{ + struct net_device *net_dev = (struct net_device *)inode->i_private; + return single_open(file, gw_client_seq_print_text, net_dev); +} + +static int softif_neigh_open(struct inode *inode, struct file *file) +{ + struct net_device *net_dev = (struct net_device
*)inode->i_private; + return single_open(file, softif_neigh_seq_print_text, net_dev); +} + +static int transtable_global_open(struct inode *inode, struct file *file) +{ + struct net_device *net_dev = (struct net_device *)inode->i_private; + return single_open(file, tt_global_seq_print_text, net_dev); +} + +static int transtable_local_open(struct inode *inode, struct file *file) +{ + struct net_device *net_dev = (struct net_device *)inode->i_private; + return single_open(file, tt_local_seq_print_text, net_dev); +} + +static int vis_data_open(struct inode *inode, struct file *file) +{ + struct net_device *net_dev = (struct net_device *)inode->i_private; + return single_open(file, vis_seq_print_text, net_dev); +} + +struct bat_debuginfo { + struct attribute attr; + const struct file_operations fops; +}; + +#define BAT_DEBUGINFO(_name, _mode, _open) \ +struct bat_debuginfo bat_debuginfo_##_name = { \ + .attr = { .name = __stringify(_name), \ + .mode = _mode, }, \ + .fops = { .owner = THIS_MODULE, \ + .open = _open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } \ +}; + +static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open); +static BAT_DEBUGINFO(originators, S_IRUGO, originators_open); +static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); +static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open); +static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open); +static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open); +static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open); + +static struct bat_debuginfo *mesh_debuginfos[] = { + &bat_debuginfo_originators, + &bat_debuginfo_gateways, + &bat_debuginfo_softif_neigh, + &bat_debuginfo_transtable_global, + &bat_debuginfo_transtable_local, + &bat_debuginfo_vis_data, + NULL, +}; + +void debugfs_init(void) +{ + struct bat_debuginfo *bat_debug; + struct dentry *file; + + bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL); + if (bat_debugfs == ERR_PTR(-ENODEV)) + bat_debugfs = NULL; + + if (!bat_debugfs) + goto out; + + bat_debug = &bat_debuginfo_routing_algos; + file = debugfs_create_file(bat_debug->attr.name, + S_IFREG | bat_debug->attr.mode, + bat_debugfs, NULL, &bat_debug->fops); + if (!file) + pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name); + +out: + return; +} + +void debugfs_destroy(void) +{ + if (bat_debugfs) { + debugfs_remove_recursive(bat_debugfs); + bat_debugfs = NULL; + } +} + +int debugfs_add_meshif(struct net_device *dev) +{ + struct bat_priv *bat_priv = netdev_priv(dev); + struct bat_debuginfo **bat_debug; + struct dentry *file; + + if (!bat_debugfs) + goto out; + + bat_priv->debug_dir = debugfs_create_dir(dev->name, bat_debugfs); + if (!bat_priv->debug_dir) + goto out; + + bat_socket_setup(bat_priv); + debug_log_setup(bat_priv); + + for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) { + file = debugfs_create_file(((*bat_debug)->attr).name, + S_IFREG | ((*bat_debug)->attr).mode, + bat_priv->debug_dir, + dev, &(*bat_debug)->fops); + if (!file) { + bat_err(dev, "Can't add debugfs file: %s/%s\n", + dev->name, ((*bat_debug)->attr).name); + goto rem_attr; + } + } + + return 0; +rem_attr: + debugfs_remove_recursive(bat_priv->debug_dir); + bat_priv->debug_dir = NULL; +out: +#ifdef CONFIG_DEBUG_FS + return -ENOMEM; +#else + return 0; +#endif /* CONFIG_DEBUG_FS */ +} + +void debugfs_del_meshif(struct net_device *dev) +{ + struct bat_priv *bat_priv = netdev_priv(dev); + + debug_log_cleanup(bat_priv); + + if (bat_debugfs) { + 
debugfs_remove_recursive(bat_priv->debug_dir); + bat_priv->debug_dir = NULL; + } +} diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/bat_debugfs.h new file mode 100644 index 00000000..d605c674 --- /dev/null +++ b/net/batman-adv/bat_debugfs.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + + +#ifndef _NET_BATMAN_ADV_DEBUGFS_H_ +#define _NET_BATMAN_ADV_DEBUGFS_H_ + +#define DEBUGFS_BAT_SUBDIR "batman_adv" + +void debugfs_init(void); +void debugfs_destroy(void); +int debugfs_add_meshif(struct net_device *dev); +void debugfs_del_meshif(struct net_device *dev); + +#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */ diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c new file mode 100644 index 00000000..a6d5d63f --- /dev/null +++ b/net/batman-adv/bat_iv_ogm.c @@ -0,0 +1,1182 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "translation-table.h" +#include "ring_buffer.h" +#include "originator.h" +#include "routing.h" +#include "gateway_common.h" +#include "gateway_client.h" +#include "hard-interface.h" +#include "send.h" +#include "bat_algo.h" + +static void bat_iv_ogm_init(struct hard_iface *hard_iface) +{ + struct batman_ogm_packet *batman_ogm_packet; + + hard_iface->packet_len = BATMAN_OGM_LEN; + hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); + + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; + batman_ogm_packet->header.packet_type = BAT_OGM; + batman_ogm_packet->header.version = COMPAT_VERSION; + batman_ogm_packet->header.ttl = 2; + batman_ogm_packet->flags = NO_FLAGS; + batman_ogm_packet->tq = TQ_MAX_VALUE; + batman_ogm_packet->tt_num_changes = 0; + batman_ogm_packet->ttvn = 0; +} + +static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface) +{ + struct batman_ogm_packet *batman_ogm_packet; + + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; + batman_ogm_packet->flags = PRIMARIES_FIRST_HOP; + batman_ogm_packet->header.ttl = TTL; +} + +static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface) +{ + struct batman_ogm_packet *batman_ogm_packet; + + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; + memcpy(batman_ogm_packet->orig, + hard_iface->net_dev->dev_addr, ETH_ALEN); + memcpy(batman_ogm_packet->prev_sender, + hard_iface->net_dev->dev_addr, ETH_ALEN); +} + +/* when do we schedule our own ogm to be sent */ +static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv) +{ + return jiffies + msecs_to_jiffies( + atomic_read(&bat_priv->orig_interval) - + JITTER + (random32() % (2 * JITTER))); +} + +/* when do we schedule an ogm packet to be sent */ +static unsigned long bat_iv_ogm_fwd_send_time(void) +{ + return jiffies + msecs_to_jiffies(random32() % (JITTER/2)); +} + +/* apply hop penalty for a normal link */ +static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv) +{ + int hop_penalty = atomic_read(&bat_priv->hop_penalty); + return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE); +}
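/* Illustration (editorial, not part of the patch): the parenthesization in
 * bat_iv_ogm_emit_send_time() above matters because '%' and '*' share the
 * same precedence in C and associate left to right, so the spelling
 * "random32() % 2*JITTER" parses as "(random32() % 2) * JITTER" and can
 * only yield 0 or JITTER, while the intent is an even spread over
 * [0, 2*JITTER). A minimal userspace sketch, assuming JITTER is 20 as
 * defined in this series' main.h:
 */
#include <stdio.h>
#include <stdlib.h>

#define JITTER 20

int main(void)
{
	unsigned int r = (unsigned int)rand();

	printf("buggy: %u\n", r % 2*JITTER);     /* (r % 2) * JITTER -> 0 or 20 */
	printf("fixed: %u\n", r % (2 * JITTER)); /* r % 40 -> any of 0..39 */
	return 0;
}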
+/* is there another aggregated packet here? */ +static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len, + int tt_num_changes) +{ + int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes); + + return (next_buff_pos <= packet_len) && + (next_buff_pos <= MAX_AGGREGATION_BYTES); +} + +/* send a batman ogm to a given interface */ +static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, + struct hard_iface *hard_iface) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + char *fwd_str; + uint8_t packet_num; + int16_t buff_pos; + struct batman_ogm_packet *batman_ogm_packet; + struct sk_buff *skb; + + if (hard_iface->if_status != IF_ACTIVE) + return; + + packet_num = 0; + buff_pos = 0; + batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data; + + /* adjust all flags and log packets */ + while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, + batman_ogm_packet->tt_num_changes)) { + + /* we might have aggregated direct link packets with an + * ordinary base packet */ + if ((forw_packet->direct_link_flags & (1 << packet_num)) && + (forw_packet->if_incoming == hard_iface)) + batman_ogm_packet->flags |= DIRECTLINK; + else + batman_ogm_packet->flags &= ~DIRECTLINK; + + fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ? + "Sending own" : + "Forwarding")); + bat_dbg(DBG_BATMAN, bat_priv, + "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", + fwd_str, (packet_num > 0 ? "aggregated " : ""), + batman_ogm_packet->orig, + ntohl(batman_ogm_packet->seqno), + batman_ogm_packet->tq, batman_ogm_packet->header.ttl, + (batman_ogm_packet->flags & DIRECTLINK ? + "on" : "off"), + batman_ogm_packet->ttvn, hard_iface->net_dev->name, + hard_iface->net_dev->dev_addr); + + buff_pos += BATMAN_OGM_LEN + + tt_len(batman_ogm_packet->tt_num_changes); + packet_num++; + batman_ogm_packet = (struct batman_ogm_packet *) + (forw_packet->skb->data + buff_pos); + } + + /* create clone because function is called more than once */ + skb = skb_clone(forw_packet->skb, GFP_ATOMIC); + if (skb) + send_skb_packet(skb, hard_iface, broadcast_addr); +} + +/* send a batman ogm packet */ +static void bat_iv_ogm_emit(struct forw_packet *forw_packet) +{ + struct hard_iface *hard_iface; + struct net_device *soft_iface; + struct bat_priv *bat_priv; + struct hard_iface *primary_if = NULL; + struct batman_ogm_packet *batman_ogm_packet; + unsigned char directlink; + + batman_ogm_packet = (struct batman_ogm_packet *) + (forw_packet->skb->data); + directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); + + if (!forw_packet->if_incoming) { + pr_err("Error - can't forward packet: incoming iface not specified\n"); + goto out; + } + + soft_iface = forw_packet->if_incoming->soft_iface; + bat_priv = netdev_priv(soft_iface); + + if (forw_packet->if_incoming->if_status != IF_ACTIVE) + goto out; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + /* multihomed peer assumed */ + /* non-primary OGMs are only broadcasted on their interface */ + if ((directlink && (batman_ogm_packet->header.ttl == 1)) || + (forw_packet->own && (forw_packet->if_incoming != primary_if))) { + + /* FIXME: what about aggregated packets ? */ + bat_dbg(DBG_BATMAN, bat_priv, + "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n", + (forw_packet->own ?
"Sending own" : "Forwarding"), + batman_ogm_packet->orig, + ntohl(batman_ogm_packet->seqno), + batman_ogm_packet->header.ttl, + forw_packet->if_incoming->net_dev->name, + forw_packet->if_incoming->net_dev->dev_addr); + + /* skb is only used once and than forw_packet is free'd */ + send_skb_packet(forw_packet->skb, forw_packet->if_incoming, + broadcast_addr); + forw_packet->skb = NULL; + + goto out; + } + + /* broadcast on every interface */ + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if (hard_iface->soft_iface != soft_iface) + continue; + + bat_iv_ogm_send_to_if(forw_packet, hard_iface); + } + rcu_read_unlock(); + +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +/* return true if new_packet can be aggregated with forw_packet */ +static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet + *new_batman_ogm_packet, + struct bat_priv *bat_priv, + int packet_len, unsigned long send_time, + bool directlink, + const struct hard_iface *if_incoming, + const struct forw_packet *forw_packet) +{ + struct batman_ogm_packet *batman_ogm_packet; + int aggregated_bytes = forw_packet->packet_len + packet_len; + struct hard_iface *primary_if = NULL; + bool res = false; + + batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data; + + /** + * we can aggregate the current packet to this aggregated packet + * if: + * + * - the send time is within our MAX_AGGREGATION_MS time + * - the resulting packet wont be bigger than + * MAX_AGGREGATION_BYTES + */ + + if (time_before(send_time, forw_packet->send_time) && + time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS), + forw_packet->send_time) && + (aggregated_bytes <= MAX_AGGREGATION_BYTES)) { + + /** + * check aggregation compatibility + * -> direct link packets are broadcasted on + * their interface only + * -> aggregate packet if the current packet is + * a "global" packet as well as the base + * packet + */ + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + /* packets without direct link flag and high TTL + * are flooded through the net */ + if ((!directlink) && + (!(batman_ogm_packet->flags & DIRECTLINK)) && + (batman_ogm_packet->header.ttl != 1) && + + /* own packets originating non-primary + * interfaces leave only that interface */ + ((!forw_packet->own) || + (forw_packet->if_incoming == primary_if))) { + res = true; + goto out; + } + + /* if the incoming packet is sent via this one + * interface only - we still can aggregate */ + if ((directlink) && + (new_batman_ogm_packet->header.ttl == 1) && + (forw_packet->if_incoming == if_incoming) && + + /* packets from direct neighbors or + * own secondary interface packets + * (= secondary interface packets in general) */ + (batman_ogm_packet->flags & DIRECTLINK || + (forw_packet->own && + forw_packet->if_incoming != primary_if))) { + res = true; + goto out; + } + } + +out: + if (primary_if) + hardif_free_ref(primary_if); + return res; +} + +/* create a new aggregated packet and add this packet to it */ +static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, + int packet_len, unsigned long send_time, + bool direct_link, + struct hard_iface *if_incoming, + int own_packet) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct forw_packet *forw_packet_aggr; + unsigned char *skb_buff; + + if (!atomic_inc_not_zero(&if_incoming->refcount)) + return; + + /* own packet should always be scheduled */ + if (!own_packet) { + if 
(!atomic_dec_not_zero(&bat_priv->batman_queue_left)) { + bat_dbg(DBG_BATMAN, bat_priv, + "batman packet queue full\n"); + goto out; + } + } + + forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC); + if (!forw_packet_aggr) { + if (!own_packet) + atomic_inc(&bat_priv->batman_queue_left); + goto out; + } + + if ((atomic_read(&bat_priv->aggregated_ogms)) && + (packet_len < MAX_AGGREGATION_BYTES)) + forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES + + sizeof(struct ethhdr)); + else + forw_packet_aggr->skb = dev_alloc_skb(packet_len + + sizeof(struct ethhdr)); + + if (!forw_packet_aggr->skb) { + if (!own_packet) + atomic_inc(&bat_priv->batman_queue_left); + kfree(forw_packet_aggr); + goto out; + } + skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr)); + + INIT_HLIST_NODE(&forw_packet_aggr->list); + + skb_buff = skb_put(forw_packet_aggr->skb, packet_len); + forw_packet_aggr->packet_len = packet_len; + memcpy(skb_buff, packet_buff, packet_len); + + forw_packet_aggr->own = own_packet; + forw_packet_aggr->if_incoming = if_incoming; + forw_packet_aggr->num_packets = 0; + forw_packet_aggr->direct_link_flags = NO_FLAGS; + forw_packet_aggr->send_time = send_time; + + /* save packet direct link flag status */ + if (direct_link) + forw_packet_aggr->direct_link_flags |= 1; + + /* add new packet to packet list */ + spin_lock_bh(&bat_priv->forw_bat_list_lock); + hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list); + spin_unlock_bh(&bat_priv->forw_bat_list_lock); + + /* start timer for this packet */ + INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work, + send_outstanding_bat_ogm_packet); + queue_delayed_work(bat_event_workqueue, + &forw_packet_aggr->delayed_work, + send_time - jiffies); + + return; +out: + hardif_free_ref(if_incoming); +} + +/* aggregate a new packet into the existing ogm packet */ +static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr, + const unsigned char *packet_buff, + int packet_len, bool direct_link) +{ + unsigned char *skb_buff; + + skb_buff = skb_put(forw_packet_aggr->skb, packet_len); + memcpy(skb_buff, packet_buff, packet_len); + forw_packet_aggr->packet_len += packet_len; + forw_packet_aggr->num_packets++; + + /* save packet direct link flag status */ + if (direct_link) + forw_packet_aggr->direct_link_flags |= + (1 << forw_packet_aggr->num_packets); +} + +static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv, + unsigned char *packet_buff, + int packet_len, struct hard_iface *if_incoming, + int own_packet, unsigned long send_time) +{ + /** + * _aggr -> pointer to the packet we want to aggregate with + * _pos -> pointer to the position in the queue + */ + struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL; + struct hlist_node *tmp_node; + struct batman_ogm_packet *batman_ogm_packet; + bool direct_link; + + batman_ogm_packet = (struct batman_ogm_packet *)packet_buff; + direct_link = batman_ogm_packet->flags & DIRECTLINK ? 
1 : 0; + + /* find position for the packet in the forward queue */ + spin_lock_bh(&bat_priv->forw_bat_list_lock); + /* own packets are not to be aggregated */ + if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) { + hlist_for_each_entry(forw_packet_pos, tmp_node, + &bat_priv->forw_bat_list, list) { + if (bat_iv_ogm_can_aggregate(batman_ogm_packet, + bat_priv, packet_len, + send_time, direct_link, + if_incoming, + forw_packet_pos)) { + forw_packet_aggr = forw_packet_pos; + break; + } + } + } + + /* nothing to aggregate with - either aggregation disabled or no + * suitable aggregation packet found */ + if (!forw_packet_aggr) { + /* the following section can run without the lock */ + spin_unlock_bh(&bat_priv->forw_bat_list_lock); + + /** + * if we could not aggregate this packet with one of the others + * we hold it back for a while, so that it might be aggregated + * later on + */ + if ((!own_packet) && + (atomic_read(&bat_priv->aggregated_ogms))) + send_time += msecs_to_jiffies(MAX_AGGREGATION_MS); + + bat_iv_ogm_aggregate_new(packet_buff, packet_len, + send_time, direct_link, + if_incoming, own_packet); + } else { + bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff, + packet_len, direct_link); + spin_unlock_bh(&bat_priv->forw_bat_list_lock); + } +} + +static void bat_iv_ogm_forward(struct orig_node *orig_node, + const struct ethhdr *ethhdr, + struct batman_ogm_packet *batman_ogm_packet, + int directlink, struct hard_iface *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct neigh_node *router; + uint8_t in_tq, in_ttl, tq_avg = 0; + uint8_t tt_num_changes; + + if (batman_ogm_packet->header.ttl <= 1) { + bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n"); + return; + } + + router = orig_node_get_router(orig_node); + + in_tq = batman_ogm_packet->tq; + in_ttl = batman_ogm_packet->header.ttl; + tt_num_changes = batman_ogm_packet->tt_num_changes; + + batman_ogm_packet->header.ttl--; + memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN); + + /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast + * of our best tq value */ + if (router && router->tq_avg != 0) { + + /* rebroadcast ogm of best ranking neighbor as is */ + if (!compare_eth(router->addr, ethhdr->h_source)) { + batman_ogm_packet->tq = router->tq_avg; + + if (router->last_ttl) + batman_ogm_packet->header.ttl = + router->last_ttl - 1; + } + + tq_avg = router->tq_avg; + } + + if (router) + neigh_node_free_ref(router); + + /* apply hop penalty */ + batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv); + + bat_dbg(DBG_BATMAN, bat_priv, + "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n", + in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1, + batman_ogm_packet->header.ttl); + + batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno); + batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc); + + /* switch off the primaries first hop flag when forwarding */ + batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP; + if (directlink) + batman_ogm_packet->flags |= DIRECTLINK; + else + batman_ogm_packet->flags &= ~DIRECTLINK; + + bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, + BATMAN_OGM_LEN + tt_len(tt_num_changes), + if_incoming, 0, bat_iv_ogm_fwd_send_time()); +} + +static void bat_iv_ogm_schedule(struct hard_iface *hard_iface, + int tt_num_changes) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batman_ogm_packet *batman_ogm_packet; + struct hard_iface
*primary_if; + int vis_server; + + vis_server = atomic_read(&bat_priv->vis_mode); + primary_if = primary_if_get_selected(bat_priv); + + batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; + + /* change sequence number to network order */ + batman_ogm_packet->seqno = + htonl((uint32_t)atomic_read(&hard_iface->seqno)); + + batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn); + batman_ogm_packet->tt_crc = htons((uint16_t) + atomic_read(&bat_priv->tt_crc)); + if (tt_num_changes >= 0) + batman_ogm_packet->tt_num_changes = tt_num_changes; + + if (vis_server == VIS_TYPE_SERVER_SYNC) + batman_ogm_packet->flags |= VIS_SERVER; + else + batman_ogm_packet->flags &= ~VIS_SERVER; + + if ((hard_iface == primary_if) && + (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) + batman_ogm_packet->gw_flags = + (uint8_t)atomic_read(&bat_priv->gw_bandwidth); + else + batman_ogm_packet->gw_flags = NO_FLAGS; + + atomic_inc(&hard_iface->seqno); + + slide_own_bcast_window(hard_iface); + bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff, + hard_iface->packet_len, hard_iface, 1, + bat_iv_ogm_emit_send_time(bat_priv)); + + if (primary_if) + hardif_free_ref(primary_if); +} + +static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const struct ethhdr *ethhdr, + const struct batman_ogm_packet + *batman_ogm_packet, + struct hard_iface *if_incoming, + const unsigned char *tt_buff, + int is_duplicate) +{ + struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; + struct neigh_node *router = NULL; + struct orig_node *orig_node_tmp; + struct hlist_node *node; + uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; + + bat_dbg(DBG_BATMAN, bat_priv, + "update_originator(): Searching and updating originator entry of received packet\n"); + + rcu_read_lock(); + hlist_for_each_entry_rcu(tmp_neigh_node, node, + &orig_node->neigh_list, list) { + if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && + (tmp_neigh_node->if_incoming == if_incoming) && + atomic_inc_not_zero(&tmp_neigh_node->refcount)) { + if (neigh_node) + neigh_node_free_ref(neigh_node); + neigh_node = tmp_neigh_node; + continue; + } + + if (is_duplicate) + continue; + + spin_lock_bh(&tmp_neigh_node->tq_lock); + ring_buffer_set(tmp_neigh_node->tq_recv, + &tmp_neigh_node->tq_index, 0); + tmp_neigh_node->tq_avg = + ring_buffer_avg(tmp_neigh_node->tq_recv); + spin_unlock_bh(&tmp_neigh_node->tq_lock); + } + + if (!neigh_node) { + struct orig_node *orig_tmp; + + orig_tmp = get_orig_node(bat_priv, ethhdr->h_source); + if (!orig_tmp) + goto unlock; + + neigh_node = create_neighbor(orig_node, orig_tmp, + ethhdr->h_source, if_incoming); + + orig_node_free_ref(orig_tmp); + if (!neigh_node) + goto unlock; + } else + bat_dbg(DBG_BATMAN, bat_priv, + "Updating existing last-hop neighbor of originator\n"); + + rcu_read_unlock(); + + orig_node->flags = batman_ogm_packet->flags; + neigh_node->last_valid = jiffies; + + spin_lock_bh(&neigh_node->tq_lock); + ring_buffer_set(neigh_node->tq_recv, + &neigh_node->tq_index, + batman_ogm_packet->tq); + neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); + spin_unlock_bh(&neigh_node->tq_lock); + + if (!is_duplicate) { + orig_node->last_ttl = batman_ogm_packet->header.ttl; + neigh_node->last_ttl = batman_ogm_packet->header.ttl; + } + + bonding_candidate_add(orig_node, neigh_node); + + /* if this neighbor already is our next hop there is nothing + * to change */ + router = orig_node_get_router(orig_node); + if (router == neigh_node) + goto update_tt; + + /* if this 
neighbor does not offer a better TQ we won't consider it */ + if (router && (router->tq_avg > neigh_node->tq_avg)) + goto update_tt; + + /* if the TQ is the same and the link not more symmetric we + * won't consider it either */ + if (router && (neigh_node->tq_avg == router->tq_avg)) { + orig_node_tmp = router->orig_node; + spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); + bcast_own_sum_orig = + orig_node_tmp->bcast_own_sum[if_incoming->if_num]; + spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); + + orig_node_tmp = neigh_node->orig_node; + spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); + bcast_own_sum_neigh = + orig_node_tmp->bcast_own_sum[if_incoming->if_num]; + spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); + + if (bcast_own_sum_orig >= bcast_own_sum_neigh) + goto update_tt; + } + + update_route(bat_priv, orig_node, neigh_node); + +update_tt: + /* I have to check for transtable changes only if the OGM has been + * sent through a primary interface */ + if (((batman_ogm_packet->orig != ethhdr->h_source) && + (batman_ogm_packet->header.ttl > 2)) || + (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) + tt_update_orig(bat_priv, orig_node, tt_buff, + batman_ogm_packet->tt_num_changes, + batman_ogm_packet->ttvn, + batman_ogm_packet->tt_crc); + + if (orig_node->gw_flags != batman_ogm_packet->gw_flags) + gw_node_update(bat_priv, orig_node, + batman_ogm_packet->gw_flags); + + orig_node->gw_flags = batman_ogm_packet->gw_flags; + + /* restart gateway selection if fast or late switching was enabled */ + if ((orig_node->gw_flags) && + (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) && + (atomic_read(&bat_priv->gw_sel_class) > 2)) + gw_check_election(bat_priv, orig_node); + + goto out; + +unlock: + rcu_read_unlock(); +out: + if (neigh_node) + neigh_node_free_ref(neigh_node); + if (router) + neigh_node_free_ref(router); +} + +static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + struct batman_ogm_packet *batman_ogm_packet, + struct hard_iface *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct neigh_node *neigh_node = NULL, *tmp_neigh_node; + struct hlist_node *node; + uint8_t total_count; + uint8_t orig_eq_count, neigh_rq_count, tq_own; + int tq_asym_penalty, ret = 0; + + /* find corresponding one hop neighbor */ + rcu_read_lock(); + hlist_for_each_entry_rcu(tmp_neigh_node, node, + &orig_neigh_node->neigh_list, list) { + + if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig)) + continue; + + if (tmp_neigh_node->if_incoming != if_incoming) + continue; + + if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) + continue; + + neigh_node = tmp_neigh_node; + break; + } + rcu_read_unlock(); + + if (!neigh_node) + neigh_node = create_neighbor(orig_neigh_node, + orig_neigh_node, + orig_neigh_node->orig, + if_incoming); + + if (!neigh_node) + goto out; + + /* if orig_node is direct neighbor update neigh_node last_valid */ + if (orig_node == orig_neigh_node) + neigh_node->last_valid = jiffies; + + orig_node->last_valid = jiffies; + + /* find packet count of corresponding one hop neighbor */ + spin_lock_bh(&orig_node->ogm_cnt_lock); + orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num]; + neigh_rq_count = neigh_node->real_packet_count; + spin_unlock_bh(&orig_node->ogm_cnt_lock); + + /* pay attention to not get a value bigger than 100 % */ + total_count = (orig_eq_count > neigh_rq_count ? 
+ neigh_rq_count : orig_eq_count); + + /* if we have too few packets (too little data) we set tq_own to zero */ + /* if we receive too few packets it is not considered bidirectional */ + if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) || + (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)) + tq_own = 0; + else + /* neigh_node->real_packet_count is never zero as we + * only purge old information when getting new + * information */ + tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count; + + /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE. This does + * affect the nearly-symmetric links only a little, but + * punishes asymmetric links more. This will give a value + * between 0 and TQ_MAX_VALUE + */ + tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE * + (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) * + (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) * + (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) / + (TQ_LOCAL_WINDOW_SIZE * + TQ_LOCAL_WINDOW_SIZE * + TQ_LOCAL_WINDOW_SIZE); + + batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own + * tq_asym_penalty) / + (TQ_MAX_VALUE * TQ_MAX_VALUE)); + + bat_dbg(DBG_BATMAN, bat_priv, + "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n", + orig_node->orig, orig_neigh_node->orig, total_count, + neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq); + + /* if link has the minimum required transmission quality + * consider it bidirectional */ + if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT) + ret = 1; + +out: + if (neigh_node) + neigh_node_free_ref(neigh_node); + return ret; +}
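/* Worked illustration (editorial, not part of the patch) of the TQ
 * calculation above, assuming the constants from this series' main.h
 * (TQ_MAX_VALUE 255, TQ_LOCAL_WINDOW_SIZE 64): if 48 of our own OGMs were
 * echoed back by the neighbor and 48 of its OGMs were received within the
 * window, then tq_own = 255 * 48 / 48 = 255 and tq_asym_penalty =
 * 255 - (255 * 16^3) / 64^3 = 252 (integer math), so an incoming tq of 255
 * is scaled to (255 * 255 * 252) / (255 * 255) = 252. A standalone sketch:
 */
#include <stdio.h>

#define TQ_MAX_VALUE 255
#define TQ_LOCAL_WINDOW_SIZE 64

static int demo_calc_tq(int tq_in, int own_echo_count, int neigh_rq_count)
{
	/* cap at 100%: never count more own echoes than received OGMs */
	int total = own_echo_count > neigh_rq_count ?
		    neigh_rq_count : own_echo_count;
	int tq_own = (TQ_MAX_VALUE * total) / neigh_rq_count;
	int d = TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;
	int tq_asym = TQ_MAX_VALUE - (TQ_MAX_VALUE * d * d * d) /
		      (TQ_LOCAL_WINDOW_SIZE * TQ_LOCAL_WINDOW_SIZE *
		       TQ_LOCAL_WINDOW_SIZE);

	return (tq_in * tq_own * tq_asym) / (TQ_MAX_VALUE * TQ_MAX_VALUE);
}

int main(void)
{
	printf("%d\n", demo_calc_tq(TQ_MAX_VALUE, 48, 48)); /* prints 252 */
	return 0;
}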
+/* processes a batman packet for all interfaces, adjusts the sequence number and + * finds out whether it is a duplicate. + * returns: + * 1 the packet is a duplicate + * 0 the packet has not yet been received + * -1 the packet is old and has been received while the seqno window + * was protected. Caller should drop it. + */ +static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, + const struct batman_ogm_packet + *batman_ogm_packet, + const struct hard_iface *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct orig_node *orig_node; + struct neigh_node *tmp_neigh_node; + struct hlist_node *node; + int is_duplicate = 0; + int32_t seq_diff; + int need_update = 0; + int set_mark, ret = -1; + + orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); + if (!orig_node) + return 0; + + spin_lock_bh(&orig_node->ogm_cnt_lock); + seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno; + + /* signal to the caller that the packet is to be dropped. */ + if (window_protected(bat_priv, seq_diff, + &orig_node->batman_seqno_reset)) + goto out; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tmp_neigh_node, node, + &orig_node->neigh_list, list) { + + is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, + orig_node->last_real_seqno, + batman_ogm_packet->seqno); + + if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && + (tmp_neigh_node->if_incoming == if_incoming)) + set_mark = 1; + else + set_mark = 0; + + /* if the window moved, set the update flag. */ + need_update |= bit_get_packet(bat_priv, + tmp_neigh_node->real_bits, + seq_diff, set_mark); + + tmp_neigh_node->real_packet_count = + bit_packet_count(tmp_neigh_node->real_bits); + } + rcu_read_unlock(); + + if (need_update) { + bat_dbg(DBG_BATMAN, bat_priv, + "updating last_seqno: old %d, new %d\n", + orig_node->last_real_seqno, batman_ogm_packet->seqno); + orig_node->last_real_seqno = batman_ogm_packet->seqno; + } + + ret = is_duplicate; + +out: + spin_unlock_bh(&orig_node->ogm_cnt_lock); + orig_node_free_ref(orig_node); + return ret; +} + +static void bat_iv_ogm_process(const struct ethhdr *ethhdr, + struct batman_ogm_packet *batman_ogm_packet, + const unsigned char *tt_buff, + struct hard_iface *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct hard_iface *hard_iface; + struct orig_node *orig_neigh_node, *orig_node; + struct neigh_node *router = NULL, *router_router = NULL; + struct neigh_node *orig_neigh_router = NULL; + int has_directlink_flag; + int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; + int is_broadcast = 0, is_bidirectional, is_single_hop_neigh; + int is_duplicate; + uint32_t if_incoming_seqno; + + /* Silently drop when the batman packet is actually not a + * correct packet. + * + * This might happen if a packet is padded (e.g. Ethernet has a + * minimum frame length of 64 byte) and the aggregation interprets + * it as an additional length. + * + * TODO: A more sane solution would be to have a bit in the + * batman_ogm_packet to detect whether the packet is the last + * packet in an aggregation. Here we expect that the padding + * is always zero (or not 0x01) + */ + if (batman_ogm_packet->header.packet_type != BAT_OGM) + return; + + /* could be changed by schedule_own_packet() */ + if_incoming_seqno = atomic_read(&if_incoming->seqno); + + has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0); + + is_single_hop_neigh = (compare_eth(ethhdr->h_source, + batman_ogm_packet->orig) ?
1 : 0); + + bat_dbg(DBG_BATMAN, bat_priv, + "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", + ethhdr->h_source, if_incoming->net_dev->name, + if_incoming->net_dev->dev_addr, batman_ogm_packet->orig, + batman_ogm_packet->prev_sender, batman_ogm_packet->seqno, + batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc, + batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq, + batman_ogm_packet->header.ttl, + batman_ogm_packet->header.version, has_directlink_flag); + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if (hard_iface->if_status != IF_ACTIVE) + continue; + + if (hard_iface->soft_iface != if_incoming->soft_iface) + continue; + + if (compare_eth(ethhdr->h_source, + hard_iface->net_dev->dev_addr)) + is_my_addr = 1; + + if (compare_eth(batman_ogm_packet->orig, + hard_iface->net_dev->dev_addr)) + is_my_orig = 1; + + if (compare_eth(batman_ogm_packet->prev_sender, + hard_iface->net_dev->dev_addr)) + is_my_oldorig = 1; + + if (is_broadcast_ether_addr(ethhdr->h_source)) + is_broadcast = 1; + } + rcu_read_unlock(); + + if (batman_ogm_packet->header.version != COMPAT_VERSION) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: incompatible batman version (%i)\n", + batman_ogm_packet->header.version); + return; + } + + if (is_my_addr) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: received my own broadcast (sender: %pM)\n", + ethhdr->h_source); + return; + } + + if (is_broadcast) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n", + ethhdr->h_source); + return; + } + + if (is_my_orig) { + unsigned long *word; + int offset; + + orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source); + if (!orig_neigh_node) + return; + + /* neighbor has to indicate direct link and it has to + * come via the corresponding interface */ + /* save packet seqno for bidirectional check */ + if (has_directlink_flag && + compare_eth(if_incoming->net_dev->dev_addr, + batman_ogm_packet->orig)) { + offset = if_incoming->if_num * NUM_WORDS; + + spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); + word = &(orig_neigh_node->bcast_own[offset]); + bit_mark(word, + if_incoming_seqno - + batman_ogm_packet->seqno - 2); + orig_neigh_node->bcast_own_sum[if_incoming->if_num] = + bit_packet_count(word); + spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); + } + + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: originator packet from myself (via neighbor)\n"); + orig_node_free_ref(orig_neigh_node); + return; + } + + if (is_my_oldorig) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n", + ethhdr->h_source); + return; + } + + orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); + if (!orig_node) + return; + + is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet, + if_incoming); + + if (is_duplicate == -1) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: packet within seqno protection time (sender: %pM)\n", + ethhdr->h_source); + goto out; + } + + if (batman_ogm_packet->tq == 0) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: originator packet with tq equal 0\n"); + goto out; + } + + router = orig_node_get_router(orig_node); + if (router) + router_router = orig_node_get_router(router->orig_node); + + /* avoid temporary routing loops */ + if (router && router_router && + (compare_eth(router->addr, batman_ogm_packet->prev_sender)) && + !(compare_eth(batman_ogm_packet->orig, 
+ batman_ogm_packet->prev_sender)) && + (compare_eth(router->addr, router_router->addr))) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n", + ethhdr->h_source); + goto out; + } + + /* if sender is a direct neighbor the sender mac equals + * originator mac */ + orig_neigh_node = (is_single_hop_neigh ? + orig_node : + get_orig_node(bat_priv, ethhdr->h_source)); + if (!orig_neigh_node) + goto out; + + orig_neigh_router = orig_node_get_router(orig_neigh_node); + + /* drop packet if sender is not a direct neighbor and if we + * don't route towards it */ + if (!is_single_hop_neigh && (!orig_neigh_router)) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: OGM via unknown neighbor!\n"); + goto out_neigh; + } + + is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node, + batman_ogm_packet, if_incoming); + + bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet); + + /* update ranking if it is not a duplicate or has the same + * seqno and similar ttl as the non-duplicate */ + if (is_bidirectional && + (!is_duplicate || + ((orig_node->last_real_seqno == batman_ogm_packet->seqno) && + (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl)))) + bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, + batman_ogm_packet, if_incoming, + tt_buff, is_duplicate); + + /* is single hop (direct) neighbor */ + if (is_single_hop_neigh) { + + /* mark direct link on incoming interface */ + bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, + 1, if_incoming); + + bat_dbg(DBG_BATMAN, bat_priv, + "Forwarding packet: rebroadcast neighbor packet with direct link flag\n"); + goto out_neigh; + } + + /* multihop originator */ + if (!is_bidirectional) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: not received via bidirectional link\n"); + goto out_neigh; + } + + if (is_duplicate) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: duplicate packet received\n"); + goto out_neigh; + } + + bat_dbg(DBG_BATMAN, bat_priv, + "Forwarding packet: rebroadcast originator packet\n"); + bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet, + 0, if_incoming); + +out_neigh: + if ((orig_neigh_node) && (!is_single_hop_neigh)) + orig_node_free_ref(orig_neigh_node); +out: + if (router) + neigh_node_free_ref(router); + if (router_router) + neigh_node_free_ref(router_router); + if (orig_neigh_router) + neigh_node_free_ref(orig_neigh_router); + + orig_node_free_ref(orig_node); +} + +static void bat_iv_ogm_receive(struct hard_iface *if_incoming, + struct sk_buff *skb) +{ + struct batman_ogm_packet *batman_ogm_packet; + struct ethhdr *ethhdr; + int buff_pos = 0, packet_len; + unsigned char *tt_buff, *packet_buff; + + packet_len = skb_headlen(skb); + ethhdr = (struct ethhdr *)skb_mac_header(skb); + packet_buff = skb->data; + batman_ogm_packet = (struct batman_ogm_packet *)packet_buff; + + /* unpack the aggregated packets and process them one by one */ + do { + /* network to host order for our 32bit seqno and the + orig_interval */ + batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno); + batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc); + + tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN; + + bat_iv_ogm_process(ethhdr, batman_ogm_packet, + tt_buff, if_incoming); + + buff_pos += BATMAN_OGM_LEN + + tt_len(batman_ogm_packet->tt_num_changes); + + batman_ogm_packet = (struct batman_ogm_packet *) + (packet_buff + buff_pos); + } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len, + batman_ogm_packet->tt_num_changes)); +} + +static 
struct bat_algo_ops batman_iv __read_mostly = { + .name = "BATMAN IV", + .bat_ogm_init = bat_iv_ogm_init, + .bat_ogm_init_primary = bat_iv_ogm_init_primary, + .bat_ogm_update_mac = bat_iv_ogm_update_mac, + .bat_ogm_schedule = bat_iv_ogm_schedule, + .bat_ogm_emit = bat_iv_ogm_emit, + .bat_ogm_receive = bat_iv_ogm_receive, +}; + +int __init bat_iv_init(void) +{ + return bat_algo_register(&batman_iv); +} diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c new file mode 100644 index 00000000..68ff759f --- /dev/null +++ b/net/batman-adv/bat_sysfs.c @@ -0,0 +1,685 @@ +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "bat_sysfs.h" +#include "translation-table.h" +#include "originator.h" +#include "hard-interface.h" +#include "gateway_common.h" +#include "gateway_client.h" +#include "vis.h" + +static struct net_device *kobj_to_netdev(struct kobject *obj) +{ + struct device *dev = container_of(obj->parent, struct device, kobj); + return to_net_dev(dev); +} + +static struct bat_priv *kobj_to_batpriv(struct kobject *obj) +{ + struct net_device *net_dev = kobj_to_netdev(obj); + return netdev_priv(net_dev); +} + +#define UEV_TYPE_VAR "BATTYPE=" +#define UEV_ACTION_VAR "BATACTION=" +#define UEV_DATA_VAR "BATDATA=" + +static char *uev_action_str[] = { + "add", + "del", + "change" +}; + +static char *uev_type_str[] = { + "gw" +}; + +/* Use this, if you have customized show and store functions */ +#define BAT_ATTR(_name, _mode, _show, _store) \ +struct bat_attribute bat_attr_##_name = { \ + .attr = {.name = __stringify(_name), \ + .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +}; + +#define BAT_ATTR_STORE_BOOL(_name, _post_func) \ +ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ + char *buff, size_t count) \ +{ \ + struct net_device *net_dev = kobj_to_netdev(kobj); \ + struct bat_priv *bat_priv = netdev_priv(net_dev); \ + return __store_bool_attr(buff, count, _post_func, attr, \ + &bat_priv->_name, net_dev); \ +} + +#define BAT_ATTR_SHOW_BOOL(_name) \ +ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ + char *buff) \ +{ \ + struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \ + return sprintf(buff, "%s\n", \ + atomic_read(&bat_priv->_name) == 0 ? 
\ + "disabled" : "enabled"); \ +} \ + +/* Use this, if you are going to turn a [name] in bat_priv on or off */ +#define BAT_ATTR_BOOL(_name, _mode, _post_func) \ + static BAT_ATTR_STORE_BOOL(_name, _post_func) \ + static BAT_ATTR_SHOW_BOOL(_name) \ + static BAT_ATTR(_name, _mode, show_##_name, store_##_name) + + +#define BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \ +ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ + char *buff, size_t count) \ +{ \ + struct net_device *net_dev = kobj_to_netdev(kobj); \ + struct bat_priv *bat_priv = netdev_priv(net_dev); \ + return __store_uint_attr(buff, count, _min, _max, _post_func, \ + attr, &bat_priv->_name, net_dev); \ +} + +#define BAT_ATTR_SHOW_UINT(_name) \ +ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \ + char *buff) \ +{ \ + struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \ + return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \ +} \ + +/* Use this, if you are going to set [name] in bat_priv to unsigned integer + * values only */ +#define BAT_ATTR_UINT(_name, _mode, _min, _max, _post_func) \ + static BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func) \ + static BAT_ATTR_SHOW_UINT(_name) \ + static BAT_ATTR(_name, _mode, show_##_name, store_##_name) + + +static int store_bool_attr(char *buff, size_t count, + struct net_device *net_dev, + const char *attr_name, atomic_t *attr) +{ + int enabled = -1; + + if (buff[count - 1] == '\n') + buff[count - 1] = '\0'; + + if ((strncmp(buff, "1", 2) == 0) || + (strncmp(buff, "enable", 7) == 0) || + (strncmp(buff, "enabled", 8) == 0)) + enabled = 1; + + if ((strncmp(buff, "0", 2) == 0) || + (strncmp(buff, "disable", 8) == 0) || + (strncmp(buff, "disabled", 9) == 0)) + enabled = 0; + + if (enabled < 0) { + bat_info(net_dev, + "%s: Invalid parameter received: %s\n", + attr_name, buff); + return -EINVAL; + } + + if (atomic_read(attr) == enabled) + return count; + + bat_info(net_dev, "%s: Changing from: %s to: %s\n", attr_name, + atomic_read(attr) == 1 ? "enabled" : "disabled", + enabled == 1 ? 
"enabled" : "disabled"); + + atomic_set(attr, (unsigned)enabled); + return count; +} + +static inline ssize_t __store_bool_attr(char *buff, size_t count, + void (*post_func)(struct net_device *), + struct attribute *attr, + atomic_t *attr_store, struct net_device *net_dev) +{ + int ret; + + ret = store_bool_attr(buff, count, net_dev, attr->name, attr_store); + if (post_func && ret) + post_func(net_dev); + + return ret; +} + +static int store_uint_attr(const char *buff, size_t count, + struct net_device *net_dev, const char *attr_name, + unsigned int min, unsigned int max, atomic_t *attr) +{ + unsigned long uint_val; + int ret; + + ret = kstrtoul(buff, 10, &uint_val); + if (ret) { + bat_info(net_dev, + "%s: Invalid parameter received: %s\n", + attr_name, buff); + return -EINVAL; + } + + if (uint_val < min) { + bat_info(net_dev, "%s: Value is too small: %lu min: %u\n", + attr_name, uint_val, min); + return -EINVAL; + } + + if (uint_val > max) { + bat_info(net_dev, "%s: Value is too big: %lu max: %u\n", + attr_name, uint_val, max); + return -EINVAL; + } + + if (atomic_read(attr) == uint_val) + return count; + + bat_info(net_dev, "%s: Changing from: %i to: %lu\n", + attr_name, atomic_read(attr), uint_val); + + atomic_set(attr, uint_val); + return count; +} + +static inline ssize_t __store_uint_attr(const char *buff, size_t count, + int min, int max, + void (*post_func)(struct net_device *), + const struct attribute *attr, + atomic_t *attr_store, struct net_device *net_dev) +{ + int ret; + + ret = store_uint_attr(buff, count, net_dev, attr->name, + min, max, attr_store); + if (post_func && ret) + post_func(net_dev); + + return ret; +} + +static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr, + char *buff) +{ + struct bat_priv *bat_priv = kobj_to_batpriv(kobj); + int vis_mode = atomic_read(&bat_priv->vis_mode); + + return sprintf(buff, "%s\n", + vis_mode == VIS_TYPE_CLIENT_UPDATE ? + "client" : "server"); +} + +static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr, + char *buff, size_t count) +{ + struct net_device *net_dev = kobj_to_netdev(kobj); + struct bat_priv *bat_priv = netdev_priv(net_dev); + unsigned long val; + int ret, vis_mode_tmp = -1; + + ret = kstrtoul(buff, 10, &val); + + if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) || + (strncmp(buff, "client", 6) == 0) || + (strncmp(buff, "off", 3) == 0)) + vis_mode_tmp = VIS_TYPE_CLIENT_UPDATE; + + if (((count == 2) && (!ret) && (val == VIS_TYPE_SERVER_SYNC)) || + (strncmp(buff, "server", 6) == 0)) + vis_mode_tmp = VIS_TYPE_SERVER_SYNC; + + if (vis_mode_tmp < 0) { + if (buff[count - 1] == '\n') + buff[count - 1] = '\0'; + + bat_info(net_dev, + "Invalid parameter for 'vis mode' setting received: %s\n", + buff); + return -EINVAL; + } + + if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp) + return count; + + bat_info(net_dev, "Changing vis mode from: %s to: %s\n", + atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ? + "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ? 
+ "client" : "server"); + + atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp); + return count; +} + +static ssize_t show_bat_algo(struct kobject *kobj, struct attribute *attr, + char *buff) +{ + struct bat_priv *bat_priv = kobj_to_batpriv(kobj); + return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name); +} + +static void post_gw_deselect(struct net_device *net_dev) +{ + struct bat_priv *bat_priv = netdev_priv(net_dev); + gw_deselect(bat_priv); +} + +static ssize_t show_gw_mode(struct kobject *kobj, struct attribute *attr, + char *buff) +{ + struct bat_priv *bat_priv = kobj_to_batpriv(kobj); + int bytes_written; + + switch (atomic_read(&bat_priv->gw_mode)) { + case GW_MODE_CLIENT: + bytes_written = sprintf(buff, "%s\n", GW_MODE_CLIENT_NAME); + break; + case GW_MODE_SERVER: + bytes_written = sprintf(buff, "%s\n", GW_MODE_SERVER_NAME); + break; + default: + bytes_written = sprintf(buff, "%s\n", GW_MODE_OFF_NAME); + break; + } + + return bytes_written; +} + +static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr, + char *buff, size_t count) +{ + struct net_device *net_dev = kobj_to_netdev(kobj); + struct bat_priv *bat_priv = netdev_priv(net_dev); + char *curr_gw_mode_str; + int gw_mode_tmp = -1; + + if (buff[count - 1] == '\n') + buff[count - 1] = '\0'; + + if (strncmp(buff, GW_MODE_OFF_NAME, strlen(GW_MODE_OFF_NAME)) == 0) + gw_mode_tmp = GW_MODE_OFF; + + if (strncmp(buff, GW_MODE_CLIENT_NAME, + strlen(GW_MODE_CLIENT_NAME)) == 0) + gw_mode_tmp = GW_MODE_CLIENT; + + if (strncmp(buff, GW_MODE_SERVER_NAME, + strlen(GW_MODE_SERVER_NAME)) == 0) + gw_mode_tmp = GW_MODE_SERVER; + + if (gw_mode_tmp < 0) { + bat_info(net_dev, + "Invalid parameter for 'gw mode' setting received: %s\n", + buff); + return -EINVAL; + } + + if (atomic_read(&bat_priv->gw_mode) == gw_mode_tmp) + return count; + + switch (atomic_read(&bat_priv->gw_mode)) { + case GW_MODE_CLIENT: + curr_gw_mode_str = GW_MODE_CLIENT_NAME; + break; + case GW_MODE_SERVER: + curr_gw_mode_str = GW_MODE_SERVER_NAME; + break; + default: + curr_gw_mode_str = GW_MODE_OFF_NAME; + break; + } + + bat_info(net_dev, "Changing gw mode from: %s to: %s\n", + curr_gw_mode_str, buff); + + gw_deselect(bat_priv); + atomic_set(&bat_priv->gw_mode, (unsigned)gw_mode_tmp); + return count; +} + +static ssize_t show_gw_bwidth(struct kobject *kobj, struct attribute *attr, + char *buff) +{ + struct bat_priv *bat_priv = kobj_to_batpriv(kobj); + int down, up; + int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth); + + gw_bandwidth_to_kbit(gw_bandwidth, &down, &up); + return sprintf(buff, "%i%s/%i%s\n", + (down > 2048 ? down / 1024 : down), + (down > 2048 ? "MBit" : "KBit"), + (up > 2048 ? up / 1024 : up), + (up > 2048 ? 
"MBit" : "KBit")); +} + +static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr, + char *buff, size_t count) +{ + struct net_device *net_dev = kobj_to_netdev(kobj); + + if (buff[count - 1] == '\n') + buff[count - 1] = '\0'; + + return gw_bandwidth_set(net_dev, buff, count); +} + +BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); +BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); +BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); +static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); +static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL); +static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode); +BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL); +BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL); +BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, + post_gw_deselect); +static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, + store_gw_bwidth); +#ifdef CONFIG_BATMAN_ADV_DEBUG +BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL); +#endif + +static struct bat_attribute *mesh_attrs[] = { + &bat_attr_aggregated_ogms, + &bat_attr_bonding, + &bat_attr_fragmentation, + &bat_attr_ap_isolation, + &bat_attr_vis_mode, + &bat_attr_routing_algo, + &bat_attr_gw_mode, + &bat_attr_orig_interval, + &bat_attr_hop_penalty, + &bat_attr_gw_sel_class, + &bat_attr_gw_bandwidth, +#ifdef CONFIG_BATMAN_ADV_DEBUG + &bat_attr_log_level, +#endif + NULL, +}; + +int sysfs_add_meshif(struct net_device *dev) +{ + struct kobject *batif_kobject = &dev->dev.kobj; + struct bat_priv *bat_priv = netdev_priv(dev); + struct bat_attribute **bat_attr; + int err; + + bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR, + batif_kobject); + if (!bat_priv->mesh_obj) { + bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name, + SYSFS_IF_MESH_SUBDIR); + goto out; + } + + for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) { + err = sysfs_create_file(bat_priv->mesh_obj, + &((*bat_attr)->attr)); + if (err) { + bat_err(dev, "Can't add sysfs file: %s/%s/%s\n", + dev->name, SYSFS_IF_MESH_SUBDIR, + ((*bat_attr)->attr).name); + goto rem_attr; + } + } + + return 0; + +rem_attr: + for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) + sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr)); + + kobject_put(bat_priv->mesh_obj); + bat_priv->mesh_obj = NULL; +out: + return -ENOMEM; +} + +void sysfs_del_meshif(struct net_device *dev) +{ + struct bat_priv *bat_priv = netdev_priv(dev); + struct bat_attribute **bat_attr; + + for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) + sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr)); + + kobject_put(bat_priv->mesh_obj); + bat_priv->mesh_obj = NULL; +} + +static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr, + char *buff) +{ + struct net_device *net_dev = kobj_to_netdev(kobj); + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); + ssize_t length; + + if (!hard_iface) + return 0; + + length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ? 
+ "none" : hard_iface->soft_iface->name); + + hardif_free_ref(hard_iface); + + return length; +} + +static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr, + char *buff, size_t count) +{ + struct net_device *net_dev = kobj_to_netdev(kobj); + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); + int status_tmp = -1; + int ret = count; + + if (!hard_iface) + return count; + + if (buff[count - 1] == '\n') + buff[count - 1] = '\0'; + + if (strlen(buff) >= IFNAMSIZ) { + pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n", + buff); + hardif_free_ref(hard_iface); + return -EINVAL; + } + + if (strncmp(buff, "none", 4) == 0) + status_tmp = IF_NOT_IN_USE; + else + status_tmp = IF_I_WANT_YOU; + + if (hard_iface->if_status == status_tmp) + goto out; + + if ((hard_iface->soft_iface) && + (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0)) + goto out; + + if (!rtnl_trylock()) { + ret = -ERESTARTSYS; + goto out; + } + + if (status_tmp == IF_NOT_IN_USE) { + hardif_disable_interface(hard_iface); + goto unlock; + } + + /* if the interface already is in use */ + if (hard_iface->if_status != IF_NOT_IN_USE) + hardif_disable_interface(hard_iface); + + ret = hardif_enable_interface(hard_iface, buff); + +unlock: + rtnl_unlock(); +out: + hardif_free_ref(hard_iface); + return ret; +} + +static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr, + char *buff) +{ + struct net_device *net_dev = kobj_to_netdev(kobj); + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); + ssize_t length; + + if (!hard_iface) + return 0; + + switch (hard_iface->if_status) { + case IF_TO_BE_REMOVED: + length = sprintf(buff, "disabling\n"); + break; + case IF_INACTIVE: + length = sprintf(buff, "inactive\n"); + break; + case IF_ACTIVE: + length = sprintf(buff, "active\n"); + break; + case IF_TO_BE_ACTIVATED: + length = sprintf(buff, "enabling\n"); + break; + case IF_NOT_IN_USE: + default: + length = sprintf(buff, "not in use\n"); + break; + } + + hardif_free_ref(hard_iface); + + return length; +} + +static BAT_ATTR(mesh_iface, S_IRUGO | S_IWUSR, + show_mesh_iface, store_mesh_iface); +static BAT_ATTR(iface_status, S_IRUGO, show_iface_status, NULL); + +static struct bat_attribute *batman_attrs[] = { + &bat_attr_mesh_iface, + &bat_attr_iface_status, + NULL, +}; + +int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev) +{ + struct kobject *hardif_kobject = &dev->dev.kobj; + struct bat_attribute **bat_attr; + int err; + + *hardif_obj = kobject_create_and_add(SYSFS_IF_BAT_SUBDIR, + hardif_kobject); + + if (!*hardif_obj) { + bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name, + SYSFS_IF_BAT_SUBDIR); + goto out; + } + + for (bat_attr = batman_attrs; *bat_attr; ++bat_attr) { + err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr)); + if (err) { + bat_err(dev, "Can't add sysfs file: %s/%s/%s\n", + dev->name, SYSFS_IF_BAT_SUBDIR, + ((*bat_attr)->attr).name); + goto rem_attr; + } + } + + return 0; + +rem_attr: + for (bat_attr = batman_attrs; *bat_attr; ++bat_attr) + sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr)); +out: + return -ENOMEM; +} + +void sysfs_del_hardif(struct kobject **hardif_obj) +{ + kobject_put(*hardif_obj); + *hardif_obj = NULL; +} + +int throw_uevent(struct bat_priv *bat_priv, enum uev_type type, + enum uev_action action, const char *data) +{ + int ret = -1; + struct hard_iface *primary_if = NULL; + struct kobject *bat_kobj; + char *uevent_env[4] = { NULL, NULL, NULL, NULL }; + + 
primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + bat_kobj = &primary_if->soft_iface->dev.kobj; + + uevent_env[0] = kmalloc(strlen(UEV_TYPE_VAR) + + strlen(uev_type_str[type]) + 1, + GFP_ATOMIC); + if (!uevent_env[0]) + goto out; + + sprintf(uevent_env[0], "%s%s", UEV_TYPE_VAR, uev_type_str[type]); + + uevent_env[1] = kmalloc(strlen(UEV_ACTION_VAR) + + strlen(uev_action_str[action]) + 1, + GFP_ATOMIC); + if (!uevent_env[1]) + goto out; + + sprintf(uevent_env[1], "%s%s", UEV_ACTION_VAR, uev_action_str[action]); + + /* If the event is DEL, ignore the data field */ + if (action != UEV_DEL) { + uevent_env[2] = kmalloc(strlen(UEV_DATA_VAR) + + strlen(data) + 1, GFP_ATOMIC); + if (!uevent_env[2]) + goto out; + + sprintf(uevent_env[2], "%s%s", UEV_DATA_VAR, data); + } + + ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env); +out: + kfree(uevent_env[0]); + kfree(uevent_env[1]); + kfree(uevent_env[2]); + + if (primary_if) + hardif_free_ref(primary_if); + + if (ret) + bat_dbg(DBG_BATMAN, bat_priv, + "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n", + uev_type_str[type], uev_action_str[action], + (action == UEV_DEL ? "NULL" : data), ret); + return ret; +} diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h new file mode 100644 index 00000000..fece77ae --- /dev/null +++ b/net/batman-adv/bat_sysfs.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + + +#ifndef _NET_BATMAN_ADV_SYSFS_H_ +#define _NET_BATMAN_ADV_SYSFS_H_ + +#define SYSFS_IF_MESH_SUBDIR "mesh" +#define SYSFS_IF_BAT_SUBDIR "batman_adv" + +struct bat_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, struct attribute *attr, + char *buf); + ssize_t (*store)(struct kobject *kobj, struct attribute *attr, + char *buf, size_t count); +}; + +int sysfs_add_meshif(struct net_device *dev); +void sysfs_del_meshif(struct net_device *dev); +int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev); +void sysfs_del_hardif(struct kobject **hardif_obj); +int throw_uevent(struct bat_priv *bat_priv, enum uev_type type, + enum uev_action action, const char *data); + +#endif /* _NET_BATMAN_ADV_SYSFS_H_ */ diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c new file mode 100644 index 00000000..6d0aa216 --- /dev/null +++ b/net/batman-adv/bitarray.c @@ -0,0 +1,201 @@ +/* + * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich, Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "bitarray.h"
+
+#include <linux/bitops.h>
+
+/* returns 1 if the corresponding bit in the given seq_bits is set and
+ * curr_seqno is within range of last_seqno */
+int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
+		   uint32_t curr_seqno)
+{
+	int32_t diff, word_offset, word_num;
+
+	diff = last_seqno - curr_seqno;
+	if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) {
+		return 0;
+	} else {
+		/* which word */
+		word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE;
+		/* which position in the selected word */
+		word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE;
+
+		if (test_bit(word_offset, &seq_bits[word_num]))
+			return 1;
+		else
+			return 0;
+	}
+}
+
+/* turn corresponding bit on, so we can remember that we got the packet */
+void bit_mark(unsigned long *seq_bits, int32_t n)
+{
+	int32_t word_offset, word_num;
+
+	/* if too old, just drop it */
+	if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+		return;
+
+	/* which word */
+	word_num = n / WORD_BIT_SIZE;
+	/* which position in the selected word */
+	word_offset = n % WORD_BIT_SIZE;
+
+	set_bit(word_offset, &seq_bits[word_num]); /* turn the position on */
+}
+
+/* shift the packet array by n places. */
+static void bit_shift(unsigned long *seq_bits, int32_t n)
+{
+	int32_t word_offset, word_num;
+	int32_t i;
+
+	if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+		return;
+
+	word_offset = n % WORD_BIT_SIZE; /* shift how far inside each word */
+	word_num = n / WORD_BIT_SIZE;	/* shift over how many (full) words */
+
+	for (i = NUM_WORDS - 1; i > word_num; i--) {
+		/* going from old to new, so we don't overwrite the data we
+		 * copy from.
+		 *
+		 * left is high, right is low: FEDC BA98 7654 3210
+		 *
+		 * in this example each four-digit group is one word; with
+		 * word_num == 1 and word_offset == WORD_BIT_SIZE / 2 we
+		 * shift by 1.5 words (= 24 bits here), so the desired
+		 * output is:                  9876 5432 1000 0000
+		 */
+
+		seq_bits[i] =
+			/* take the lower part of the left-hand source word
+			 * and shift it up to its final position... */
+			(seq_bits[i - word_num] << word_offset) +
+			/* ...and add the upper part of the right-hand source
+			 * word, shifted down into place */
+			(seq_bits[i - word_num - 1] >>
+			 (WORD_BIT_SIZE - word_offset));
+		/* for our example that would be:
+		 * seq_bits[3] = (BA98 << 8) + (7654 >> 8) = 9800 + 0076
+		 *	       = 9876 */
+	}
+	/* now for our last word, i == word_num, we only have its "left"
+	 * half; that's the 1000 word in our example. */
+
+	seq_bits[i] = (seq_bits[i - word_num] << word_offset);
+
+	/* pad the rest with 0, if there is anything */
+	i--;
+
+	for (; i >= 0; i--)
+		seq_bits[i] = 0;
+}
+
+static void bit_reset_window(unsigned long *seq_bits)
+{
+	int i;
+	for (i = 0; i < NUM_WORDS; i++)
+		seq_bits[i] = 0;
+}
+
+
+/* receive and process one packet within the sequence number window.
+ *
+ * returns:
+ *  1 if the window was moved (either new or very old)
+ *  0 if the window was not moved/shifted.
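+ *
+ * worked example of the cases handled below: seq_num_diff == -3 is an
+ * in-window duplicate or reordered packet, so bit 3 is marked and 0 is
+ * returned; seq_num_diff == 1 shifts the window by one place, marks bit 0
+ * and returns 1; a diff at or beyond TQ_LOCAL_WINDOW_SIZE (or equally far
+ * in the past) resets the whole window and returns 1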
+ */ +int bit_get_packet(void *priv, unsigned long *seq_bits, + int32_t seq_num_diff, int set_mark) +{ + struct bat_priv *bat_priv = priv; + + /* sequence number is slightly older. We already got a sequence number + * higher than this one, so we just mark it. */ + + if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) { + if (set_mark) + bit_mark(seq_bits, -seq_num_diff); + return 0; + } + + /* sequence number is slightly newer, so we shift the window and + * set the mark if required */ + + if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) { + bit_shift(seq_bits, seq_num_diff); + + if (set_mark) + bit_mark(seq_bits, 0); + return 1; + } + + /* sequence number is much newer, probably missed a lot of packets */ + + if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) && + (seq_num_diff < EXPECTED_SEQNO_RANGE)) { + bat_dbg(DBG_BATMAN, bat_priv, + "We missed a lot of packets (%i) !\n", + seq_num_diff - 1); + bit_reset_window(seq_bits); + if (set_mark) + bit_mark(seq_bits, 0); + return 1; + } + + /* received a much older packet. The other host either restarted + * or the old packet got delayed somewhere in the network. The + * packet should be dropped without calling this function if the + * seqno window is protected. */ + + if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || + (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { + + bat_dbg(DBG_BATMAN, bat_priv, + "Other host probably restarted!\n"); + + bit_reset_window(seq_bits); + if (set_mark) + bit_mark(seq_bits, 0); + + return 1; + } + + /* never reached */ + return 0; +} + +/* count the hamming weight, how many good packets did we receive? just count + * the 1's. + */ +int bit_packet_count(const unsigned long *seq_bits) +{ + int i, hamming = 0; + + for (i = 0; i < NUM_WORDS; i++) + hamming += hweight_long(seq_bits[i]); + + return hamming; +} diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h new file mode 100644 index 00000000..c6135728 --- /dev/null +++ b/net/batman-adv/bitarray.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich, Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_BITARRAY_H_ +#define _NET_BATMAN_ADV_BITARRAY_H_ + +#define WORD_BIT_SIZE (sizeof(unsigned long) * 8) + +/* returns true if the corresponding bit in the given seq_bits indicates true + * and curr_seqno is within range of last_seqno */ +int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, + uint32_t curr_seqno); + +/* turn corresponding bit on, so we can remember that we got the packet */ +void bit_mark(unsigned long *seq_bits, int32_t n); + + +/* receive and process one packet, returns 1 if received seq_num is considered + * new, 0 if old */ +int bit_get_packet(void *priv, unsigned long *seq_bits, + int32_t seq_num_diff, int set_mark); + +/* count the hamming weight, how many good packets did we receive? 
*/ +int bit_packet_count(const unsigned long *seq_bits); + +#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c new file mode 100644 index 00000000..6f9b9b78 --- /dev/null +++ b/net/batman-adv/gateway_client.c @@ -0,0 +1,709 @@ +/* + * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "bat_sysfs.h" +#include "gateway_client.h" +#include "gateway_common.h" +#include "hard-interface.h" +#include "originator.h" +#include "translation-table.h" +#include "routing.h" +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/udp.h> +#include <linux/if_vlan.h> + +/* This is the offset of the options field in a dhcp packet starting at + * the beginning of the dhcp header */ +#define DHCP_OPTIONS_OFFSET 240 +#define DHCP_REQUEST 3 + +static void gw_node_free_ref(struct gw_node *gw_node) +{ + if (atomic_dec_and_test(&gw_node->refcount)) + kfree_rcu(gw_node, rcu); +} + +static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv) +{ + struct gw_node *gw_node; + + rcu_read_lock(); + gw_node = rcu_dereference(bat_priv->curr_gw); + if (!gw_node) + goto out; + + if (!atomic_inc_not_zero(&gw_node->refcount)) + gw_node = NULL; + +out: + rcu_read_unlock(); + return gw_node; +} + +struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv) +{ + struct gw_node *gw_node; + struct orig_node *orig_node = NULL; + + gw_node = gw_get_selected_gw_node(bat_priv); + if (!gw_node) + goto out; + + rcu_read_lock(); + orig_node = gw_node->orig_node; + if (!orig_node) + goto unlock; + + if (!atomic_inc_not_zero(&orig_node->refcount)) + orig_node = NULL; + +unlock: + rcu_read_unlock(); +out: + if (gw_node) + gw_node_free_ref(gw_node); + return orig_node; +} + +static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node) +{ + struct gw_node *curr_gw_node; + + spin_lock_bh(&bat_priv->gw_list_lock); + + if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) + new_gw_node = NULL; + + curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1); + rcu_assign_pointer(bat_priv->curr_gw, new_gw_node); + + if (curr_gw_node) + gw_node_free_ref(curr_gw_node); + + spin_unlock_bh(&bat_priv->gw_list_lock); +} + +void gw_deselect(struct bat_priv *bat_priv) +{ + atomic_set(&bat_priv->gw_reselect, 1); +} + +static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv) +{ + struct neigh_node *router; + struct hlist_node *node; + struct gw_node *gw_node, *curr_gw = NULL; + uint32_t max_gw_factor = 0, tmp_gw_factor = 0; + uint8_t max_tq = 0; + int down, up; + + rcu_read_lock(); + hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { + if (gw_node->deleted) + continue; + + router = orig_node_get_router(gw_node->orig_node); + if (!router) + continue; + + if 
(!atomic_inc_not_zero(&gw_node->refcount)) + goto next; + + switch (atomic_read(&bat_priv->gw_sel_class)) { + case 1: /* fast connection */ + gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, + &down, &up); + + tmp_gw_factor = (router->tq_avg * router->tq_avg * + down * 100 * 100) / + (TQ_LOCAL_WINDOW_SIZE * + TQ_LOCAL_WINDOW_SIZE * 64); + + if ((tmp_gw_factor > max_gw_factor) || + ((tmp_gw_factor == max_gw_factor) && + (router->tq_avg > max_tq))) { + if (curr_gw) + gw_node_free_ref(curr_gw); + curr_gw = gw_node; + atomic_inc(&curr_gw->refcount); + } + break; + + default: /** + * 2: stable connection (use best statistic) + * 3: fast-switch (use best statistic but change as + * soon as a better gateway appears) + * XX: late-switch (use best statistic but change as + * soon as a better gateway appears which has + * $routing_class more tq points) + **/ + if (router->tq_avg > max_tq) { + if (curr_gw) + gw_node_free_ref(curr_gw); + curr_gw = gw_node; + atomic_inc(&curr_gw->refcount); + } + break; + } + + if (router->tq_avg > max_tq) + max_tq = router->tq_avg; + + if (tmp_gw_factor > max_gw_factor) + max_gw_factor = tmp_gw_factor; + + gw_node_free_ref(gw_node); + +next: + neigh_node_free_ref(router); + } + rcu_read_unlock(); + + return curr_gw; +} + +void gw_election(struct bat_priv *bat_priv) +{ + struct gw_node *curr_gw = NULL, *next_gw = NULL; + struct neigh_node *router = NULL; + char gw_addr[18] = { '\0' }; + + /** + * The batman daemon checks here if we already passed a full originator + * cycle in order to make sure we don't choose the first gateway we + * hear about. This check is based on the daemon's uptime which we + * don't have. + **/ + if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT) + goto out; + + if (!atomic_dec_not_zero(&bat_priv->gw_reselect)) + goto out; + + curr_gw = gw_get_selected_gw_node(bat_priv); + + next_gw = gw_get_best_gw_node(bat_priv); + + if (curr_gw == next_gw) + goto out; + + if (next_gw) { + sprintf(gw_addr, "%pM", next_gw->orig_node->orig); + + router = orig_node_get_router(next_gw->orig_node); + if (!router) { + gw_deselect(bat_priv); + goto out; + } + } + + if ((curr_gw) && (!next_gw)) { + bat_dbg(DBG_BATMAN, bat_priv, + "Removing selected gateway - no gateway in range\n"); + throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL); + } else if ((!curr_gw) && (next_gw)) { + bat_dbg(DBG_BATMAN, bat_priv, + "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n", + next_gw->orig_node->orig, next_gw->orig_node->gw_flags, + router->tq_avg); + throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr); + } else { + bat_dbg(DBG_BATMAN, bat_priv, + "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n", + next_gw->orig_node->orig, next_gw->orig_node->gw_flags, + router->tq_avg); + throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr); + } + + gw_select(bat_priv, next_gw); + +out: + if (curr_gw) + gw_node_free_ref(curr_gw); + if (next_gw) + gw_node_free_ref(next_gw); + if (router) + neigh_node_free_ref(router); +} + +void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) +{ + struct orig_node *curr_gw_orig; + struct neigh_node *router_gw = NULL, *router_orig = NULL; + uint8_t gw_tq_avg, orig_tq_avg; + + curr_gw_orig = gw_get_selected_orig(bat_priv); + if (!curr_gw_orig) + goto deselect; + + router_gw = orig_node_get_router(curr_gw_orig); + if (!router_gw) + goto deselect; + + /* this node already is the gateway */ + if (curr_gw_orig == orig_node) + goto out; + + router_orig = orig_node_get_router(orig_node); + if (!router_orig) + goto out; + + gw_tq_avg = 
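+	/* worked example for the gw_sel_class check below: with
+	 * gw_sel_class == 5 (any value > 3 acts as a hysteresis threshold)
+	 * a candidate with orig_tq_avg == 203 replaces a current gateway
+	 * with gw_tq_avg == 197 (difference 6 >= 5), whereas
+	 * orig_tq_avg == 200 (difference 3 < 5) keeps the current one */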
router_gw->tq_avg; + orig_tq_avg = router_orig->tq_avg; + + /* the TQ value has to be better */ + if (orig_tq_avg < gw_tq_avg) + goto out; + + /** + * if the routing class is greater than 3 the value tells us how much + * greater the TQ value of the new gateway must be + **/ + if ((atomic_read(&bat_priv->gw_sel_class) > 3) && + (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class))) + goto out; + + bat_dbg(DBG_BATMAN, bat_priv, + "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n", + gw_tq_avg, orig_tq_avg); + +deselect: + gw_deselect(bat_priv); +out: + if (curr_gw_orig) + orig_node_free_ref(curr_gw_orig); + if (router_gw) + neigh_node_free_ref(router_gw); + if (router_orig) + neigh_node_free_ref(router_orig); + + return; +} + +static void gw_node_add(struct bat_priv *bat_priv, + struct orig_node *orig_node, uint8_t new_gwflags) +{ + struct gw_node *gw_node; + int down, up; + + gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); + if (!gw_node) + return; + + INIT_HLIST_NODE(&gw_node->list); + gw_node->orig_node = orig_node; + atomic_set(&gw_node->refcount, 1); + + spin_lock_bh(&bat_priv->gw_list_lock); + hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list); + spin_unlock_bh(&bat_priv->gw_list_lock); + + gw_bandwidth_to_kbit(new_gwflags, &down, &up); + bat_dbg(DBG_BATMAN, bat_priv, + "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n", + orig_node->orig, new_gwflags, + (down > 2048 ? down / 1024 : down), + (down > 2048 ? "MBit" : "KBit"), + (up > 2048 ? up / 1024 : up), + (up > 2048 ? "MBit" : "KBit")); +} + +void gw_node_update(struct bat_priv *bat_priv, + struct orig_node *orig_node, uint8_t new_gwflags) +{ + struct hlist_node *node; + struct gw_node *gw_node, *curr_gw; + + /** + * Note: We don't need a NULL check here, since curr_gw never gets + * dereferenced. If curr_gw is NULL we also should not exit as we may + * have this gateway in our list (duplication check!) even though we + * have no currently selected gateway. 
+ */ + curr_gw = gw_get_selected_gw_node(bat_priv); + + rcu_read_lock(); + hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { + if (gw_node->orig_node != orig_node) + continue; + + bat_dbg(DBG_BATMAN, bat_priv, + "Gateway class of originator %pM changed from %i to %i\n", + orig_node->orig, gw_node->orig_node->gw_flags, + new_gwflags); + + gw_node->deleted = 0; + + if (new_gwflags == NO_FLAGS) { + gw_node->deleted = jiffies; + bat_dbg(DBG_BATMAN, bat_priv, + "Gateway %pM removed from gateway list\n", + orig_node->orig); + + if (gw_node == curr_gw) + goto deselect; + } + + goto unlock; + } + + if (new_gwflags == NO_FLAGS) + goto unlock; + + gw_node_add(bat_priv, orig_node, new_gwflags); + goto unlock; + +deselect: + gw_deselect(bat_priv); +unlock: + rcu_read_unlock(); + + if (curr_gw) + gw_node_free_ref(curr_gw); +} + +void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node) +{ + gw_node_update(bat_priv, orig_node, 0); +} + +void gw_node_purge(struct bat_priv *bat_priv) +{ + struct gw_node *gw_node, *curr_gw; + struct hlist_node *node, *node_tmp; + unsigned long timeout = msecs_to_jiffies(2 * PURGE_TIMEOUT); + int do_deselect = 0; + + curr_gw = gw_get_selected_gw_node(bat_priv); + + spin_lock_bh(&bat_priv->gw_list_lock); + + hlist_for_each_entry_safe(gw_node, node, node_tmp, + &bat_priv->gw_list, list) { + if (((!gw_node->deleted) || + (time_before(jiffies, gw_node->deleted + timeout))) && + atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) + continue; + + if (curr_gw == gw_node) + do_deselect = 1; + + hlist_del_rcu(&gw_node->list); + gw_node_free_ref(gw_node); + } + + spin_unlock_bh(&bat_priv->gw_list_lock); + + /* gw_deselect() needs to acquire the gw_list_lock */ + if (do_deselect) + gw_deselect(bat_priv); + + if (curr_gw) + gw_node_free_ref(curr_gw); +} + +/** + * fails if orig_node has no router + */ +static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq, + const struct gw_node *gw_node) +{ + struct gw_node *curr_gw; + struct neigh_node *router; + int down, up, ret = -1; + + gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up); + + router = orig_node_get_router(gw_node->orig_node); + if (!router) + goto out; + + curr_gw = gw_get_selected_gw_node(bat_priv); + + ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n", + (curr_gw == gw_node ? "=>" : " "), + gw_node->orig_node->orig, + router->tq_avg, router->addr, + router->if_incoming->net_dev->name, + gw_node->orig_node->gw_flags, + (down > 2048 ? down / 1024 : down), + (down > 2048 ? "MBit" : "KBit"), + (up > 2048 ? up / 1024 : up), + (up > 2048 ? "MBit" : "KBit")); + + neigh_node_free_ref(router); + if (curr_gw) + gw_node_free_ref(curr_gw); +out: + return ret; +} + +int gw_client_seq_print_text(struct seq_file *seq, void *offset) +{ + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct hard_iface *primary_if; + struct gw_node *gw_node; + struct hlist_node *node; + int gw_count = 0, ret = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - please specify interfaces to enable it\n", + net_dev->name); + goto out; + } + + if (primary_if->if_status != IF_ACTIVE) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - primary interface not active\n", + net_dev->name); + goto out; + } + + seq_printf(seq, + " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. 
adv %s, MainIF/MAC: %s/%pM (%s)]\n", + "Gateway", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF", + SOURCE_VERSION, primary_if->net_dev->name, + primary_if->net_dev->dev_addr, net_dev->name); + + rcu_read_lock(); + hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { + if (gw_node->deleted) + continue; + + /* fails if orig_node has no router */ + if (_write_buffer_text(bat_priv, seq, gw_node) < 0) + continue; + + gw_count++; + } + rcu_read_unlock(); + + if (gw_count == 0) + seq_printf(seq, "No gateways in range ...\n"); + +out: + if (primary_if) + hardif_free_ref(primary_if); + return ret; +} + +static bool is_type_dhcprequest(struct sk_buff *skb, int header_len) +{ + int ret = false; + unsigned char *p; + int pkt_len; + + if (skb_linearize(skb) < 0) + goto out; + + pkt_len = skb_headlen(skb); + + if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1) + goto out; + + p = skb->data + header_len + DHCP_OPTIONS_OFFSET; + pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1; + + /* Access the dhcp option lists. Each entry is made up by: + * - octet 1: option type + * - octet 2: option data len (only if type != 255 and 0) + * - octet 3: option data */ + while (*p != 255 && !ret) { + /* p now points to the first octet: option type */ + if (*p == 53) { + /* type 53 is the message type option. + * Jump the len octet and go to the data octet */ + if (pkt_len < 2) + goto out; + p += 2; + + /* check if the message type is what we need */ + if (*p == DHCP_REQUEST) + ret = true; + break; + } else if (*p == 0) { + /* option type 0 (padding), just go forward */ + if (pkt_len < 1) + goto out; + pkt_len--; + p++; + } else { + /* This is any other option. So we get the length... */ + if (pkt_len < 1) + goto out; + pkt_len--; + p++; + + /* ...and then we jump over the data */ + if (pkt_len < *p) + goto out; + pkt_len -= *p; + p += (*p); + } + } +out: + return ret; +} + +bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) +{ + struct ethhdr *ethhdr; + struct iphdr *iphdr; + struct ipv6hdr *ipv6hdr; + struct udphdr *udphdr; + + /* check for ethernet header */ + if (!pskb_may_pull(skb, *header_len + ETH_HLEN)) + return false; + ethhdr = (struct ethhdr *)skb->data; + *header_len += ETH_HLEN; + + /* check for initial vlan header */ + if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { + if (!pskb_may_pull(skb, *header_len + VLAN_HLEN)) + return false; + ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); + *header_len += VLAN_HLEN; + } + + /* check for ip header */ + switch (ntohs(ethhdr->h_proto)) { + case ETH_P_IP: + if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr))) + return false; + iphdr = (struct iphdr *)(skb->data + *header_len); + *header_len += iphdr->ihl * 4; + + /* check for udp header */ + if (iphdr->protocol != IPPROTO_UDP) + return false; + + break; + case ETH_P_IPV6: + if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr))) + return false; + ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len); + *header_len += sizeof(*ipv6hdr); + + /* check for udp header */ + if (ipv6hdr->nexthdr != IPPROTO_UDP) + return false; + + break; + default: + return false; + } + + if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) + return false; + udphdr = (struct udphdr *)(skb->data + *header_len); + *header_len += sizeof(*udphdr); + + /* check for bootp port */ + if ((ntohs(ethhdr->h_proto) == ETH_P_IP) && + (ntohs(udphdr->dest) != 67)) + return false; + + if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) && + (ntohs(udphdr->dest) != 547)) + return false; + + return true; +} + +bool 
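+/* worked example for is_type_dhcprequest() above: DHCP_OPTIONS_OFFSET (240)
+ * bytes into the DHCP header the options list begins, and a DHCPREQUEST
+ * carries the message-type option as the byte sequence
+ *   53 01 03	(option 53 = message type, length 1, value 3 = DHCP_REQUEST)
+ * padding octets (type 0) are skipped one by one, and every other option is
+ * skipped via its length octet until the end marker (type 255) is reached */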
gw_out_of_range(struct bat_priv *bat_priv, + struct sk_buff *skb, struct ethhdr *ethhdr) +{ + struct neigh_node *neigh_curr = NULL, *neigh_old = NULL; + struct orig_node *orig_dst_node = NULL; + struct gw_node *curr_gw = NULL; + bool ret, out_of_range = false; + unsigned int header_len = 0; + uint8_t curr_tq_avg; + + ret = gw_is_dhcp_target(skb, &header_len); + if (!ret) + goto out; + + orig_dst_node = transtable_search(bat_priv, ethhdr->h_source, + ethhdr->h_dest); + if (!orig_dst_node) + goto out; + + if (!orig_dst_node->gw_flags) + goto out; + + ret = is_type_dhcprequest(skb, header_len); + if (!ret) + goto out; + + switch (atomic_read(&bat_priv->gw_mode)) { + case GW_MODE_SERVER: + /* If we are a GW then we are our best GW. We can artificially + * set the tq towards ourself as the maximum value */ + curr_tq_avg = TQ_MAX_VALUE; + break; + case GW_MODE_CLIENT: + curr_gw = gw_get_selected_gw_node(bat_priv); + if (!curr_gw) + goto out; + + /* packet is going to our gateway */ + if (curr_gw->orig_node == orig_dst_node) + goto out; + + /* If the dhcp packet has been sent to a different gw, + * we have to evaluate whether the old gw is still + * reliable enough */ + neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL); + if (!neigh_curr) + goto out; + + curr_tq_avg = neigh_curr->tq_avg; + break; + case GW_MODE_OFF: + default: + goto out; + } + + neigh_old = find_router(bat_priv, orig_dst_node, NULL); + if (!neigh_old) + goto out; + + if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD) + out_of_range = true; + +out: + if (orig_dst_node) + orig_node_free_ref(orig_dst_node); + if (curr_gw) + gw_node_free_ref(curr_gw); + if (neigh_old) + neigh_node_free_ref(neigh_old); + if (neigh_curr) + neigh_node_free_ref(neigh_curr); + return out_of_range; +} diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h new file mode 100644 index 00000000..bf56a5ae --- /dev/null +++ b/net/batman-adv/gateway_client.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ +#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ + +void gw_deselect(struct bat_priv *bat_priv); +void gw_election(struct bat_priv *bat_priv); +struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv); +void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node); +void gw_node_update(struct bat_priv *bat_priv, + struct orig_node *orig_node, uint8_t new_gwflags); +void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node); +void gw_node_purge(struct bat_priv *bat_priv); +int gw_client_seq_print_text(struct seq_file *seq, void *offset); +bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); +bool gw_out_of_range(struct bat_priv *bat_priv, + struct sk_buff *skb, struct ethhdr *ethhdr); + +#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c new file mode 100644 index 00000000..ca57ac7d --- /dev/null +++ b/net/batman-adv/gateway_common.c @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "gateway_common.h" +#include "gateway_client.h" + +/* calculates the gateway class from kbit */ +static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class) +{ + int mdown = 0, tdown, tup, difference; + uint8_t sbit, part; + + *gw_srv_class = 0; + difference = 0x0FFFFFFF; + + /* test all downspeeds */ + for (sbit = 0; sbit < 2; sbit++) { + for (part = 0; part < 16; part++) { + tdown = 32 * (sbit + 2) * (1 << part); + + if (abs(tdown - down) < difference) { + *gw_srv_class = (sbit << 7) + (part << 3); + difference = abs(tdown - down); + mdown = tdown; + } + } + } + + /* test all upspeeds */ + difference = 0x0FFFFFFF; + + for (part = 0; part < 8; part++) { + tup = ((part + 1) * (mdown)) / 8; + + if (abs(tup - up) < difference) { + *gw_srv_class = (*gw_srv_class & 0xF8) | part; + difference = abs(tup - up); + } + } +} + +/* returns the up and downspeeds in kbit, calculated from the class */ +void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up) +{ + int sbit = (gw_srv_class & 0x80) >> 7; + int dpart = (gw_srv_class & 0x78) >> 3; + int upart = (gw_srv_class & 0x07); + + if (!gw_srv_class) { + *down = 0; + *up = 0; + return; + } + + *down = 32 * (sbit + 2) * (1 << dpart); + *up = ((upart + 1) * (*down)) / 8; +} + +static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff, + int *up, int *down) +{ + int ret, multi = 1; + char *slash_ptr, *tmp_ptr; + long ldown, lup; + + slash_ptr = strchr(buff, '/'); + if (slash_ptr) + *slash_ptr = 0; + + if (strlen(buff) > 4) { + tmp_ptr = buff + strlen(buff) - 4; + + if (strnicmp(tmp_ptr, "mbit", 4) == 0) + multi = 1024; + + if ((strnicmp(tmp_ptr, "kbit", 4) == 0) || + (multi > 1)) + *tmp_ptr = '\0'; + } + + ret = kstrtol(buff, 10, &ldown); + if (ret) { + bat_err(net_dev, + "Download speed of gateway mode invalid: %s\n", + buff); + return false; + } + + *down = ldown * multi; + + /* we also got some upload info */ + if (slash_ptr) { + multi = 1; + + if (strlen(slash_ptr + 1) > 4) { + tmp_ptr = slash_ptr + 1 - 4 + strlen(slash_ptr + 1); + + if (strnicmp(tmp_ptr, "mbit", 4) == 0) + multi = 1024; + + if ((strnicmp(tmp_ptr, "kbit", 4) == 0) || + (multi > 1)) + *tmp_ptr = '\0'; + } + + ret = kstrtol(slash_ptr + 1, 10, &lup); + if (ret) { + bat_err(net_dev, + "Upload speed of gateway mode invalid: %s\n", + slash_ptr + 1); + return false; + } + + *up = lup * multi; + } + + return true; +} + +ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count) +{ + struct bat_priv *bat_priv = netdev_priv(net_dev); + long gw_bandwidth_tmp = 0; + int up = 0, down = 0; + bool ret; + + ret = parse_gw_bandwidth(net_dev, buff, &up, &down); + if (!ret) + goto end; + + if ((!down) || (down < 256)) + down = 2000; + + if (!up) + up = down / 5; + + kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp); + + /** + * the gw bandwidth we guessed above might not match the given + * speeds, hence we need to calculate it back to show the number + * that is going to be propagated + **/ + gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up); + + gw_deselect(bat_priv); + bat_info(net_dev, + "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n", + atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp, + (down > 2048 ? down / 1024 : down), + (down > 2048 ? 
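+		 /* worked example of the 8-bit gateway class used above:
+		  * class 163 = 0b10100011 -> sbit 1, dpart 4, upart 3, so
+		  * down = 32 * (1 + 2) * 2^4 = 1536 kbit and
+		  * up = (3 + 1) * 1536 / 8 = 768 kbit; since
+		  * kbit_to_gw_bandwidth() picks the nearest encodable pair,
+		  * the propagated rates may differ from the requested ones */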
"MBit" : "KBit"), + (up > 2048 ? up / 1024 : up), + (up > 2048 ? "MBit" : "KBit")); + + atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp); + +end: + return count; +} diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h new file mode 100644 index 00000000..b8fb11c4 --- /dev/null +++ b/net/batman-adv/gateway_common.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_ +#define _NET_BATMAN_ADV_GATEWAY_COMMON_H_ + +enum gw_modes { + GW_MODE_OFF, + GW_MODE_CLIENT, + GW_MODE_SERVER, +}; + +#define GW_MODE_OFF_NAME "off" +#define GW_MODE_CLIENT_NAME "client" +#define GW_MODE_SERVER_NAME "server" + +void gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up); +ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count); + +#endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */ diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c new file mode 100644 index 00000000..37789770 --- /dev/null +++ b/net/batman-adv/hard-interface.c @@ -0,0 +1,688 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "hard-interface.h" +#include "soft-interface.h" +#include "send.h" +#include "translation-table.h" +#include "routing.h" +#include "bat_sysfs.h" +#include "originator.h" +#include "hash.h" + +#include <linux/if_arp.h> + + +static int batman_skb_recv(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *ptype, + struct net_device *orig_dev); + +void hardif_free_rcu(struct rcu_head *rcu) +{ + struct hard_iface *hard_iface; + + hard_iface = container_of(rcu, struct hard_iface, rcu); + dev_put(hard_iface->net_dev); + kfree(hard_iface); +} + +struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev) +{ + struct hard_iface *hard_iface; + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if (hard_iface->net_dev == net_dev && + atomic_inc_not_zero(&hard_iface->refcount)) + goto out; + } + + hard_iface = NULL; + +out: + rcu_read_unlock(); + return hard_iface; +} + +static int is_valid_iface(const struct net_device *net_dev) +{ + if (net_dev->flags & IFF_LOOPBACK) + return 0; + + if (net_dev->type != ARPHRD_ETHER) + return 0; + + if (net_dev->addr_len != ETH_ALEN) + return 0; + + /* no batman over batman */ + if (softif_is_valid(net_dev)) + return 0; + + /* Device is being bridged */ + /* if (net_dev->priv_flags & IFF_BRIDGE_PORT) + return 0; */ + + return 1; +} + +static struct hard_iface *hardif_get_active(const struct net_device *soft_iface) +{ + struct hard_iface *hard_iface; + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if (hard_iface->soft_iface != soft_iface) + continue; + + if (hard_iface->if_status == IF_ACTIVE && + atomic_inc_not_zero(&hard_iface->refcount)) + goto out; + } + + hard_iface = NULL; + +out: + rcu_read_unlock(); + return hard_iface; +} + +static void primary_if_update_addr(struct bat_priv *bat_priv) +{ + struct vis_packet *vis_packet; + struct hard_iface *primary_if; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + vis_packet = (struct vis_packet *) + bat_priv->my_vis_info->skb_packet->data; + memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); + memcpy(vis_packet->sender_orig, + primary_if->net_dev->dev_addr, ETH_ALEN); + +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +static void primary_if_select(struct bat_priv *bat_priv, + struct hard_iface *new_hard_iface) +{ + struct hard_iface *curr_hard_iface; + + ASSERT_RTNL(); + + if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount)) + new_hard_iface = NULL; + + curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1); + rcu_assign_pointer(bat_priv->primary_if, new_hard_iface); + + if (curr_hard_iface) + hardif_free_ref(curr_hard_iface); + + if (!new_hard_iface) + return; + + bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface); + primary_if_update_addr(bat_priv); +} + +static bool hardif_is_iface_up(const struct hard_iface *hard_iface) +{ + if (hard_iface->net_dev->flags & IFF_UP) + return true; + + return false; +} + +static void check_known_mac_addr(const struct net_device *net_dev) +{ + const struct hard_iface *hard_iface; + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if ((hard_iface->if_status != IF_ACTIVE) && + 
(hard_iface->if_status != IF_TO_BE_ACTIVATED)) + continue; + + if (hard_iface->net_dev == net_dev) + continue; + + if (!compare_eth(hard_iface->net_dev->dev_addr, + net_dev->dev_addr)) + continue; + + pr_warning("The newly added mac address (%pM) already exists on: %s\n", + net_dev->dev_addr, hard_iface->net_dev->name); + pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n"); + } + rcu_read_unlock(); +} + +int hardif_min_mtu(struct net_device *soft_iface) +{ + const struct bat_priv *bat_priv = netdev_priv(soft_iface); + const struct hard_iface *hard_iface; + /* allow big frames if all devices are capable to do so + * (have MTU > 1500 + BAT_HEADER_LEN) */ + int min_mtu = ETH_DATA_LEN; + + if (atomic_read(&bat_priv->fragmentation)) + goto out; + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if ((hard_iface->if_status != IF_ACTIVE) && + (hard_iface->if_status != IF_TO_BE_ACTIVATED)) + continue; + + if (hard_iface->soft_iface != soft_iface) + continue; + + min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN, + min_mtu); + } + rcu_read_unlock(); +out: + return min_mtu; +} + +/* adjusts the MTU if a new interface with a smaller MTU appeared. */ +void update_min_mtu(struct net_device *soft_iface) +{ + int min_mtu; + + min_mtu = hardif_min_mtu(soft_iface); + if (soft_iface->mtu != min_mtu) + soft_iface->mtu = min_mtu; +} + +static void hardif_activate_interface(struct hard_iface *hard_iface) +{ + struct bat_priv *bat_priv; + struct hard_iface *primary_if = NULL; + + if (hard_iface->if_status != IF_INACTIVE) + goto out; + + bat_priv = netdev_priv(hard_iface->soft_iface); + + bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface); + hard_iface->if_status = IF_TO_BE_ACTIVATED; + + /** + * the first active interface becomes our primary interface or + * the next active interface after the old primary interface was removed + */ + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + primary_if_select(bat_priv, hard_iface); + + bat_info(hard_iface->soft_iface, "Interface activated: %s\n", + hard_iface->net_dev->name); + + update_min_mtu(hard_iface->soft_iface); + +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +static void hardif_deactivate_interface(struct hard_iface *hard_iface) +{ + if ((hard_iface->if_status != IF_ACTIVE) && + (hard_iface->if_status != IF_TO_BE_ACTIVATED)) + return; + + hard_iface->if_status = IF_INACTIVE; + + bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n", + hard_iface->net_dev->name); + + update_min_mtu(hard_iface->soft_iface); +} + +int hardif_enable_interface(struct hard_iface *hard_iface, + const char *iface_name) +{ + struct bat_priv *bat_priv; + struct net_device *soft_iface; + int ret; + + if (hard_iface->if_status != IF_NOT_IN_USE) + goto out; + + if (!atomic_inc_not_zero(&hard_iface->refcount)) + goto out; + + /* hard-interface is part of a bridge */ + if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT) + pr_err("You are about to enable batman-adv on '%s' which already is part of a bridge. 
Unless you know exactly what you are doing this is probably wrong and won't work the way you think it would.\n", + hard_iface->net_dev->name); + + soft_iface = dev_get_by_name(&init_net, iface_name); + + if (!soft_iface) { + soft_iface = softif_create(iface_name); + + if (!soft_iface) { + ret = -ENOMEM; + goto err; + } + + /* dev_get_by_name() increases the reference counter for us */ + dev_hold(soft_iface); + } + + if (!softif_is_valid(soft_iface)) { + pr_err("Can't create batman mesh interface %s: already exists as regular interface\n", + soft_iface->name); + dev_put(soft_iface); + ret = -EINVAL; + goto err; + } + + hard_iface->soft_iface = soft_iface; + bat_priv = netdev_priv(hard_iface->soft_iface); + + bat_priv->bat_algo_ops->bat_ogm_init(hard_iface); + + if (!hard_iface->packet_buff) { + bat_err(hard_iface->soft_iface, + "Can't add interface packet (%s): out of memory\n", + hard_iface->net_dev->name); + ret = -ENOMEM; + goto err; + } + + hard_iface->if_num = bat_priv->num_ifaces; + bat_priv->num_ifaces++; + hard_iface->if_status = IF_INACTIVE; + orig_hash_add_if(hard_iface, bat_priv->num_ifaces); + + hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN); + hard_iface->batman_adv_ptype.func = batman_skb_recv; + hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; + dev_add_pack(&hard_iface->batman_adv_ptype); + + atomic_set(&hard_iface->seqno, 1); + atomic_set(&hard_iface->frag_seqno, 1); + bat_info(hard_iface->soft_iface, "Adding interface: %s\n", + hard_iface->net_dev->name); + + if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < + ETH_DATA_LEN + BAT_HEADER_LEN) + bat_info(hard_iface->soft_iface, + "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n", + hard_iface->net_dev->name, hard_iface->net_dev->mtu, + ETH_DATA_LEN + BAT_HEADER_LEN); + + if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < + ETH_DATA_LEN + BAT_HEADER_LEN) + bat_info(hard_iface->soft_iface, + "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. 
If you experience problems getting traffic through try increasing the MTU to %zi.\n", + hard_iface->net_dev->name, hard_iface->net_dev->mtu, + ETH_DATA_LEN + BAT_HEADER_LEN); + + if (hardif_is_iface_up(hard_iface)) + hardif_activate_interface(hard_iface); + else + bat_err(hard_iface->soft_iface, + "Not using interface %s (retrying later): interface not active\n", + hard_iface->net_dev->name); + + /* begin scheduling originator messages on that interface */ + schedule_bat_ogm(hard_iface); + +out: + return 0; + +err: + hardif_free_ref(hard_iface); + return ret; +} + +void hardif_disable_interface(struct hard_iface *hard_iface) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct hard_iface *primary_if = NULL; + + if (hard_iface->if_status == IF_ACTIVE) + hardif_deactivate_interface(hard_iface); + + if (hard_iface->if_status != IF_INACTIVE) + goto out; + + bat_info(hard_iface->soft_iface, "Removing interface: %s\n", + hard_iface->net_dev->name); + dev_remove_pack(&hard_iface->batman_adv_ptype); + + bat_priv->num_ifaces--; + orig_hash_del_if(hard_iface, bat_priv->num_ifaces); + + primary_if = primary_if_get_selected(bat_priv); + if (hard_iface == primary_if) { + struct hard_iface *new_if; + + new_if = hardif_get_active(hard_iface->soft_iface); + primary_if_select(bat_priv, new_if); + + if (new_if) + hardif_free_ref(new_if); + } + + kfree(hard_iface->packet_buff); + hard_iface->packet_buff = NULL; + hard_iface->if_status = IF_NOT_IN_USE; + + /* delete all references to this hard_iface */ + purge_orig_ref(bat_priv); + purge_outstanding_packets(bat_priv, hard_iface); + dev_put(hard_iface->soft_iface); + + /* nobody uses this interface anymore */ + if (!bat_priv->num_ifaces) + softif_destroy(hard_iface->soft_iface); + + hard_iface->soft_iface = NULL; + hardif_free_ref(hard_iface); + +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +static struct hard_iface *hardif_add_interface(struct net_device *net_dev) +{ + struct hard_iface *hard_iface; + int ret; + + ASSERT_RTNL(); + + ret = is_valid_iface(net_dev); + if (ret != 1) + goto out; + + dev_hold(net_dev); + + hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC); + if (!hard_iface) + goto release_dev; + + ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev); + if (ret) + goto free_if; + + hard_iface->if_num = -1; + hard_iface->net_dev = net_dev; + hard_iface->soft_iface = NULL; + hard_iface->if_status = IF_NOT_IN_USE; + INIT_LIST_HEAD(&hard_iface->list); + /* extra reference for return */ + atomic_set(&hard_iface->refcount, 2); + + check_known_mac_addr(hard_iface->net_dev); + list_add_tail_rcu(&hard_iface->list, &hardif_list); + + return hard_iface; + +free_if: + kfree(hard_iface); +release_dev: + dev_put(net_dev); +out: + return NULL; +} + +static void hardif_remove_interface(struct hard_iface *hard_iface) +{ + ASSERT_RTNL(); + + /* first deactivate interface */ + if (hard_iface->if_status != IF_NOT_IN_USE) + hardif_disable_interface(hard_iface); + + if (hard_iface->if_status != IF_NOT_IN_USE) + return; + + hard_iface->if_status = IF_TO_BE_REMOVED; + sysfs_del_hardif(&hard_iface->hardif_obj); + hardif_free_ref(hard_iface); +} + +void hardif_remove_interfaces(void) +{ + struct hard_iface *hard_iface, *hard_iface_tmp; + + rtnl_lock(); + list_for_each_entry_safe(hard_iface, hard_iface_tmp, + &hardif_list, list) { + list_del_rcu(&hard_iface->list); + hardif_remove_interface(hard_iface); + } + rtnl_unlock(); +} + +static int hard_if_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct 
net_device *net_dev = ptr; + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); + struct hard_iface *primary_if = NULL; + struct bat_priv *bat_priv; + + if (!hard_iface && event == NETDEV_REGISTER) + hard_iface = hardif_add_interface(net_dev); + + if (!hard_iface) + goto out; + + switch (event) { + case NETDEV_UP: + hardif_activate_interface(hard_iface); + break; + case NETDEV_GOING_DOWN: + case NETDEV_DOWN: + hardif_deactivate_interface(hard_iface); + break; + case NETDEV_UNREGISTER: + list_del_rcu(&hard_iface->list); + + hardif_remove_interface(hard_iface); + break; + case NETDEV_CHANGEMTU: + if (hard_iface->soft_iface) + update_min_mtu(hard_iface->soft_iface); + break; + case NETDEV_CHANGEADDR: + if (hard_iface->if_status == IF_NOT_IN_USE) + goto hardif_put; + + check_known_mac_addr(hard_iface->net_dev); + + bat_priv = netdev_priv(hard_iface->soft_iface); + bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface); + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto hardif_put; + + if (hard_iface == primary_if) + primary_if_update_addr(bat_priv); + break; + default: + break; + } + +hardif_put: + hardif_free_ref(hard_iface); +out: + if (primary_if) + hardif_free_ref(primary_if); + return NOTIFY_DONE; +} + +/* incoming packets with the batman ethertype received on any active hard + * interface */ +static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, + struct net_device *orig_dev) +{ + struct bat_priv *bat_priv; + struct batman_ogm_packet *batman_ogm_packet; + struct hard_iface *hard_iface; + int ret; + + hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype); + skb = skb_share_check(skb, GFP_ATOMIC); + + /* skb was released by skb_share_check() */ + if (!skb) + goto err_out; + + /* packet should hold at least type and version */ + if (unlikely(!pskb_may_pull(skb, 2))) + goto err_free; + + /* expect a valid ethernet header here. */ + if (unlikely(skb->mac_len != sizeof(struct ethhdr) || + !skb_mac_header(skb))) + goto err_free; + + if (!hard_iface->soft_iface) + goto err_free; + + bat_priv = netdev_priv(hard_iface->soft_iface); + + if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) + goto err_free; + + /* discard frames on not active interfaces */ + if (hard_iface->if_status != IF_ACTIVE) + goto err_free; + + batman_ogm_packet = (struct batman_ogm_packet *)skb->data; + + if (batman_ogm_packet->header.version != COMPAT_VERSION) { + bat_dbg(DBG_BATMAN, bat_priv, + "Drop packet: incompatible batman version (%i)\n", + batman_ogm_packet->header.version); + goto err_free; + } + + /* all receive handlers return whether they received or reused + * the supplied skb. if not, we have to free the skb. 
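+	 * A handler that keeps or reuses the skb returns NET_RX_SUCCESS;
+	 * NET_RX_DROP leaves ownership here, and the skb is freed right
+	 * after the switch below.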
+	 */
+
+	switch (batman_ogm_packet->header.packet_type) {
+		/* batman originator packet */
+	case BAT_OGM:
+		ret = recv_bat_ogm_packet(skb, hard_iface);
+		break;
+
+		/* batman icmp packet */
+	case BAT_ICMP:
+		ret = recv_icmp_packet(skb, hard_iface);
+		break;
+
+		/* unicast packet */
+	case BAT_UNICAST:
+		ret = recv_unicast_packet(skb, hard_iface);
+		break;
+
+		/* fragmented unicast packet */
+	case BAT_UNICAST_FRAG:
+		ret = recv_ucast_frag_packet(skb, hard_iface);
+		break;
+
+		/* broadcast packet */
+	case BAT_BCAST:
+		ret = recv_bcast_packet(skb, hard_iface);
+		break;
+
+		/* vis packet */
+	case BAT_VIS:
+		ret = recv_vis_packet(skb, hard_iface);
+		break;
+		/* Translation table query (request or response) */
+	case BAT_TT_QUERY:
+		ret = recv_tt_query(skb, hard_iface);
+		break;
+		/* Roaming advertisement */
+	case BAT_ROAM_ADV:
+		ret = recv_roam_adv(skb, hard_iface);
+		break;
+	default:
+		ret = NET_RX_DROP;
+	}
+
+	if (ret == NET_RX_DROP)
+		kfree_skb(skb);
+
+	/* return NET_RX_SUCCESS in any case as we
+	 * most probably dropped the packet for
+	 * routing-logical reasons. */
+
+	return NET_RX_SUCCESS;
+
+err_free:
+	kfree_skb(skb);
+err_out:
+	return NET_RX_DROP;
+}
+
+/* This function returns true if the interface represented by ifindex is an
+ * 802.11 wireless device */
+bool is_wifi_iface(int ifindex)
+{
+	struct net_device *net_device = NULL;
+	bool ret = false;
+
+	if (ifindex == NULL_IFINDEX)
+		goto out;
+
+	net_device = dev_get_by_index(&init_net, ifindex);
+	if (!net_device)
+		goto out;
+
+#ifdef CONFIG_WIRELESS_EXT
+	/* pre-cfg80211 drivers have to implement WEXT, so it is possible to
+	 * check for wireless_handlers != NULL */
+	if (net_device->wireless_handlers)
+		ret = true;
+	else
+#endif
+		/* cfg80211 drivers have to set ieee80211_ptr */
+		if (net_device->ieee80211_ptr)
+			ret = true;
+out:
+	if (net_device)
+		dev_put(net_device);
+	return ret;
+}
+
+struct notifier_block hard_if_notifier = {
+	.notifier_call = hard_if_event,
+};
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
new file mode 100644
index 00000000..e68c5655
--- /dev/null
+++ b/net/batman-adv/hard-interface.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_ +#define _NET_BATMAN_ADV_HARD_INTERFACE_H_ + +enum hard_if_state { + IF_NOT_IN_USE, + IF_TO_BE_REMOVED, + IF_INACTIVE, + IF_ACTIVE, + IF_TO_BE_ACTIVATED, + IF_I_WANT_YOU +}; + +extern struct notifier_block hard_if_notifier; + +struct hard_iface* +hardif_get_by_netdev(const struct net_device *net_dev); +int hardif_enable_interface(struct hard_iface *hard_iface, + const char *iface_name); +void hardif_disable_interface(struct hard_iface *hard_iface); +void hardif_remove_interfaces(void); +int hardif_min_mtu(struct net_device *soft_iface); +void update_min_mtu(struct net_device *soft_iface); +void hardif_free_rcu(struct rcu_head *rcu); +bool is_wifi_iface(int ifindex); + +static inline void hardif_free_ref(struct hard_iface *hard_iface) +{ + if (atomic_dec_and_test(&hard_iface->refcount)) + call_rcu(&hard_iface->rcu, hardif_free_rcu); +} + +static inline struct hard_iface *primary_if_get_selected( + struct bat_priv *bat_priv) +{ + struct hard_iface *hard_iface; + + rcu_read_lock(); + hard_iface = rcu_dereference(bat_priv->primary_if); + if (!hard_iface) + goto out; + + if (!atomic_inc_not_zero(&hard_iface->refcount)) + hard_iface = NULL; + +out: + rcu_read_unlock(); + return hard_iface; +} + +#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */ diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c new file mode 100644 index 00000000..117687be --- /dev/null +++ b/net/batman-adv/hash.c @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich, Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "hash.h" + +/* clears the hash */ +static void hash_init(struct hashtable_t *hash) +{ + uint32_t i; + + for (i = 0 ; i < hash->size; i++) { + INIT_HLIST_HEAD(&hash->table[i]); + spin_lock_init(&hash->list_locks[i]); + } +} + +/* free only the hashtable and the hash itself. 
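+ * The elements stored in the buckets are not touched: callers that still
+ * hold entries must remove and free them first (see hash_delete() in
+ * hash.h).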
*/ +void hash_destroy(struct hashtable_t *hash) +{ + kfree(hash->list_locks); + kfree(hash->table); + kfree(hash); +} + +/* allocates and clears the hash */ +struct hashtable_t *hash_new(uint32_t size) +{ + struct hashtable_t *hash; + + hash = kmalloc(sizeof(*hash), GFP_ATOMIC); + if (!hash) + return NULL; + + hash->table = kmalloc(sizeof(*hash->table) * size, GFP_ATOMIC); + if (!hash->table) + goto free_hash; + + hash->list_locks = kmalloc(sizeof(*hash->list_locks) * size, + GFP_ATOMIC); + if (!hash->list_locks) + goto free_table; + + hash->size = size; + hash_init(hash); + return hash; + +free_table: + kfree(hash->table); +free_hash: + kfree(hash); + return NULL; +} diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h new file mode 100644 index 00000000..d4bd7862 --- /dev/null +++ b/net/batman-adv/hash.h @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich, Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_HASH_H_ +#define _NET_BATMAN_ADV_HASH_H_ + +#include <linux/list.h> + +/* callback to a compare function. should + * compare 2 element datas for their keys, + * return 0 if same and not 0 if not + * same */ +typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *); + +/* the hashfunction, should return an index + * based on the key in the data of the first + * argument and the size the second */ +typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t); +typedef void (*hashdata_free_cb)(struct hlist_node *, void *); + +struct hashtable_t { + struct hlist_head *table; /* the hashtable itself with the buckets */ + spinlock_t *list_locks; /* spinlock for each hash list entry */ + uint32_t size; /* size of hashtable */ +}; + +/* allocates and clears the hash */ +struct hashtable_t *hash_new(uint32_t size); + +/* free only the hashtable and the hash itself. */ +void hash_destroy(struct hashtable_t *hash); + +/* remove the hash structure. if hashdata_free_cb != NULL, this function will be + * called to remove the elements inside of the hash. if you don't remove the + * elements, memory might be leaked. 
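+ * A typical call site looks like (callback name hypothetical):
+ *   hash_delete(hash, free_entry_cb, arg);
+ * where free_entry_cb() resolves the hlist_node back to its container
+ * structure and frees it.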
+ */
+static inline void hash_delete(struct hashtable_t *hash,
+			       hashdata_free_cb free_cb, void *arg)
+{
+	struct hlist_head *head;
+	struct hlist_node *node, *node_tmp;
+	spinlock_t *list_lock; /* spinlock to protect write access */
+	uint32_t i;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_safe(node, node_tmp, head) {
+			hlist_del_rcu(node);
+
+			if (free_cb)
+				free_cb(node, arg);
+		}
+		spin_unlock_bh(list_lock);
+	}
+
+	hash_destroy(hash);
+}
+
+/**
+ * hash_add - adds data to the hashtable
+ * @hash: storage hash table
+ * @compare: callback to determine if 2 hash elements are identical
+ * @choose: callback calculating the hash index
+ * @data: data passed to the aforementioned callbacks as argument
+ * @data_node: to be added element
+ *
+ * Returns 0 on success, 1 if the element already is in the hash
+ * and -1 on error.
+ */
+
+static inline int hash_add(struct hashtable_t *hash,
+			   hashdata_compare_cb compare,
+			   hashdata_choose_cb choose,
+			   const void *data, struct hlist_node *data_node)
+{
+	uint32_t index;
+	int ret = -1;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	spinlock_t *list_lock; /* spinlock to protect write access */
+
+	if (!hash)
+		goto out;
+
+	index = choose(data, hash->size);
+	head = &hash->table[index];
+	list_lock = &hash->list_locks[index];
+
+	rcu_read_lock();
+	__hlist_for_each_rcu(node, head) {
+		if (!compare(node, data))
+			continue;
+
+		ret = 1;
+		goto err_unlock;
+	}
+	rcu_read_unlock();
+
+	/* no duplicate found in list, add new element */
+	spin_lock_bh(list_lock);
+	hlist_add_head_rcu(data_node, head);
+	spin_unlock_bh(list_lock);
+
+	ret = 0;
+	goto out;
+
+err_unlock:
+	rcu_read_unlock();
+out:
+	return ret;
+}
+
+/* removes data from hash, if found. returns pointer to data on success, so
+ * you can remove the used structure yourself, or NULL on error. data could be
+ * the structure you use with just the key filled, we just need the key for
+ * comparing. */
+static inline void *hash_remove(struct hashtable_t *hash,
+				hashdata_compare_cb compare,
+				hashdata_choose_cb choose, void *data)
+{
+	uint32_t index;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	void *data_save = NULL;
+
+	index = choose(data, hash->size);
+	head = &hash->table[index];
+
+	spin_lock_bh(&hash->list_locks[index]);
+	hlist_for_each(node, head) {
+		if (!compare(node, data))
+			continue;
+
+		data_save = node;
+		hlist_del_rcu(node);
+		break;
+	}
+	spin_unlock_bh(&hash->list_locks[index]);
+
+	return data_save;
+}
+
+#endif /* _NET_BATMAN_ADV_HASH_H_ */
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
new file mode 100644
index 00000000..b87518ed
--- /dev/null
+++ b/net/batman-adv/icmp_socket.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include <linux/debugfs.h> +#include <linux/slab.h> +#include "icmp_socket.h" +#include "send.h" +#include "hash.h" +#include "originator.h" +#include "hard-interface.h" + +static struct socket_client *socket_client_hash[256]; + +static void bat_socket_add_packet(struct socket_client *socket_client, + struct icmp_packet_rr *icmp_packet, + size_t icmp_len); + +void bat_socket_init(void) +{ + memset(socket_client_hash, 0, sizeof(socket_client_hash)); +} + +static int bat_socket_open(struct inode *inode, struct file *file) +{ + unsigned int i; + struct socket_client *socket_client; + + nonseekable_open(inode, file); + + socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL); + + if (!socket_client) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) { + if (!socket_client_hash[i]) { + socket_client_hash[i] = socket_client; + break; + } + } + + if (i == ARRAY_SIZE(socket_client_hash)) { + pr_err("Error - can't add another packet client: maximum number of clients reached\n"); + kfree(socket_client); + return -EXFULL; + } + + INIT_LIST_HEAD(&socket_client->queue_list); + socket_client->queue_len = 0; + socket_client->index = i; + socket_client->bat_priv = inode->i_private; + spin_lock_init(&socket_client->lock); + init_waitqueue_head(&socket_client->queue_wait); + + file->private_data = socket_client; + + inc_module_count(); + return 0; +} + +static int bat_socket_release(struct inode *inode, struct file *file) +{ + struct socket_client *socket_client = file->private_data; + struct socket_packet *socket_packet; + struct list_head *list_pos, *list_pos_tmp; + + spin_lock_bh(&socket_client->lock); + + /* for all packets in the queue ... 
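+	 * free them; bat_socket_add_packet() cannot queue new ones
+	 * meanwhile, as it takes the same lock and re-checks
+	 * socket_client_hash[] after acquiring it.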
*/ + list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) { + socket_packet = list_entry(list_pos, + struct socket_packet, list); + + list_del(list_pos); + kfree(socket_packet); + } + + socket_client_hash[socket_client->index] = NULL; + spin_unlock_bh(&socket_client->lock); + + kfree(socket_client); + dec_module_count(); + + return 0; +} + +static ssize_t bat_socket_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct socket_client *socket_client = file->private_data; + struct socket_packet *socket_packet; + size_t packet_len; + int error; + + if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0)) + return -EAGAIN; + + if ((!buf) || (count < sizeof(struct icmp_packet))) + return -EINVAL; + + if (!access_ok(VERIFY_WRITE, buf, count)) + return -EFAULT; + + error = wait_event_interruptible(socket_client->queue_wait, + socket_client->queue_len); + + if (error) + return error; + + spin_lock_bh(&socket_client->lock); + + socket_packet = list_first_entry(&socket_client->queue_list, + struct socket_packet, list); + list_del(&socket_packet->list); + socket_client->queue_len--; + + spin_unlock_bh(&socket_client->lock); + + packet_len = min(count, socket_packet->icmp_len); + error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len); + + kfree(socket_packet); + + if (error) + return -EFAULT; + + return packet_len; +} + +static ssize_t bat_socket_write(struct file *file, const char __user *buff, + size_t len, loff_t *off) +{ + struct socket_client *socket_client = file->private_data; + struct bat_priv *bat_priv = socket_client->bat_priv; + struct hard_iface *primary_if = NULL; + struct sk_buff *skb; + struct icmp_packet_rr *icmp_packet; + + struct orig_node *orig_node = NULL; + struct neigh_node *neigh_node = NULL; + size_t packet_len = sizeof(struct icmp_packet); + + if (len < sizeof(struct icmp_packet)) { + bat_dbg(DBG_BATMAN, bat_priv, + "Error - can't send packet from char device: invalid packet size\n"); + return -EINVAL; + } + + primary_if = primary_if_get_selected(bat_priv); + + if (!primary_if) { + len = -EFAULT; + goto out; + } + + if (len >= sizeof(struct icmp_packet_rr)) + packet_len = sizeof(struct icmp_packet_rr); + + skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr)); + if (!skb) { + len = -ENOMEM; + goto out; + } + + skb_reserve(skb, sizeof(struct ethhdr)); + icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); + + if (copy_from_user(icmp_packet, buff, packet_len)) { + len = -EFAULT; + goto free_skb; + } + + if (icmp_packet->header.packet_type != BAT_ICMP) { + bat_dbg(DBG_BATMAN, bat_priv, + "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n"); + len = -EINVAL; + goto free_skb; + } + + if (icmp_packet->msg_type != ECHO_REQUEST) { + bat_dbg(DBG_BATMAN, bat_priv, + "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n"); + len = -EINVAL; + goto free_skb; + } + + icmp_packet->uid = socket_client->index; + + if (icmp_packet->header.version != COMPAT_VERSION) { + icmp_packet->msg_type = PARAMETER_PROBLEM; + icmp_packet->header.version = COMPAT_VERSION; + bat_socket_add_packet(socket_client, icmp_packet, packet_len); + goto free_skb; + } + + if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) + goto dst_unreach; + + orig_node = orig_hash_find(bat_priv, icmp_packet->dst); + if (!orig_node) + goto dst_unreach; + + neigh_node = orig_node_get_router(orig_node); + if (!neigh_node) + goto dst_unreach; + + if 
(!neigh_node->if_incoming)
+		goto dst_unreach;
+
+	if (neigh_node->if_incoming->if_status != IF_ACTIVE)
+		goto dst_unreach;
+
+	memcpy(icmp_packet->orig,
+	       primary_if->net_dev->dev_addr, ETH_ALEN);
+
+	if (packet_len == sizeof(struct icmp_packet_rr))
+		memcpy(icmp_packet->rr,
+		       neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
+
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	goto out;
+
+dst_unreach:
+	icmp_packet->msg_type = DESTINATION_UNREACHABLE;
+	bat_socket_add_packet(socket_client, icmp_packet, packet_len);
+free_skb:
+	kfree_skb(skb);
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+	return len;
+}
+
+static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
+{
+	struct socket_client *socket_client = file->private_data;
+
+	poll_wait(file, &socket_client->queue_wait, wait);
+
+	if (socket_client->queue_len > 0)
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+static const struct file_operations fops = {
+	.owner = THIS_MODULE,
+	.open = bat_socket_open,
+	.release = bat_socket_release,
+	.read = bat_socket_read,
+	.write = bat_socket_write,
+	.poll = bat_socket_poll,
+	.llseek = no_llseek,
+};
+
+int bat_socket_setup(struct bat_priv *bat_priv)
+{
+	struct dentry *d;
+
+	if (!bat_priv->debug_dir)
+		goto err;
+
+	d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
+				bat_priv->debug_dir, bat_priv, &fops);
+	if (!d)
+		goto err;
+
+	return 0;
+
+err:
+	return 1;
+}
+
+static void bat_socket_add_packet(struct socket_client *socket_client,
+				  struct icmp_packet_rr *icmp_packet,
+				  size_t icmp_len)
+{
+	struct socket_packet *socket_packet;
+
+	socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
+
+	if (!socket_packet)
+		return;
+
+	INIT_LIST_HEAD(&socket_packet->list);
+	memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len);
+	socket_packet->icmp_len = icmp_len;
+
+	spin_lock_bh(&socket_client->lock);
+
+	/* while waiting for the lock the socket_client could have been
+	 * deleted */
+	if (!socket_client_hash[icmp_packet->uid]) {
+		spin_unlock_bh(&socket_client->lock);
+		kfree(socket_packet);
+		return;
+	}
+
+	list_add_tail(&socket_packet->list, &socket_client->queue_list);
+	socket_client->queue_len++;
+
+	if (socket_client->queue_len > 100) {
+		socket_packet = list_first_entry(&socket_client->queue_list,
+						 struct socket_packet, list);
+
+		list_del(&socket_packet->list);
+		kfree(socket_packet);
+		socket_client->queue_len--;
+	}
+
+	spin_unlock_bh(&socket_client->lock);
+
+	wake_up(&socket_client->queue_wait);
+}
+
+void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
+			       size_t icmp_len)
+{
+	struct socket_client *hash = socket_client_hash[icmp_packet->uid];
+
+	if (hash)
+		bat_socket_add_packet(hash, icmp_packet, icmp_len);
+}
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
new file mode 100644
index 00000000..380ed4c2
--- /dev/null
+++ b/net/batman-adv/icmp_socket.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_ +#define _NET_BATMAN_ADV_ICMP_SOCKET_H_ + +#define ICMP_SOCKET "socket" + +void bat_socket_init(void); +int bat_socket_setup(struct bat_priv *bat_priv); +void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet, + size_t icmp_len); + +#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */ diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c new file mode 100644 index 00000000..6d51caaf --- /dev/null +++ b/net/batman-adv/main.c @@ -0,0 +1,291 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "bat_sysfs.h" +#include "bat_debugfs.h" +#include "routing.h" +#include "send.h" +#include "originator.h" +#include "soft-interface.h" +#include "icmp_socket.h" +#include "translation-table.h" +#include "hard-interface.h" +#include "gateway_client.h" +#include "vis.h" +#include "hash.h" +#include "bat_algo.h" + + +/* List manipulations on hardif_list have to be rtnl_lock()'ed, + * list traversals just rcu-locked */ +struct list_head hardif_list; +char bat_routing_algo[20] = "BATMAN IV"; +static struct hlist_head bat_algo_list; + +unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + +struct workqueue_struct *bat_event_workqueue; + +static int __init batman_init(void) +{ + INIT_LIST_HEAD(&hardif_list); + INIT_HLIST_HEAD(&bat_algo_list); + + bat_iv_init(); + + /* the name should not be longer than 10 chars - see + * http://lwn.net/Articles/23634/ */ + bat_event_workqueue = create_singlethread_workqueue("bat_events"); + + if (!bat_event_workqueue) + return -ENOMEM; + + bat_socket_init(); + debugfs_init(); + + register_netdevice_notifier(&hard_if_notifier); + + pr_info("B.A.T.M.A.N. 
advanced %s (compatibility version %i) loaded\n", + SOURCE_VERSION, COMPAT_VERSION); + + return 0; +} + +static void __exit batman_exit(void) +{ + debugfs_destroy(); + unregister_netdevice_notifier(&hard_if_notifier); + hardif_remove_interfaces(); + + flush_workqueue(bat_event_workqueue); + destroy_workqueue(bat_event_workqueue); + bat_event_workqueue = NULL; + + rcu_barrier(); +} + +int mesh_init(struct net_device *soft_iface) +{ + struct bat_priv *bat_priv = netdev_priv(soft_iface); + + spin_lock_init(&bat_priv->forw_bat_list_lock); + spin_lock_init(&bat_priv->forw_bcast_list_lock); + spin_lock_init(&bat_priv->tt_changes_list_lock); + spin_lock_init(&bat_priv->tt_req_list_lock); + spin_lock_init(&bat_priv->tt_roam_list_lock); + spin_lock_init(&bat_priv->tt_buff_lock); + spin_lock_init(&bat_priv->gw_list_lock); + spin_lock_init(&bat_priv->vis_hash_lock); + spin_lock_init(&bat_priv->vis_list_lock); + spin_lock_init(&bat_priv->softif_neigh_lock); + spin_lock_init(&bat_priv->softif_neigh_vid_lock); + + INIT_HLIST_HEAD(&bat_priv->forw_bat_list); + INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); + INIT_HLIST_HEAD(&bat_priv->gw_list); + INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids); + INIT_LIST_HEAD(&bat_priv->tt_changes_list); + INIT_LIST_HEAD(&bat_priv->tt_req_list); + INIT_LIST_HEAD(&bat_priv->tt_roam_list); + + if (originator_init(bat_priv) < 1) + goto err; + + if (tt_init(bat_priv) < 1) + goto err; + + tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX); + + if (vis_init(bat_priv) < 1) + goto err; + + atomic_set(&bat_priv->gw_reselect, 0); + atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); + goto end; + +err: + mesh_free(soft_iface); + return -1; + +end: + return 0; +} + +void mesh_free(struct net_device *soft_iface) +{ + struct bat_priv *bat_priv = netdev_priv(soft_iface); + + atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING); + + purge_outstanding_packets(bat_priv, NULL); + + vis_quit(bat_priv); + + gw_node_purge(bat_priv); + originator_free(bat_priv); + + tt_free(bat_priv); + + softif_neigh_purge(bat_priv); + + atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); +} + +void inc_module_count(void) +{ + try_module_get(THIS_MODULE); +} + +void dec_module_count(void) +{ + module_put(THIS_MODULE); +} + +int is_my_mac(const uint8_t *addr) +{ + const struct hard_iface *hard_iface; + + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if (hard_iface->if_status != IF_ACTIVE) + continue; + + if (compare_eth(hard_iface->net_dev->dev_addr, addr)) { + rcu_read_unlock(); + return 1; + } + } + rcu_read_unlock(); + return 0; +} + +static struct bat_algo_ops *bat_algo_get(char *name) +{ + struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; + struct hlist_node *node; + + hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) { + if (strcmp(bat_algo_ops_tmp->name, name) != 0) + continue; + + bat_algo_ops = bat_algo_ops_tmp; + break; + } + + return bat_algo_ops; +} + +int bat_algo_register(struct bat_algo_ops *bat_algo_ops) +{ + struct bat_algo_ops *bat_algo_ops_tmp; + int ret = -1; + + bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name); + if (bat_algo_ops_tmp) { + pr_info("Trying to register already registered routing algorithm: %s\n", + bat_algo_ops->name); + goto out; + } + + /* all algorithms must implement all ops (for now) */ + if (!bat_algo_ops->bat_ogm_init || + !bat_algo_ops->bat_ogm_init_primary || + !bat_algo_ops->bat_ogm_update_mac || + !bat_algo_ops->bat_ogm_schedule || + !bat_algo_ops->bat_ogm_emit || + !bat_algo_ops->bat_ogm_receive) { 
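+		/* reject partial implementations: the algorithm only
+		 * becomes selectable once every hook is wired up */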
+ pr_info("Routing algo '%s' does not implement required ops\n", + bat_algo_ops->name); + goto out; + } + + INIT_HLIST_NODE(&bat_algo_ops->list); + hlist_add_head(&bat_algo_ops->list, &bat_algo_list); + ret = 0; + +out: + return ret; +} + +int bat_algo_select(struct bat_priv *bat_priv, char *name) +{ + struct bat_algo_ops *bat_algo_ops; + int ret = -1; + + bat_algo_ops = bat_algo_get(name); + if (!bat_algo_ops) + goto out; + + bat_priv->bat_algo_ops = bat_algo_ops; + ret = 0; + +out: + return ret; +} + +int bat_algo_seq_print_text(struct seq_file *seq, void *offset) +{ + struct bat_algo_ops *bat_algo_ops; + struct hlist_node *node; + + seq_printf(seq, "Available routing algorithms:\n"); + + hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) { + seq_printf(seq, "%s\n", bat_algo_ops->name); + } + + return 0; +} + +static int param_set_ra(const char *val, const struct kernel_param *kp) +{ + struct bat_algo_ops *bat_algo_ops; + + bat_algo_ops = bat_algo_get((char *)val); + if (!bat_algo_ops) { + pr_err("Routing algorithm '%s' is not supported\n", val); + return -EINVAL; + } + + return param_set_copystring(val, kp); +} + +static const struct kernel_param_ops param_ops_ra = { + .set = param_set_ra, + .get = param_get_string, +}; + +static struct kparam_string __param_string_ra = { + .maxlen = sizeof(bat_routing_algo), + .string = bat_routing_algo, +}; + +module_param_cb(routing_algo, ¶m_ops_ra, &__param_string_ra, 0644); +module_init(batman_init); +module_exit(batman_exit); + +MODULE_LICENSE("GPL"); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE); +MODULE_VERSION(SOURCE_VERSION); diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h new file mode 100644 index 00000000..94fa1c23 --- /dev/null +++ b/net/batman-adv/main.h @@ -0,0 +1,234 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_MAIN_H_ +#define _NET_BATMAN_ADV_MAIN_H_ + +#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \ + "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>" +#define DRIVER_DESC "B.A.T.M.A.N. advanced" +#define DRIVER_DEVICE "batman-adv" + +#ifndef SOURCE_VERSION +#define SOURCE_VERSION "2012.1.0" +#endif + +/* B.A.T.M.A.N. 
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
new file mode 100644
index 00000000..94fa1c23
--- /dev/null
+++ b/net/batman-adv/main.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_MAIN_H_
+#define _NET_BATMAN_ADV_MAIN_H_
+
+#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
+		      "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
+#define DRIVER_DESC "B.A.T.M.A.N. advanced"
+#define DRIVER_DEVICE "batman-adv"
+
+#ifndef SOURCE_VERSION
+#define SOURCE_VERSION "2012.1.0"
+#endif
+
+/* B.A.T.M.A.N. parameters */
+
+#define TQ_MAX_VALUE 255
+#define JITTER 20
+
+ /* Time To Live of broadcast messages */
+#define TTL 50
+
+/* purge originators after time in seconds if no valid packet comes in
+ * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
+#define PURGE_TIMEOUT 200000 /* 200 seconds */
+#define TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
+#define TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
+/* sliding packet range of received originator messages in sequence numbers
+ * (should be a multiple of our word size) */
+#define TQ_LOCAL_WINDOW_SIZE 64
+#define TT_REQUEST_TIMEOUT 3000 /* milliseconds we have to keep
+				 * pending tt_req */
+
+#define TQ_GLOBAL_WINDOW_SIZE 5
+#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
+#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
+#define TQ_TOTAL_BIDRECT_LIMIT 1
+
+#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */
+
+#define ROAMING_MAX_TIME 20000 /* time (in milliseconds) in which a client
+				* can roam at most ROAMING_MAX_COUNT times */
+#define ROAMING_MAX_COUNT 5
+
+#define NO_FLAGS 0
+
+#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
+
+#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
+
+#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
+
+#define VIS_INTERVAL 5000 /* 5 seconds */
+
+/* how much worse secondary interfaces may be to be considered as bonding
+ * candidates */
+#define BONDING_TQ_THRESHOLD 50
+
+/* should not be bigger than 512 bytes or change the size of
+ * forw_packet->direct_link_flags */
+#define MAX_AGGREGATION_BYTES 512
+#define MAX_AGGREGATION_MS 100
+
+#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */
+
+/* don't reset again within 30 seconds */
+#define RESET_PROTECTION_MS 30000
+#define EXPECTED_SEQNO_RANGE 65536
+
+enum mesh_state {
+	MESH_INACTIVE,
+	MESH_ACTIVE,
+	MESH_DEACTIVATING
+};
+
+#define BCAST_QUEUE_LEN 256
+#define BATMAN_QUEUE_LEN 256
+
+enum uev_action {
+	UEV_ADD = 0,
+	UEV_DEL,
+	UEV_CHANGE
+};
+
+enum uev_type {
+	UEV_GW = 0
+};
+
+#define GW_THRESHOLD 50
+
+/* Debug Messages */
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+/* Append 'batman-adv: ' before kernel messages */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/* all messages related to routing / flooding / broadcasting / etc */
+enum dbg_level {
+	DBG_BATMAN = 1 << 0,
+	DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
+	DBG_TT = 1 << 2, /* translation table operations */
+	DBG_ALL = 7
+};
+
+/* Kernel headers */
+
+#include <linux/mutex.h> /* mutex */
+#include <linux/module.h> /* needed by all modules */
+#include <linux/netdevice.h> /* netdevice */
+#include <linux/etherdevice.h> /* ethernet address classification */
+#include <linux/if_ether.h> /* ethernet header */
+#include <linux/poll.h> /* poll_table */
+#include <linux/kthread.h> /* kernel threads */
+#include <linux/pkt_sched.h> /* schedule types */
+#include <linux/workqueue.h> /* workqueue */
+#include <linux/slab.h>
+#include <net/sock.h> /* struct sock */
+#include <linux/jiffies.h>
+#include <linux/seq_file.h>
+#include "types.h"
+
+extern char bat_routing_algo[];
+extern struct list_head hardif_list;
+
+extern unsigned char broadcast_addr[];
+extern struct workqueue_struct *bat_event_workqueue;
+
+int mesh_init(struct net_device *soft_iface);
+void mesh_free(struct net_device *soft_iface);
+void inc_module_count(void);
+void dec_module_count(void);
+int is_my_mac(const uint8_t *addr);
+int bat_algo_register(struct bat_algo_ops *bat_algo_ops);
+int bat_algo_select(struct bat_priv *bat_priv, char *name);
+int bat_algo_seq_print_text(struct seq_file *seq, void *offset);
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
+
+#define bat_dbg(type, bat_priv, fmt, arg...) \
+	do { \
+		if (atomic_read(&bat_priv->log_level) & type) \
+			debug_log(bat_priv, fmt, ## arg); \
+	} \
+	while (0)
+#else /* !CONFIG_BATMAN_ADV_DEBUG */
+__printf(3, 4)
+static inline void bat_dbg(int type __always_unused,
+			   struct bat_priv *bat_priv __always_unused,
+			   const char *fmt __always_unused, ...)
+{
+}
+#endif
+
+#define bat_info(net_dev, fmt, arg...) \
+	do { \
+		struct net_device *_netdev = (net_dev); \
+		struct bat_priv *_batpriv = netdev_priv(_netdev); \
+		bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
+		pr_info("%s: " fmt, _netdev->name, ## arg); \
+	} while (0)
+#define bat_err(net_dev, fmt, arg...) \
+	do { \
+		struct net_device *_netdev = (net_dev); \
+		struct bat_priv *_batpriv = netdev_priv(_netdev); \
+		bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
+		pr_err("%s: " fmt, _netdev->name, ## arg); \
+	} while (0)
+
+/**
+ * returns 1 if they are the same ethernet addr
+ *
+ * note: can't use compare_ether_addr() as it requires aligned memory
+ */
+
+static inline int compare_eth(const void *data1, const void *data2)
+{
+	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
+/**
+ * has_timed_out - compares current time (jiffies) and timestamp + timeout
+ * @timestamp: base value to compare with (in jiffies)
+ * @timeout: added to base value before comparing (in milliseconds)
+ *
+ * Returns true if current time is after timestamp + timeout
+ */
+static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
+{
+	return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
+}
+
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+
+/* Returns the smallest signed integer in two's complement with the sizeof x */
+#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
+
+/* Checks if a sequence number x is a predecessor/successor of y.
+ * These macros handle overflows/underflows and can correctly check for a
+ * predecessor/successor unless the variable sequence number has grown by
+ * more than 2**(bitwidth(x)-1)-1.
+ * This means that for a uint8_t with the maximum value 255, it would think:
+ * - when adding nothing - it is neither a predecessor nor a successor
+ * - before adding more than 127 to the starting value - it is a predecessor,
+ * - when adding 128 - it is neither a predecessor nor a successor,
+ * - after adding more than 127 to the starting value - it is a successor */
+#define seq_before(x, y) ({typeof(x) _d1 = (x); \
+			  typeof(y) _d2 = (y); \
+			  typeof(x) _dummy = (_d1 - _d2); \
+			  (void) (&_d1 == &_d2); \
+			  _dummy > smallest_signed_int(_dummy); })
+#define seq_after(x, y) seq_before(y, x)
+
+#endif /* _NET_BATMAN_ADV_MAIN_H_ */
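The seq_before()/seq_after() window arithmetic is easiest to see with
the uint8_t cases listed in the comment above. A small self-contained
userspace sketch (GCC typeof/statement-expression extensions, macros
copied from main.h; not part of the module):

	#include <assert.h>
	#include <stdint.h>

	#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
	#define seq_before(x, y) ({typeof(x) _d1 = (x); \
				  typeof(y) _d2 = (y); \
				  typeof(x) _dummy = (_d1 - _d2); \
				  (void) (&_d1 == &_d2); \
				  _dummy > smallest_signed_int(_dummy); })
	#define seq_after(x, y) seq_before(y, x)

	int main(void)
	{
		uint8_t base = 255;
		uint8_t small_step = base + 100; /* wraps to 99 */
		uint8_t half_step = base + 128;  /* wraps to 127 */
		uint8_t big_step = base + 200;   /* wraps to 199 */

		/* adding nothing: neither predecessor nor successor */
		assert(!seq_before(base, base) && !seq_after(base, base));
		/* less than 128 ahead: base is a predecessor */
		assert(seq_before(base, small_step));
		/* exactly 128 apart: undecidable, both false */
		assert(!seq_before(base, half_step) &&
		       !seq_after(base, half_step));
		/* more than 127 ahead (wrapped): base follows big_step */
		assert(seq_after(base, big_step));
		return 0;
	}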
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
new file mode 100644
index 00000000..43c0a4f1
--- /dev/null
+++ b/net/batman-adv/originator.c
@@ -0,0 +1,645 @@
+/*
+ * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "originator.h" +#include "hash.h" +#include "translation-table.h" +#include "routing.h" +#include "gateway_client.h" +#include "hard-interface.h" +#include "unicast.h" +#include "soft-interface.h" + +static void purge_orig(struct work_struct *work); + +static void start_purge_timer(struct bat_priv *bat_priv) +{ + INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig); + queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ); +} + +/* returns 1 if they are the same originator */ +static int compare_orig(const struct hlist_node *node, const void *data2) +{ + const void *data1 = container_of(node, struct orig_node, hash_entry); + + return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); +} + +int originator_init(struct bat_priv *bat_priv) +{ + if (bat_priv->orig_hash) + return 1; + + bat_priv->orig_hash = hash_new(1024); + + if (!bat_priv->orig_hash) + goto err; + + start_purge_timer(bat_priv); + return 1; + +err: + return 0; +} + +void neigh_node_free_ref(struct neigh_node *neigh_node) +{ + if (atomic_dec_and_test(&neigh_node->refcount)) + kfree_rcu(neigh_node, rcu); +} + +/* increases the refcounter of a found router */ +struct neigh_node *orig_node_get_router(struct orig_node *orig_node) +{ + struct neigh_node *router; + + rcu_read_lock(); + router = rcu_dereference(orig_node->router); + + if (router && !atomic_inc_not_zero(&router->refcount)) + router = NULL; + + rcu_read_unlock(); + return router; +} + +struct neigh_node *create_neighbor(struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + const uint8_t *neigh, + struct hard_iface *if_incoming) +{ + struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct neigh_node *neigh_node; + + bat_dbg(DBG_BATMAN, bat_priv, + "Creating new last-hop neighbor of originator\n"); + + neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC); + if (!neigh_node) + return NULL; + + INIT_HLIST_NODE(&neigh_node->list); + INIT_LIST_HEAD(&neigh_node->bonding_list); + spin_lock_init(&neigh_node->tq_lock); + + memcpy(neigh_node->addr, neigh, ETH_ALEN); + neigh_node->orig_node = orig_neigh_node; + neigh_node->if_incoming = if_incoming; + + /* extra reference for return */ + atomic_set(&neigh_node->refcount, 2); + + spin_lock_bh(&orig_node->neigh_list_lock); + hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); + spin_unlock_bh(&orig_node->neigh_list_lock); + return neigh_node; +} + +static void orig_node_free_rcu(struct rcu_head *rcu) +{ + struct hlist_node *node, *node_tmp; + struct neigh_node *neigh_node, *tmp_neigh_node; + struct orig_node *orig_node; + + orig_node = container_of(rcu, struct orig_node, rcu); + + spin_lock_bh(&orig_node->neigh_list_lock); + + /* for all bonding members ... */ + list_for_each_entry_safe(neigh_node, tmp_neigh_node, + &orig_node->bond_list, bonding_list) { + list_del_rcu(&neigh_node->bonding_list); + neigh_node_free_ref(neigh_node); + } + + /* for all neighbors towards this originator ... 
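+	 * drop each list entry and its reference; the bonding members
+	 * were already released just above.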
+	 */
+	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
+				  &orig_node->neigh_list, list) {
+		hlist_del_rcu(&neigh_node->list);
+		neigh_node_free_ref(neigh_node);
+	}
+
+	spin_unlock_bh(&orig_node->neigh_list_lock);
+
+	frag_list_free(&orig_node->frag_list);
+	tt_global_del_orig(orig_node->bat_priv, orig_node,
+			   "originator timed out");
+
+	kfree(orig_node->tt_buff);
+	kfree(orig_node->bcast_own);
+	kfree(orig_node->bcast_own_sum);
+	kfree(orig_node);
+}
+
+void orig_node_free_ref(struct orig_node *orig_node)
+{
+	if (atomic_dec_and_test(&orig_node->refcount))
+		call_rcu(&orig_node->rcu, orig_node_free_rcu);
+}
+
+void originator_free(struct bat_priv *bat_priv)
+{
+	struct hashtable_t *hash = bat_priv->orig_hash;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	spinlock_t *list_lock; /* spinlock to protect write access */
+	struct orig_node *orig_node;
+	uint32_t i;
+
+	if (!hash)
+		return;
+
+	cancel_delayed_work_sync(&bat_priv->orig_work);
+
+	bat_priv->orig_hash = NULL;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(orig_node, node, node_tmp,
+					  head, hash_entry) {
+
+			hlist_del_rcu(node);
+			orig_node_free_ref(orig_node);
+		}
+		spin_unlock_bh(list_lock);
+	}
+
+	hash_destroy(hash);
+}
+
+/* this function finds or creates an originator entry for the given
+ * address if it does not exist */
+struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
+{
+	struct orig_node *orig_node;
+	int size;
+	int hash_added;
+
+	orig_node = orig_hash_find(bat_priv, addr);
+	if (orig_node)
+		return orig_node;
+
+	bat_dbg(DBG_BATMAN, bat_priv,
+		"Creating new originator: %pM\n", addr);
+
+	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
+	if (!orig_node)
+		return NULL;
+
+	INIT_HLIST_HEAD(&orig_node->neigh_list);
+	INIT_LIST_HEAD(&orig_node->bond_list);
+	spin_lock_init(&orig_node->ogm_cnt_lock);
+	spin_lock_init(&orig_node->bcast_seqno_lock);
+	spin_lock_init(&orig_node->neigh_list_lock);
+	spin_lock_init(&orig_node->tt_buff_lock);
+
+	/* extra reference for return */
+	atomic_set(&orig_node->refcount, 2);
+
+	orig_node->tt_initialised = false;
+	orig_node->tt_poss_change = false;
+	orig_node->bat_priv = bat_priv;
+	memcpy(orig_node->orig, addr, ETH_ALEN);
+	orig_node->router = NULL;
+	orig_node->tt_crc = 0;
+	atomic_set(&orig_node->last_ttvn, 0);
+	orig_node->tt_buff = NULL;
+	orig_node->tt_buff_len = 0;
+	atomic_set(&orig_node->tt_size, 0);
+	orig_node->bcast_seqno_reset = jiffies - 1
+					- msecs_to_jiffies(RESET_PROTECTION_MS);
+	orig_node->batman_seqno_reset = jiffies - 1
+					- msecs_to_jiffies(RESET_PROTECTION_MS);
+
+	atomic_set(&orig_node->bond_candidates, 0);
+
+	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;
+
+	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
+	if (!orig_node->bcast_own)
+		goto free_orig_node;
+
+	size = bat_priv->num_ifaces * sizeof(uint8_t);
+	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
+
+	INIT_LIST_HEAD(&orig_node->frag_list);
+	orig_node->last_frag_packet = 0;
+
+	if (!orig_node->bcast_own_sum)
+		goto free_bcast_own;
+
+	hash_added = hash_add(bat_priv->orig_hash, compare_orig,
+			      choose_orig, orig_node, &orig_node->hash_entry);
+	if (hash_added != 0)
+		goto free_bcast_own_sum;
+
+	return orig_node;
+free_bcast_own_sum:
+	kfree(orig_node->bcast_own_sum);
+free_bcast_own:
+	kfree(orig_node->bcast_own);
+free_orig_node:
+	kfree(orig_node);
+	return NULL;
+}
+
+static bool 
purge_orig_neighbors(struct bat_priv *bat_priv, + struct orig_node *orig_node, + struct neigh_node **best_neigh_node) +{ + struct hlist_node *node, *node_tmp; + struct neigh_node *neigh_node; + bool neigh_purged = false; + + *best_neigh_node = NULL; + + spin_lock_bh(&orig_node->neigh_list_lock); + + /* for all neighbors towards this originator ... */ + hlist_for_each_entry_safe(neigh_node, node, node_tmp, + &orig_node->neigh_list, list) { + + if ((has_timed_out(neigh_node->last_valid, PURGE_TIMEOUT)) || + (neigh_node->if_incoming->if_status == IF_INACTIVE) || + (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || + (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { + + if ((neigh_node->if_incoming->if_status == + IF_INACTIVE) || + (neigh_node->if_incoming->if_status == + IF_NOT_IN_USE) || + (neigh_node->if_incoming->if_status == + IF_TO_BE_REMOVED)) + bat_dbg(DBG_BATMAN, bat_priv, + "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n", + orig_node->orig, neigh_node->addr, + neigh_node->if_incoming->net_dev->name); + else + bat_dbg(DBG_BATMAN, bat_priv, + "neighbor timeout: originator %pM, neighbor: %pM, last_valid: %lu\n", + orig_node->orig, neigh_node->addr, + (neigh_node->last_valid / HZ)); + + neigh_purged = true; + + hlist_del_rcu(&neigh_node->list); + bonding_candidate_del(orig_node, neigh_node); + neigh_node_free_ref(neigh_node); + } else { + if ((!*best_neigh_node) || + (neigh_node->tq_avg > (*best_neigh_node)->tq_avg)) + *best_neigh_node = neigh_node; + } + } + + spin_unlock_bh(&orig_node->neigh_list_lock); + return neigh_purged; +} + +static bool purge_orig_node(struct bat_priv *bat_priv, + struct orig_node *orig_node) +{ + struct neigh_node *best_neigh_node; + + if (has_timed_out(orig_node->last_valid, 2 * PURGE_TIMEOUT)) { + bat_dbg(DBG_BATMAN, bat_priv, + "Originator timeout: originator %pM, last_valid %lu\n", + orig_node->orig, (orig_node->last_valid / HZ)); + return true; + } else { + if (purge_orig_neighbors(bat_priv, orig_node, + &best_neigh_node)) + update_route(bat_priv, orig_node, best_neigh_node); + } + + return false; +} + +static void _purge_orig(struct bat_priv *bat_priv) +{ + struct hashtable_t *hash = bat_priv->orig_hash; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + spinlock_t *list_lock; /* spinlock to protect write access */ + struct orig_node *orig_node; + uint32_t i; + + if (!hash) + return; + + /* for all origins... 
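+	 * walk every bucket and purge stale originators while holding
+	 * only that bucket's list lock.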
*/ + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(orig_node, node, node_tmp, + head, hash_entry) { + if (purge_orig_node(bat_priv, orig_node)) { + if (orig_node->gw_flags) + gw_node_delete(bat_priv, orig_node); + hlist_del_rcu(node); + orig_node_free_ref(orig_node); + continue; + } + + if (has_timed_out(orig_node->last_frag_packet, + FRAG_TIMEOUT)) + frag_list_free(&orig_node->frag_list); + } + spin_unlock_bh(list_lock); + } + + gw_node_purge(bat_priv); + gw_election(bat_priv); + + softif_neigh_purge(bat_priv); +} + +static void purge_orig(struct work_struct *work) +{ + struct delayed_work *delayed_work = + container_of(work, struct delayed_work, work); + struct bat_priv *bat_priv = + container_of(delayed_work, struct bat_priv, orig_work); + + _purge_orig(bat_priv); + start_purge_timer(bat_priv); +} + +void purge_orig_ref(struct bat_priv *bat_priv) +{ + _purge_orig(bat_priv); +} + +int orig_seq_print_text(struct seq_file *seq, void *offset) +{ + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct hashtable_t *hash = bat_priv->orig_hash; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + struct hard_iface *primary_if; + struct orig_node *orig_node; + struct neigh_node *neigh_node, *neigh_node_tmp; + int batman_count = 0; + int last_seen_secs; + int last_seen_msecs; + uint32_t i; + int ret = 0; + + primary_if = primary_if_get_selected(bat_priv); + + if (!primary_if) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - please specify interfaces to enable it\n", + net_dev->name); + goto out; + } + + if (primary_if->if_status != IF_ACTIVE) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - primary interface not active\n", + net_dev->name); + goto out; + } + + seq_printf(seq, "[B.A.T.M.A.N. 
adv %s, MainIF/MAC: %s/%pM (%s)]\n", + SOURCE_VERSION, primary_if->net_dev->name, + primary_if->net_dev->dev_addr, net_dev->name); + seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n", + "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", + "outgoingIF", "Potential nexthops"); + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { + neigh_node = orig_node_get_router(orig_node); + if (!neigh_node) + continue; + + if (neigh_node->tq_avg == 0) + goto next; + + last_seen_secs = jiffies_to_msecs(jiffies - + orig_node->last_valid) / 1000; + last_seen_msecs = jiffies_to_msecs(jiffies - + orig_node->last_valid) % 1000; + + seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", + orig_node->orig, last_seen_secs, + last_seen_msecs, neigh_node->tq_avg, + neigh_node->addr, + neigh_node->if_incoming->net_dev->name); + + hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp, + &orig_node->neigh_list, list) { + seq_printf(seq, " %pM (%3i)", + neigh_node_tmp->addr, + neigh_node_tmp->tq_avg); + } + + seq_printf(seq, "\n"); + batman_count++; + +next: + neigh_node_free_ref(neigh_node); + } + rcu_read_unlock(); + } + + if (batman_count == 0) + seq_printf(seq, "No batman nodes in range ...\n"); + +out: + if (primary_if) + hardif_free_ref(primary_if); + return ret; +} + +static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) +{ + void *data_ptr; + + data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS, + GFP_ATOMIC); + if (!data_ptr) + return -1; + + memcpy(data_ptr, orig_node->bcast_own, + (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS); + kfree(orig_node->bcast_own); + orig_node->bcast_own = data_ptr; + + data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); + if (!data_ptr) + return -1; + + memcpy(data_ptr, orig_node->bcast_own_sum, + (max_if_num - 1) * sizeof(uint8_t)); + kfree(orig_node->bcast_own_sum); + orig_node->bcast_own_sum = data_ptr; + + return 0; +} + +int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct hashtable_t *hash = bat_priv->orig_hash; + struct hlist_node *node; + struct hlist_head *head; + struct orig_node *orig_node; + uint32_t i; + int ret; + + /* resize all orig nodes because orig_node->bcast_own(_sum) depend on + * if_num */ + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { + spin_lock_bh(&orig_node->ogm_cnt_lock); + ret = orig_node_add_if(orig_node, max_if_num); + spin_unlock_bh(&orig_node->ogm_cnt_lock); + + if (ret == -1) + goto err; + } + rcu_read_unlock(); + } + + return 0; + +err: + rcu_read_unlock(); + return -ENOMEM; +} + +static int orig_node_del_if(struct orig_node *orig_node, + int max_if_num, int del_if_num) +{ + void *data_ptr = NULL; + int chunk_size; + + /* last interface was removed */ + if (max_if_num == 0) + goto free_bcast_own; + + chunk_size = sizeof(unsigned long) * NUM_WORDS; + data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC); + if (!data_ptr) + return -1; + + /* copy first part */ + memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size); + + /* copy second part */ + memcpy((char *)data_ptr + del_if_num * chunk_size, + orig_node->bcast_own + ((del_if_num + 1) * chunk_size), + (max_if_num - del_if_num) * chunk_size); + +free_bcast_own: + kfree(orig_node->bcast_own); + orig_node->bcast_own = data_ptr; + + if 
(max_if_num == 0) + goto free_own_sum; + + data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); + if (!data_ptr) + return -1; + + memcpy(data_ptr, orig_node->bcast_own_sum, + del_if_num * sizeof(uint8_t)); + + memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t), + orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)), + (max_if_num - del_if_num) * sizeof(uint8_t)); + +free_own_sum: + kfree(orig_node->bcast_own_sum); + orig_node->bcast_own_sum = data_ptr; + + return 0; +} + +int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct hashtable_t *hash = bat_priv->orig_hash; + struct hlist_node *node; + struct hlist_head *head; + struct hard_iface *hard_iface_tmp; + struct orig_node *orig_node; + uint32_t i; + int ret; + + /* resize all orig nodes because orig_node->bcast_own(_sum) depend on + * if_num */ + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { + spin_lock_bh(&orig_node->ogm_cnt_lock); + ret = orig_node_del_if(orig_node, max_if_num, + hard_iface->if_num); + spin_unlock_bh(&orig_node->ogm_cnt_lock); + + if (ret == -1) + goto err; + } + rcu_read_unlock(); + } + + /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) { + if (hard_iface_tmp->if_status == IF_NOT_IN_USE) + continue; + + if (hard_iface == hard_iface_tmp) + continue; + + if (hard_iface->soft_iface != hard_iface_tmp->soft_iface) + continue; + + if (hard_iface_tmp->if_num > hard_iface->if_num) + hard_iface_tmp->if_num--; + } + rcu_read_unlock(); + + hard_iface->if_num = -1; + return 0; + +err: + rcu_read_unlock(); + return -ENOMEM; +} diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h new file mode 100644 index 00000000..3fe2eda8 --- /dev/null +++ b/net/batman-adv/originator.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_ +#define _NET_BATMAN_ADV_ORIGINATOR_H_ + +#include "hash.h" + +int originator_init(struct bat_priv *bat_priv); +void originator_free(struct bat_priv *bat_priv); +void purge_orig_ref(struct bat_priv *bat_priv); +void orig_node_free_ref(struct orig_node *orig_node); +struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr); +struct neigh_node *create_neighbor(struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + const uint8_t *neigh, + struct hard_iface *if_incoming); +void neigh_node_free_ref(struct neigh_node *neigh_node); +struct neigh_node *orig_node_get_router(struct orig_node *orig_node); +int orig_seq_print_text(struct seq_file *seq, void *offset); +int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num); +int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); + + +/* hashfunction to choose an entry in a hash table of given size */ +/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ +static inline uint32_t choose_orig(const void *data, uint32_t size) +{ + const unsigned char *key = data; + uint32_t hash = 0; + size_t i; + + for (i = 0; i < 6; i++) { + hash += key[i]; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash % size; +} + +static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv, + const void *data) +{ + struct hashtable_t *hash = bat_priv->orig_hash; + struct hlist_head *head; + struct hlist_node *node; + struct orig_node *orig_node, *orig_node_tmp = NULL; + int index; + + if (!hash) + return NULL; + + index = choose_orig(data, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { + if (!compare_eth(orig_node, data)) + continue; + + if (!atomic_inc_not_zero(&orig_node->refcount)) + continue; + + orig_node_tmp = orig_node; + break; + } + rcu_read_unlock(); + + return orig_node_tmp; +} + +#endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */ diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h new file mode 100644 index 00000000..441f3db1 --- /dev/null +++ b/net/batman-adv/packet.h @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_PACKET_H_ +#define _NET_BATMAN_ADV_PACKET_H_ + +#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ + +enum bat_packettype { + BAT_OGM = 0x01, + BAT_ICMP = 0x02, + BAT_UNICAST = 0x03, + BAT_BCAST = 0x04, + BAT_VIS = 0x05, + BAT_UNICAST_FRAG = 0x06, + BAT_TT_QUERY = 0x07, + BAT_ROAM_ADV = 0x08 +}; + +/* this file is included by batctl which needs these defines */ +#define COMPAT_VERSION 14 + +enum batman_flags { + PRIMARIES_FIRST_HOP = 1 << 4, + VIS_SERVER = 1 << 5, + DIRECTLINK = 1 << 6 +}; + +/* ICMP message types */ +enum icmp_packettype { + ECHO_REPLY = 0, + DESTINATION_UNREACHABLE = 3, + ECHO_REQUEST = 8, + TTL_EXCEEDED = 11, + PARAMETER_PROBLEM = 12 +}; + +/* vis defines */ +enum vis_packettype { + VIS_TYPE_SERVER_SYNC = 0, + VIS_TYPE_CLIENT_UPDATE = 1 +}; + +/* fragmentation defines */ +enum unicast_frag_flags { + UNI_FRAG_HEAD = 1 << 0, + UNI_FRAG_LARGETAIL = 1 << 1 +}; + +/* TT_QUERY subtypes */ +#define TT_QUERY_TYPE_MASK 0x3 + +enum tt_query_packettype { + TT_REQUEST = 0, + TT_RESPONSE = 1 +}; + +/* TT_QUERY flags */ +enum tt_query_flags { + TT_FULL_TABLE = 1 << 2 +}; + +/* TT_CLIENT flags. + * Flags from 1 << 0 to 1 << 7 are sent on the wire, while flags from 1 << 8 to + * 1 << 15 are used for local computation only */ +enum tt_client_flags { + TT_CLIENT_DEL = 1 << 0, + TT_CLIENT_ROAM = 1 << 1, + TT_CLIENT_WIFI = 1 << 2, + TT_CLIENT_NOPURGE = 1 << 8, + TT_CLIENT_NEW = 1 << 9, + TT_CLIENT_PENDING = 1 << 10 +}; + +struct batman_header { + uint8_t packet_type; + uint8_t version; /* batman version field */ + uint8_t ttl; +} __packed; + +struct batman_ogm_packet { + struct batman_header header; + uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ + uint32_t seqno; + uint8_t orig[6]; + uint8_t prev_sender[6]; + uint8_t gw_flags; /* flags related to gateway class */ + uint8_t tq; + uint8_t tt_num_changes; + uint8_t ttvn; /* translation table version number */ + uint16_t tt_crc; +} __packed; + +#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet) + +struct icmp_packet { + struct batman_header header; + uint8_t msg_type; /* see ICMP message types above */ + uint8_t dst[6]; + uint8_t orig[6]; + uint16_t seqno; + uint8_t uid; + uint8_t reserved; +} __packed; + +#define BAT_RR_LEN 16 + +/* icmp_packet_rr must start with all fields from icmp_packet + * as this is assumed by code that handles ICMP packets */ +struct icmp_packet_rr { + struct batman_header header; + uint8_t msg_type; /* see ICMP message types above */ + uint8_t dst[6]; + uint8_t orig[6]; + uint16_t seqno; + uint8_t uid; + uint8_t rr_cur; + uint8_t rr[BAT_RR_LEN][ETH_ALEN]; +} __packed; + +struct unicast_packet { + struct batman_header header; + uint8_t ttvn; /* destination translation table version number */ + uint8_t dest[6]; +} __packed; + +struct unicast_frag_packet { + struct batman_header header; + uint8_t ttvn; /* destination translation table version number */ + uint8_t dest[6]; + uint8_t flags; + uint8_t align; + uint8_t orig[6]; + uint16_t seqno; +} __packed; + +struct bcast_packet { + struct batman_header header; + uint8_t reserved; + uint32_t seqno; + uint8_t orig[6]; +} __packed; + +struct vis_packet { + struct batman_header header; + uint8_t vis_type; /* which type of vis-participant sent this?
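(see enum vis_packettype above)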
*/ + uint32_t seqno; /* sequence number */ + uint8_t entries; /* number of entries behind this struct */ + uint8_t reserved; + uint8_t vis_orig[6]; /* originator that announces its neighbors */ + uint8_t target_orig[6]; /* who should receive this packet */ + uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ +} __packed; + +struct tt_query_packet { + struct batman_header header; + /* the flag field is a combination of: + * - TT_REQUEST or TT_RESPONSE + * - TT_FULL_TABLE */ + uint8_t flags; + uint8_t dst[ETH_ALEN]; + uint8_t src[ETH_ALEN]; + /* the ttvn field is: + * if TT_REQUEST: ttvn that triggered the + * request + * if TT_RESPONSE: new ttvn for the src + * orig_node */ + uint8_t ttvn; + /* tt_data field is: + * if TT_REQUEST: crc associated with the + * ttvn + * if TT_RESPONSE: table_size */ + uint16_t tt_data; +} __packed; + +struct roam_adv_packet { + struct batman_header header; + uint8_t reserved; + uint8_t dst[ETH_ALEN]; + uint8_t src[ETH_ALEN]; + uint8_t client[ETH_ALEN]; +} __packed; + +struct tt_change { + uint8_t flags; + uint8_t addr[ETH_ALEN]; +} __packed; + +#endif /* _NET_BATMAN_ADV_PACKET_H_ */ diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c new file mode 100644 index 00000000..fd63951d --- /dev/null +++ b/net/batman-adv/ring_buffer.c @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "ring_buffer.h" + +void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value) +{ + lq_recv[*lq_index] = value; + *lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE; +} + +uint8_t ring_buffer_avg(const uint8_t lq_recv[]) +{ + const uint8_t *ptr; + uint16_t count = 0, i = 0, sum = 0; + + ptr = lq_recv; + + while (i < TQ_GLOBAL_WINDOW_SIZE) { + if (*ptr != 0) { + count++; + sum += *ptr; + } + + i++; + ptr++; + } + + if (count == 0) + return 0; + + return (uint8_t)(sum / count); +} diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h new file mode 100644 index 00000000..8b58bd82 --- /dev/null +++ b/net/batman-adv/ring_buffer.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_ +#define _NET_BATMAN_ADV_RING_BUFFER_H_ + +void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value); +uint8_t ring_buffer_avg(const uint8_t lq_recv[]); + +#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */ diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c new file mode 100644 index 00000000..8df3a1ff --- /dev/null +++ b/net/batman-adv/routing.c @@ -0,0 +1,1132 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "routing.h" +#include "send.h" +#include "soft-interface.h" +#include "hard-interface.h" +#include "icmp_socket.h" +#include "translation-table.h" +#include "originator.h" +#include "vis.h" +#include "unicast.h" + +void slide_own_bcast_window(struct hard_iface *hard_iface) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct hashtable_t *hash = bat_priv->orig_hash; + struct hlist_node *node; + struct hlist_head *head; + struct orig_node *orig_node; + unsigned long *word; + uint32_t i; + size_t word_index; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { + spin_lock_bh(&orig_node->ogm_cnt_lock); + word_index = hard_iface->if_num * NUM_WORDS; + word = &(orig_node->bcast_own[word_index]); + + bit_get_packet(bat_priv, word, 1, 0); + orig_node->bcast_own_sum[hard_iface->if_num] = + bit_packet_count(word); + spin_unlock_bh(&orig_node->ogm_cnt_lock); + } + rcu_read_unlock(); + } +} + +static void _update_route(struct bat_priv *bat_priv, + struct orig_node *orig_node, + struct neigh_node *neigh_node) +{ + struct neigh_node *curr_router; + + curr_router = orig_node_get_router(orig_node); + + /* route deleted */ + if ((curr_router) && (!neigh_node)) { + bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", + orig_node->orig); + tt_global_del_orig(bat_priv, orig_node, + "Deleted route towards originator"); + + /* route added */ + } else if ((!curr_router) && (neigh_node)) { + + bat_dbg(DBG_ROUTES, bat_priv, + "Adding route towards: %pM (via %pM)\n", + orig_node->orig, neigh_node->addr); + /* route changed */ + } else if (neigh_node && curr_router) { + bat_dbg(DBG_ROUTES, bat_priv, + "Changing route towards: %pM (now via %pM - was via %pM)\n", + orig_node->orig, neigh_node->addr, + curr_router->addr); + } + + if (curr_router) + neigh_node_free_ref(curr_router); + + /* increase refcount of new best neighbor */ + if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) + neigh_node = NULL; + + 
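/* publish the new router under the neigh_list lock: readers inside + * rcu_read_lock() keep seeing either the old or the new pointer; + * the old router's reference is dropped only after the swap */ +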
spin_lock_bh(&orig_node->neigh_list_lock); + rcu_assign_pointer(orig_node->router, neigh_node); + spin_unlock_bh(&orig_node->neigh_list_lock); + + /* decrease refcount of previous best neighbor */ + if (curr_router) + neigh_node_free_ref(curr_router); +} + +void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, + struct neigh_node *neigh_node) +{ + struct neigh_node *router = NULL; + + if (!orig_node) + goto out; + + router = orig_node_get_router(orig_node); + + if (router != neigh_node) + _update_route(bat_priv, orig_node, neigh_node); + +out: + if (router) + neigh_node_free_ref(router); +} + +/* caller must hold the neigh_list_lock */ +void bonding_candidate_del(struct orig_node *orig_node, + struct neigh_node *neigh_node) +{ + /* this neighbor is not part of our candidate list */ + if (list_empty(&neigh_node->bonding_list)) + goto out; + + list_del_rcu(&neigh_node->bonding_list); + INIT_LIST_HEAD(&neigh_node->bonding_list); + neigh_node_free_ref(neigh_node); + atomic_dec(&orig_node->bond_candidates); + +out: + return; +} + +void bonding_candidate_add(struct orig_node *orig_node, + struct neigh_node *neigh_node) +{ + struct hlist_node *node; + struct neigh_node *tmp_neigh_node, *router = NULL; + uint8_t interference_candidate = 0; + + spin_lock_bh(&orig_node->neigh_list_lock); + + /* only consider if it has the same primary address ... */ + if (!compare_eth(orig_node->orig, + neigh_node->orig_node->primary_addr)) + goto candidate_del; + + router = orig_node_get_router(orig_node); + if (!router) + goto candidate_del; + + /* ... and is good enough to be considered */ + if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD) + goto candidate_del; + + /** + * check if we have another candidate with the same mac address or + * interface. If we do, we won't select this candidate because of + * possible interference. + */ + hlist_for_each_entry_rcu(tmp_neigh_node, node, + &orig_node->neigh_list, list) { + + if (tmp_neigh_node == neigh_node) + continue; + + /* we only care if the other candidate is even + * considered as candidate. */ + if (list_empty(&tmp_neigh_node->bonding_list)) + continue; + + if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) || + (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) { + interference_candidate = 1; + break; + } + } + + /* don't care further if it is an interference candidate */ + if (interference_candidate) + goto candidate_del; + + /* this neighbor already is part of our candidate list */ + if (!list_empty(&neigh_node->bonding_list)) + goto out; + + if (!atomic_inc_not_zero(&neigh_node->refcount)) + goto out; + + list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list); + atomic_inc(&orig_node->bond_candidates); + goto out; + +candidate_del: + bonding_candidate_del(orig_node, neigh_node); + +out: + spin_unlock_bh(&orig_node->neigh_list_lock); + + if (router) + neigh_node_free_ref(router); +} + +/* copy primary address for bonding */ +void bonding_save_primary(const struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + const struct batman_ogm_packet *batman_ogm_packet) +{ + if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) + return; + + memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN); +} + +/* checks whether the host restarted and is in the protection time. + * returns: + * 0 if the packet is to be accepted + * 1 if the packet is to be ignored. 
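+ * + * e.g. with the defaults from main.h (TQ_LOCAL_WINDOW_SIZE 64, + * EXPECTED_SEQNO_RANGE 65536, RESET_PROTECTION_MS 30000) a diff of + * -64 or 65536 triggers the check: the first such packet after 30 + * seconds of quiet is accepted and restarts the window, any + * further ones are ignored.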
+ */ +int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, + unsigned long *last_reset) +{ + if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || + (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { + if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) { + + *last_reset = jiffies; + bat_dbg(DBG_BATMAN, bat_priv, + "old packet received, start protection\n"); + + return 0; + } else { + return 1; + } + } + return 0; +} + +int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct ethhdr *ethhdr; + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN))) + return NET_RX_DROP; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with broadcast indication but unicast recipient */ + if (!is_broadcast_ether_addr(ethhdr->h_dest)) + return NET_RX_DROP; + + /* packet with broadcast sender address */ + if (is_broadcast_ether_addr(ethhdr->h_source)) + return NET_RX_DROP; + + /* create a copy of the skb, if needed, to modify it. */ + if (skb_cow(skb, 0) < 0) + return NET_RX_DROP; + + /* keep skb linear */ + if (skb_linearize(skb) < 0) + return NET_RX_DROP; + + bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb); + + kfree_skb(skb); + return NET_RX_SUCCESS; +} + +static int recv_my_icmp_packet(struct bat_priv *bat_priv, + struct sk_buff *skb, size_t icmp_len) +{ + struct hard_iface *primary_if = NULL; + struct orig_node *orig_node = NULL; + struct neigh_node *router = NULL; + struct icmp_packet_rr *icmp_packet; + int ret = NET_RX_DROP; + + icmp_packet = (struct icmp_packet_rr *)skb->data; + + /* add data to device queue */ + if (icmp_packet->msg_type != ECHO_REQUEST) { + bat_socket_receive_packet(icmp_packet, icmp_len); + goto out; + } + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + /* answer echo request (ping) */ + /* get routing information */ + orig_node = orig_hash_find(bat_priv, icmp_packet->orig); + if (!orig_node) + goto out; + + router = orig_node_get_router(orig_node); + if (!router) + goto out; + + /* create a copy of the skb, if needed, to modify it. 
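+ * skb_cow() unshares a cloned skb and guarantees enough headroom + * for the ethernet header that send_skb_packet() pushes in front + * of the ICMP data.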
*/ + if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + goto out; + + icmp_packet = (struct icmp_packet_rr *)skb->data; + + memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); + memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); + icmp_packet->msg_type = ECHO_REPLY; + icmp_packet->header.ttl = TTL; + + send_skb_packet(skb, router->if_incoming, router->addr); + ret = NET_RX_SUCCESS; + +out: + if (primary_if) + hardif_free_ref(primary_if); + if (router) + neigh_node_free_ref(router); + if (orig_node) + orig_node_free_ref(orig_node); + return ret; +} + +static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, + struct sk_buff *skb) +{ + struct hard_iface *primary_if = NULL; + struct orig_node *orig_node = NULL; + struct neigh_node *router = NULL; + struct icmp_packet *icmp_packet; + int ret = NET_RX_DROP; + + icmp_packet = (struct icmp_packet *)skb->data; + + /* send TTL exceeded if packet is an echo request (traceroute) */ + if (icmp_packet->msg_type != ECHO_REQUEST) { + pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n", + icmp_packet->orig, icmp_packet->dst); + goto out; + } + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + /* get routing information */ + orig_node = orig_hash_find(bat_priv, icmp_packet->orig); + if (!orig_node) + goto out; + + router = orig_node_get_router(orig_node); + if (!router) + goto out; + + /* create a copy of the skb, if needed, to modify it. */ + if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + goto out; + + icmp_packet = (struct icmp_packet *)skb->data; + + memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); + memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); + icmp_packet->msg_type = TTL_EXCEEDED; + icmp_packet->header.ttl = TTL; + + send_skb_packet(skb, router->if_incoming, router->addr); + ret = NET_RX_SUCCESS; + +out: + if (primary_if) + hardif_free_ref(primary_if); + if (router) + neigh_node_free_ref(router); + if (orig_node) + orig_node_free_ref(orig_node); + return ret; +} + + +int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct icmp_packet_rr *icmp_packet; + struct ethhdr *ethhdr; + struct orig_node *orig_node = NULL; + struct neigh_node *router = NULL; + int hdr_size = sizeof(struct icmp_packet); + int ret = NET_RX_DROP; + + /** + * we truncate all incoming icmp packets if they don't match our size + */ + if (skb->len >= sizeof(struct icmp_packet_rr)) + hdr_size = sizeof(struct icmp_packet_rr); + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, hdr_size))) + goto out; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with unicast indication but broadcast recipient */ + if (is_broadcast_ether_addr(ethhdr->h_dest)) + goto out; + + /* packet with broadcast sender address */ + if (is_broadcast_ether_addr(ethhdr->h_source)) + goto out; + + /* not for me */ + if (!is_my_mac(ethhdr->h_dest)) + goto out; + + icmp_packet = (struct icmp_packet_rr *)skb->data; + + /* add record route information if not full */ + if ((hdr_size == sizeof(struct icmp_packet_rr)) && + (icmp_packet->rr_cur < BAT_RR_LEN)) { + memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]), + ethhdr->h_dest, ETH_ALEN); + icmp_packet->rr_cur++; + } + + /* packet for me */ + if (is_my_mac(icmp_packet->dst)) + return recv_my_icmp_packet(bat_priv, skb, hdr_size); + + /* TTL exceeded */ + if (icmp_packet->header.ttl < 2) + return 
recv_icmp_ttl_exceeded(bat_priv, skb); + + /* get routing information */ + orig_node = orig_hash_find(bat_priv, icmp_packet->dst); + if (!orig_node) + goto out; + + router = orig_node_get_router(orig_node); + if (!router) + goto out; + + /* create a copy of the skb, if needed, to modify it. */ + if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + goto out; + + icmp_packet = (struct icmp_packet_rr *)skb->data; + + /* decrement ttl */ + icmp_packet->header.ttl--; + + /* route it */ + send_skb_packet(skb, router->if_incoming, router->addr); + ret = NET_RX_SUCCESS; + +out: + if (router) + neigh_node_free_ref(router); + if (orig_node) + orig_node_free_ref(orig_node); + return ret; +} + +/* In the bonding case, send the packets in a round + * robin fashion over the remaining interfaces. + * + * This method rotates the bonding list and increases the + * returned router's refcount. */ +static struct neigh_node *find_bond_router(struct orig_node *primary_orig, + const struct hard_iface *recv_if) +{ + struct neigh_node *tmp_neigh_node; + struct neigh_node *router = NULL, *first_candidate = NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list, + bonding_list) { + if (!first_candidate) + first_candidate = tmp_neigh_node; + + /* recv_if == NULL on the first node. */ + if (tmp_neigh_node->if_incoming == recv_if) + continue; + + if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) + continue; + + router = tmp_neigh_node; + break; + } + + /* use the first candidate if nothing was found. */ + if (!router && first_candidate && + atomic_inc_not_zero(&first_candidate->refcount)) + router = first_candidate; + + if (!router) + goto out; + + /* selected should point to the next element + * after the current router */ + spin_lock_bh(&primary_orig->neigh_list_lock); + /* this is a list_move(), which unfortunately + * does not exist as rcu version */ + list_del_rcu(&primary_orig->bond_list); + list_add_rcu(&primary_orig->bond_list, + &router->bonding_list); + spin_unlock_bh(&primary_orig->neigh_list_lock); + +out: + rcu_read_unlock(); + return router; +} + +/* Interface Alternating: Use the best of the + * remaining candidates which are not using + * this interface. + * + * Increases the returned router's refcount */ +static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig, + const struct hard_iface *recv_if) +{ + struct neigh_node *tmp_neigh_node; + struct neigh_node *router = NULL, *first_candidate = NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list, + bonding_list) { + if (!first_candidate) + first_candidate = tmp_neigh_node; + + /* recv_if == NULL on the first node. */ + if (tmp_neigh_node->if_incoming == recv_if) + continue; + + if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) + continue; + + /* if we don't have a router yet + * or this one is better, choose it. */ + if ((!router) || + (tmp_neigh_node->tq_avg > router->tq_avg)) { + /* decrement refcount of + * previously selected router */ + if (router) + neigh_node_free_ref(router); + + router = tmp_neigh_node; + atomic_inc_not_zero(&router->refcount); + } + + neigh_node_free_ref(tmp_neigh_node); + } + + /* use the first candidate if nothing was found. 
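+ * (i.e. every other candidate arrived on recv_if or could not be + * referenced; wrapping to the first entry keeps the packet moving + * instead of dropping it)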
*/ + if (!router && first_candidate && + atomic_inc_not_zero(&first_candidate->refcount)) + router = first_candidate; + + rcu_read_unlock(); + return router; +} + +int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct tt_query_packet *tt_query; + uint16_t tt_len; + struct ethhdr *ethhdr; + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet)))) + goto out; + + /* I could need to modify it */ + if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0) + goto out; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with unicast indication but broadcast recipient */ + if (is_broadcast_ether_addr(ethhdr->h_dest)) + goto out; + + /* packet with broadcast sender address */ + if (is_broadcast_ether_addr(ethhdr->h_source)) + goto out; + + tt_query = (struct tt_query_packet *)skb->data; + + tt_query->tt_data = ntohs(tt_query->tt_data); + + switch (tt_query->flags & TT_QUERY_TYPE_MASK) { + case TT_REQUEST: + /* If we cannot provide an answer the tt_request is + * forwarded */ + if (!send_tt_response(bat_priv, tt_query)) { + bat_dbg(DBG_TT, bat_priv, + "Routing TT_REQUEST to %pM [%c]\n", + tt_query->dst, + (tt_query->flags & TT_FULL_TABLE ? 'F' : '.')); + tt_query->tt_data = htons(tt_query->tt_data); + return route_unicast_packet(skb, recv_if); + } + break; + case TT_RESPONSE: + if (is_my_mac(tt_query->dst)) { + /* packet needs to be linearized to access the TT + * changes */ + if (skb_linearize(skb) < 0) + goto out; + /* skb_linearize() possibly changed skb->data */ + tt_query = (struct tt_query_packet *)skb->data; + + tt_len = tt_query->tt_data * sizeof(struct tt_change); + + /* Ensure we have all the claimed data */ + if (unlikely(skb_headlen(skb) < + sizeof(struct tt_query_packet) + tt_len)) + goto out; + + handle_tt_response(bat_priv, tt_query); + } else { + bat_dbg(DBG_TT, bat_priv, + "Routing TT_RESPONSE to %pM [%c]\n", + tt_query->dst, + (tt_query->flags & TT_FULL_TABLE ? 'F' : '.')); + tt_query->tt_data = htons(tt_query->tt_data); + return route_unicast_packet(skb, recv_if); + } + break; + } + +out: + /* returning NET_RX_DROP will make the caller function kfree the skb */ + return NET_RX_DROP; +} + +int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct roam_adv_packet *roam_adv_packet; + struct orig_node *orig_node; + struct ethhdr *ethhdr; + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet)))) + goto out; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with unicast indication but broadcast recipient */ + if (is_broadcast_ether_addr(ethhdr->h_dest)) + goto out; + + /* packet with broadcast sender address */ + if (is_broadcast_ether_addr(ethhdr->h_source)) + goto out; + + roam_adv_packet = (struct roam_adv_packet *)skb->data; + + if (!is_my_mac(roam_adv_packet->dst)) + return route_unicast_packet(skb, recv_if); + + orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); + if (!orig_node) + goto out; + + bat_dbg(DBG_TT, bat_priv, + "Received ROAMING_ADV from %pM (client %pM)\n", + roam_adv_packet->src, roam_adv_packet->client); + + tt_global_add(bat_priv, orig_node, roam_adv_packet->client, + atomic_read(&orig_node->last_ttvn) + 1, true, false); + + /* Roaming phase starts: I have new information but the ttvn has not + * been incremented yet. 
This flag will make me check all the incoming + packets for the correct destination. */ + bat_priv->tt_poss_change = true; + + orig_node_free_ref(orig_node); +out: + /* returning NET_RX_DROP will make the caller function kfree the skb */ + return NET_RX_DROP; +} + +/* find a suitable router for this originator, and use + * bonding if possible. increases the found neighbor's + * refcount. */ +struct neigh_node *find_router(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const struct hard_iface *recv_if) +{ + struct orig_node *primary_orig_node; + struct orig_node *router_orig; + struct neigh_node *router; + static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; + int bonding_enabled; + + if (!orig_node) + return NULL; + + router = orig_node_get_router(orig_node); + if (!router) + goto err; + + /* without bonding, the first node should + * always choose the default router. */ + bonding_enabled = atomic_read(&bat_priv->bonding); + + rcu_read_lock(); + /* select default router to output */ + router_orig = router->orig_node; + if (!router_orig) + goto err_unlock; + + if ((!recv_if) && (!bonding_enabled)) + goto return_router; + + /* if we have something in the primary_addr, we can search + * for a potential bonding candidate. */ + if (compare_eth(router_orig->primary_addr, zero_mac)) + goto return_router; + + /* find the orig_node which has the primary interface. might + * even be the same as our router_orig in many cases */ + + if (compare_eth(router_orig->primary_addr, router_orig->orig)) { + primary_orig_node = router_orig; + } else { + primary_orig_node = orig_hash_find(bat_priv, + router_orig->primary_addr); + if (!primary_orig_node) + goto return_router; + + orig_node_free_ref(primary_orig_node); + } + + /* with less than 2 candidates, we can't do any + * bonding and prefer the original router. */ + if (atomic_read(&primary_orig_node->bond_candidates) < 2) + goto return_router; + + /* all in-between nodes should choose a candidate that + * is not on the interface where the packet came + * in.
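+ * (find_bond_router() rotates through the candidate list round + * robin, while find_ifalter_router() picks the best-TQ candidate + * on a different interface)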
*/ + + neigh_node_free_ref(router); + + if (bonding_enabled) + router = find_bond_router(primary_orig_node, recv_if); + else + router = find_ifalter_router(primary_orig_node, recv_if); + +return_router: + if (router && router->if_incoming->if_status != IF_ACTIVE) + goto err_unlock; + + rcu_read_unlock(); + return router; +err_unlock: + rcu_read_unlock(); +err: + if (router) + neigh_node_free_ref(router); + return NULL; +} + +static int check_unicast_packet(struct sk_buff *skb, int hdr_size) +{ + struct ethhdr *ethhdr; + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, hdr_size))) + return -1; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with unicast indication but broadcast recipient */ + if (is_broadcast_ether_addr(ethhdr->h_dest)) + return -1; + + /* packet with broadcast sender address */ + if (is_broadcast_ether_addr(ethhdr->h_source)) + return -1; + + /* not for me */ + if (!is_my_mac(ethhdr->h_dest)) + return -1; + + return 0; +} + +int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct orig_node *orig_node = NULL; + struct neigh_node *neigh_node = NULL; + struct unicast_packet *unicast_packet; + struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); + int ret = NET_RX_DROP; + struct sk_buff *new_skb; + + unicast_packet = (struct unicast_packet *)skb->data; + + /* TTL exceeded */ + if (unicast_packet->header.ttl < 2) { + pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n", + ethhdr->h_source, unicast_packet->dest); + goto out; + } + + /* get routing information */ + orig_node = orig_hash_find(bat_priv, unicast_packet->dest); + + if (!orig_node) + goto out; + + /* find_router() increases neigh_nodes refcount if found. */ + neigh_node = find_router(bat_priv, orig_node, recv_if); + + if (!neigh_node) + goto out; + + /* create a copy of the skb, if needed, to modify it. 
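+ * (the private copy is needed because we decrement the TTL below + * and may hand the skb over to the fragmentation code)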
*/ + if (skb_cow(skb, sizeof(struct ethhdr)) < 0) + goto out; + + unicast_packet = (struct unicast_packet *)skb->data; + + if (unicast_packet->header.packet_type == BAT_UNICAST && + atomic_read(&bat_priv->fragmentation) && + skb->len > neigh_node->if_incoming->net_dev->mtu) { + ret = frag_send_skb(skb, bat_priv, + neigh_node->if_incoming, neigh_node->addr); + goto out; + } + + if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG && + frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) { + + ret = frag_reassemble_skb(skb, bat_priv, &new_skb); + + if (ret == NET_RX_DROP) + goto out; + + /* packet was buffered for late merge */ + if (!new_skb) { + ret = NET_RX_SUCCESS; + goto out; + } + + skb = new_skb; + unicast_packet = (struct unicast_packet *)skb->data; + } + + /* decrement ttl */ + unicast_packet->header.ttl--; + + /* route it */ + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + ret = NET_RX_SUCCESS; + +out: + if (neigh_node) + neigh_node_free_ref(neigh_node); + if (orig_node) + orig_node_free_ref(orig_node); + return ret; +} + +static int check_unicast_ttvn(struct bat_priv *bat_priv, + struct sk_buff *skb) { + uint8_t curr_ttvn; + struct orig_node *orig_node; + struct ethhdr *ethhdr; + struct hard_iface *primary_if; + struct unicast_packet *unicast_packet; + bool tt_poss_change; + + /* I could need to modify it */ + if (skb_cow(skb, sizeof(struct unicast_packet)) < 0) + return 0; + + unicast_packet = (struct unicast_packet *)skb->data; + + if (is_my_mac(unicast_packet->dest)) { + tt_poss_change = bat_priv->tt_poss_change; + curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); + } else { + orig_node = orig_hash_find(bat_priv, unicast_packet->dest); + + if (!orig_node) + return 0; + + curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); + tt_poss_change = orig_node->tt_poss_change; + orig_node_free_ref(orig_node); + } + + /* Check whether I have to reroute the packet */ + if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) { + /* Linearize the skb before accessing it */ + if (skb_linearize(skb) < 0) + return 0; + + ethhdr = (struct ethhdr *)(skb->data + + sizeof(struct unicast_packet)); + orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest); + + if (!orig_node) { + if (!is_my_client(bat_priv, ethhdr->h_dest)) + return 0; + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + return 0; + memcpy(unicast_packet->dest, + primary_if->net_dev->dev_addr, ETH_ALEN); + hardif_free_ref(primary_if); + } else { + memcpy(unicast_packet->dest, orig_node->orig, + ETH_ALEN); + curr_ttvn = (uint8_t) + atomic_read(&orig_node->last_ttvn); + orig_node_free_ref(orig_node); + } + + bat_dbg(DBG_ROUTES, bat_priv, + "TTVN mismatch (old_ttvn %u new_ttvn %u)! 
Rerouting unicast packet (for %pM) to %pM\n", + unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest, + unicast_packet->dest); + + unicast_packet->ttvn = curr_ttvn; + } + return 1; +} + +int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct unicast_packet *unicast_packet; + int hdr_size = sizeof(*unicast_packet); + + if (check_unicast_packet(skb, hdr_size) < 0) + return NET_RX_DROP; + + if (!check_unicast_ttvn(bat_priv, skb)) + return NET_RX_DROP; + + unicast_packet = (struct unicast_packet *)skb->data; + + /* packet for me */ + if (is_my_mac(unicast_packet->dest)) { + interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); + return NET_RX_SUCCESS; + } + + return route_unicast_packet(skb, recv_if); +} + +int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct unicast_frag_packet *unicast_packet; + int hdr_size = sizeof(*unicast_packet); + struct sk_buff *new_skb = NULL; + int ret; + + if (check_unicast_packet(skb, hdr_size) < 0) + return NET_RX_DROP; + + if (!check_unicast_ttvn(bat_priv, skb)) + return NET_RX_DROP; + + unicast_packet = (struct unicast_frag_packet *)skb->data; + + /* packet for me */ + if (is_my_mac(unicast_packet->dest)) { + + ret = frag_reassemble_skb(skb, bat_priv, &new_skb); + + if (ret == NET_RX_DROP) + return NET_RX_DROP; + + /* packet was buffered for late merge */ + if (!new_skb) + return NET_RX_SUCCESS; + + interface_rx(recv_if->soft_iface, new_skb, recv_if, + sizeof(struct unicast_packet)); + return NET_RX_SUCCESS; + } + + return route_unicast_packet(skb, recv_if); +} + + +int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) +{ + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct orig_node *orig_node = NULL; + struct bcast_packet *bcast_packet; + struct ethhdr *ethhdr; + int hdr_size = sizeof(*bcast_packet); + int ret = NET_RX_DROP; + int32_t seq_diff; + + /* drop packet if it has not necessary minimum size */ + if (unlikely(!pskb_may_pull(skb, hdr_size))) + goto out; + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* packet with broadcast indication but unicast recipient */ + if (!is_broadcast_ether_addr(ethhdr->h_dest)) + goto out; + + /* packet with broadcast sender address */ + if (is_broadcast_ether_addr(ethhdr->h_source)) + goto out; + + /* ignore broadcasts sent by myself */ + if (is_my_mac(ethhdr->h_source)) + goto out; + + bcast_packet = (struct bcast_packet *)skb->data; + + /* ignore broadcasts originated by myself */ + if (is_my_mac(bcast_packet->orig)) + goto out; + + if (bcast_packet->header.ttl < 2) + goto out; + + orig_node = orig_hash_find(bat_priv, bcast_packet->orig); + + if (!orig_node) + goto out; + + spin_lock_bh(&orig_node->bcast_seqno_lock); + + /* check whether the packet is a duplicate */ + if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno, + ntohl(bcast_packet->seqno))) + goto spin_unlock; + + seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; + + /* check whether the packet is old and the host just restarted. */ + if (window_protected(bat_priv, seq_diff, + &orig_node->bcast_seqno_reset)) + goto spin_unlock; + + /* mark broadcast in flood history, update window position + * if required. 
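+ * (bit_get_packet() sets the bit for this seqno and returns true + * whenever the window had to move forward, in which case + * last_bcast_seqno is advanced to this packet's seqno)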
*/ + if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) + orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); + + spin_unlock_bh(&orig_node->bcast_seqno_lock); + + /* rebroadcast packet */ + add_bcast_packet_to_list(bat_priv, skb, 1); + + /* broadcast for me */ + interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); + ret = NET_RX_SUCCESS; + goto out; + +spin_unlock: + spin_unlock_bh(&orig_node->bcast_seqno_lock); +out: + if (orig_node) + orig_node_free_ref(orig_node); + return ret; +} + +int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if) +{ + struct vis_packet *vis_packet; + struct ethhdr *ethhdr; + struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); + int hdr_size = sizeof(*vis_packet); + + /* keep skb linear */ + if (skb_linearize(skb) < 0) + return NET_RX_DROP; + + if (unlikely(!pskb_may_pull(skb, hdr_size))) + return NET_RX_DROP; + + vis_packet = (struct vis_packet *)skb->data; + ethhdr = (struct ethhdr *)skb_mac_header(skb); + + /* not for me */ + if (!is_my_mac(ethhdr->h_dest)) + return NET_RX_DROP; + + /* ignore own packets */ + if (is_my_mac(vis_packet->vis_orig)) + return NET_RX_DROP; + + if (is_my_mac(vis_packet->sender_orig)) + return NET_RX_DROP; + + switch (vis_packet->vis_type) { + case VIS_TYPE_SERVER_SYNC: + receive_server_sync_packet(bat_priv, vis_packet, + skb_headlen(skb)); + break; + + case VIS_TYPE_CLIENT_UPDATE: + receive_client_update_packet(bat_priv, vis_packet, + skb_headlen(skb)); + break; + + default: /* ignore unknown packet */ + break; + } + + /* We take a copy of the data in the packet, so we should + * always free the skb. */ + return NET_RX_DROP; +} diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h new file mode 100644 index 00000000..92ac100d --- /dev/null +++ b/net/batman-adv/routing.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_ROUTING_H_ +#define _NET_BATMAN_ADV_ROUTING_H_ + +void slide_own_bcast_window(struct hard_iface *hard_iface); +void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, + struct neigh_node *neigh_node); +int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); +struct neigh_node *find_router(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const struct hard_iface *recv_if); +void bonding_candidate_del(struct orig_node *orig_node, + struct neigh_node *neigh_node); +void bonding_candidate_add(struct orig_node *orig_node, + struct neigh_node *neigh_node); +void bonding_save_primary(const struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + const struct batman_ogm_packet *batman_ogm_packet); +int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, + unsigned long *last_reset); + +#endif /* _NET_BATMAN_ADV_ROUTING_H_ */ diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c new file mode 100644 index 00000000..af7a6741 --- /dev/null +++ b/net/batman-adv/send.c @@ -0,0 +1,410 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "send.h" +#include "routing.h" +#include "translation-table.h" +#include "soft-interface.h" +#include "hard-interface.h" +#include "vis.h" +#include "gateway_common.h" +#include "originator.h" + +static void send_outstanding_bcast_packet(struct work_struct *work); + +/* send out an already prepared packet to the given address via the + * specified batman interface */ +int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, + const uint8_t *dst_addr) +{ + struct ethhdr *ethhdr; + + if (hard_iface->if_status != IF_ACTIVE) + goto send_skb_err; + + if (unlikely(!hard_iface->net_dev)) + goto send_skb_err; + + if (!(hard_iface->net_dev->flags & IFF_UP)) { + pr_warning("Interface %s is not up - can't send packet via that interface!\n", + hard_iface->net_dev->name); + goto send_skb_err; + } + + /* push to the ethernet header. */ + if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0) + goto send_skb_err; + + skb_reset_mac_header(skb); + + ethhdr = (struct ethhdr *)skb_mac_header(skb); + memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); + memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); + ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); + + skb_set_network_header(skb, ETH_HLEN); + skb->priority = TC_PRIO_CONTROL; + skb->protocol = __constant_htons(ETH_P_BATMAN); + + skb->dev = hard_iface->net_dev; + + /* dev_queue_xmit() returns a negative result on error. However on + * congestion and traffic shaping, it drops and returns NET_XMIT_DROP + * (which is > 0). This will not be treated as an error. 
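+ * (NET_XMIT_DROP is also what the send_skb_err path below returns + * when the interface is down or the header push fails)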
*/ + + return dev_queue_xmit(skb); +send_skb_err: + kfree_skb(skb); + return NET_XMIT_DROP; +} + +static void realloc_packet_buffer(struct hard_iface *hard_iface, + int new_len) +{ + unsigned char *new_buff; + + new_buff = kmalloc(new_len, GFP_ATOMIC); + + /* keep old buffer if kmalloc should fail */ + if (new_buff) { + memcpy(new_buff, hard_iface->packet_buff, + BATMAN_OGM_LEN); + + kfree(hard_iface->packet_buff); + hard_iface->packet_buff = new_buff; + hard_iface->packet_len = new_len; + } +} + +/* when calling this function (hard_iface == primary_if) has to be true */ +static int prepare_packet_buffer(struct bat_priv *bat_priv, + struct hard_iface *hard_iface) +{ + int new_len; + + new_len = BATMAN_OGM_LEN + + tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes)); + + /* if we have too many changes for one packet don't send any + * and wait for the tt table request which will be fragmented */ + if (new_len > hard_iface->soft_iface->mtu) + new_len = BATMAN_OGM_LEN; + + realloc_packet_buffer(hard_iface, new_len); + + atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv)); + + /* reset the sending counter */ + atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX); + + return tt_changes_fill_buffer(bat_priv, + hard_iface->packet_buff + BATMAN_OGM_LEN, + hard_iface->packet_len - BATMAN_OGM_LEN); +} + +static int reset_packet_buffer(struct bat_priv *bat_priv, + struct hard_iface *hard_iface) +{ + realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN); + return 0; +} + +void schedule_bat_ogm(struct hard_iface *hard_iface) +{ + struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct hard_iface *primary_if; + int tt_num_changes = -1; + + if ((hard_iface->if_status == IF_NOT_IN_USE) || + (hard_iface->if_status == IF_TO_BE_REMOVED)) + return; + + /** + * the interface gets activated here to avoid race conditions between + * the moment of activating the interface in + * hardif_activate_interface() where the originator mac is set and + * outdated packets (especially uninitialized mac addresses) in the + * packet queue + */ + if (hard_iface->if_status == IF_TO_BE_ACTIVATED) + hard_iface->if_status = IF_ACTIVE; + + primary_if = primary_if_get_selected(bat_priv); + + if (hard_iface == primary_if) { + /* if at least one change happened */ + if (atomic_read(&bat_priv->tt_local_changes) > 0) { + tt_commit_changes(bat_priv); + tt_num_changes = prepare_packet_buffer(bat_priv, + hard_iface); + } + + /* if the changes have been sent often enough */ + if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt)) + tt_num_changes = reset_packet_buffer(bat_priv, + hard_iface); + } + + if (primary_if) + hardif_free_ref(primary_if); + + bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes); +} + +static void forw_packet_free(struct forw_packet *forw_packet) +{ + if (forw_packet->skb) + kfree_skb(forw_packet->skb); + if (forw_packet->if_incoming) + hardif_free_ref(forw_packet->if_incoming); + kfree(forw_packet); +} + +static void _add_bcast_packet_to_list(struct bat_priv *bat_priv, + struct forw_packet *forw_packet, + unsigned long send_time) +{ + INIT_HLIST_NODE(&forw_packet->list); + + /* add new packet to packet list */ + spin_lock_bh(&bat_priv->forw_bcast_list_lock); + hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list); + spin_unlock_bh(&bat_priv->forw_bcast_list_lock); + + /* start timer for this packet */ + INIT_DELAYED_WORK(&forw_packet->delayed_work, + send_outstanding_bcast_packet); + queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work, + 
send_time); +} + +/* add a broadcast packet to the queue and set up timers. broadcast packets + * are sent multiple times to increase the probability of being received. + * + * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on + * errors. + * + * The skb is not consumed, so the caller should make sure that the + * skb is freed. */ +int add_bcast_packet_to_list(struct bat_priv *bat_priv, + const struct sk_buff *skb, unsigned long delay) +{ + struct hard_iface *primary_if = NULL; + struct forw_packet *forw_packet; + struct bcast_packet *bcast_packet; + struct sk_buff *newskb; + + if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) { + bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n"); + goto out; + } + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out_and_inc; + + forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC); + + if (!forw_packet) + goto out_and_inc; + + newskb = skb_copy(skb, GFP_ATOMIC); + if (!newskb) + goto packet_free; + + /* as we have a copy now, it is safe to decrease the TTL */ + bcast_packet = (struct bcast_packet *)newskb->data; + bcast_packet->header.ttl--; + + skb_reset_mac_header(newskb); + + forw_packet->skb = newskb; + forw_packet->if_incoming = primary_if; + + /* how often did we send the bcast packet? */ + forw_packet->num_packets = 0; + + _add_bcast_packet_to_list(bat_priv, forw_packet, delay); + return NETDEV_TX_OK; + +packet_free: + kfree(forw_packet); +out_and_inc: + atomic_inc(&bat_priv->bcast_queue_left); +out: + if (primary_if) + hardif_free_ref(primary_if); + return NETDEV_TX_BUSY; +} + +static void send_outstanding_bcast_packet(struct work_struct *work) +{ + struct hard_iface *hard_iface; + struct delayed_work *delayed_work = + container_of(work, struct delayed_work, work); + struct forw_packet *forw_packet = + container_of(delayed_work, struct forw_packet, delayed_work); + struct sk_buff *skb1; + struct net_device *soft_iface = forw_packet->if_incoming->soft_iface; + struct bat_priv *bat_priv = netdev_priv(soft_iface); + + spin_lock_bh(&bat_priv->forw_bcast_list_lock); + hlist_del(&forw_packet->list); + spin_unlock_bh(&bat_priv->forw_bcast_list_lock); + + if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING) + goto out; + + /* rebroadcast packet */ + rcu_read_lock(); + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { + if (hard_iface->soft_iface != soft_iface) + continue; + + /* send a copy of the saved skb */ + skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); + if (skb1) + send_skb_packet(skb1, hard_iface, broadcast_addr); + } + rcu_read_unlock(); + + forw_packet->num_packets++; + + /* if we still have some more bcasts to send */ + if (forw_packet->num_packets < 3) { + _add_bcast_packet_to_list(bat_priv, forw_packet, + ((5 * HZ) / 1000)); + return; + } + +out: + forw_packet_free(forw_packet); + atomic_inc(&bat_priv->bcast_queue_left); +} + +void send_outstanding_bat_ogm_packet(struct work_struct *work) +{ + struct delayed_work *delayed_work = + container_of(work, struct delayed_work, work); + struct forw_packet *forw_packet = + container_of(delayed_work, struct forw_packet, delayed_work); + struct bat_priv *bat_priv; + + bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); + spin_lock_bh(&bat_priv->forw_bat_list_lock); + hlist_del(&forw_packet->list); + spin_unlock_bh(&bat_priv->forw_bat_list_lock); + + if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING) + goto out; + + bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet); + + /** + * we have to have at least one
packet in the queue + * to determine the queue's wake-up time unless we are + * shutting down + */ + if (forw_packet->own) + schedule_bat_ogm(forw_packet->if_incoming); + +out: + /* don't count own packet */ + if (!forw_packet->own) + atomic_inc(&bat_priv->batman_queue_left); + + forw_packet_free(forw_packet); +} + +void purge_outstanding_packets(struct bat_priv *bat_priv, + const struct hard_iface *hard_iface) +{ + struct forw_packet *forw_packet; + struct hlist_node *tmp_node, *safe_tmp_node; + bool pending; + + if (hard_iface) + bat_dbg(DBG_BATMAN, bat_priv, + "purge_outstanding_packets(): %s\n", + hard_iface->net_dev->name); + else + bat_dbg(DBG_BATMAN, bat_priv, + "purge_outstanding_packets()\n"); + + /* free bcast list */ + spin_lock_bh(&bat_priv->forw_bcast_list_lock); + hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, + &bat_priv->forw_bcast_list, list) { + + /** + * if purge_outstanding_packets() was called with an argument + * we delete only packets belonging to the given interface + */ + if ((hard_iface) && + (forw_packet->if_incoming != hard_iface)) + continue; + + spin_unlock_bh(&bat_priv->forw_bcast_list_lock); + + /** + * send_outstanding_bcast_packet() will lock the list to + * delete the item from the list + */ + pending = cancel_delayed_work_sync(&forw_packet->delayed_work); + spin_lock_bh(&bat_priv->forw_bcast_list_lock); + + if (pending) { + hlist_del(&forw_packet->list); + forw_packet_free(forw_packet); + } + } + spin_unlock_bh(&bat_priv->forw_bcast_list_lock); + + /* free batman packet list */ + spin_lock_bh(&bat_priv->forw_bat_list_lock); + hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, + &bat_priv->forw_bat_list, list) { + + /** + * if purge_outstanding_packets() was called with an argument + * we delete only packets belonging to the given interface + */ + if ((hard_iface) && + (forw_packet->if_incoming != hard_iface)) + continue; + + spin_unlock_bh(&bat_priv->forw_bat_list_lock); + + /** + * send_outstanding_bat_ogm_packet() will lock the list to + * delete the item from the list + */ + pending = cancel_delayed_work_sync(&forw_packet->delayed_work); + spin_lock_bh(&bat_priv->forw_bat_list_lock); + + if (pending) { + hlist_del(&forw_packet->list); + forw_packet_free(forw_packet); + } + } + spin_unlock_bh(&bat_priv->forw_bat_list_lock); +} diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h new file mode 100644 index 00000000..824ef06f --- /dev/null +++ b/net/batman-adv/send.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_SEND_H_ +#define _NET_BATMAN_ADV_SEND_H_ + +int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, + const uint8_t *dst_addr); +void schedule_bat_ogm(struct hard_iface *hard_iface); +int add_bcast_packet_to_list(struct bat_priv *bat_priv, + const struct sk_buff *skb, unsigned long delay); +void send_outstanding_bat_ogm_packet(struct work_struct *work); +void purge_outstanding_packets(struct bat_priv *bat_priv, + const struct hard_iface *hard_iface); + +#endif /* _NET_BATMAN_ADV_SEND_H_ */ diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c new file mode 100644 index 00000000..a5590f41 --- /dev/null +++ b/net/batman-adv/soft-interface.c @@ -0,0 +1,942 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "soft-interface.h" +#include "hard-interface.h" +#include "routing.h" +#include "send.h" +#include "bat_debugfs.h" +#include "translation-table.h" +#include "hash.h" +#include "gateway_common.h" +#include "gateway_client.h" +#include "bat_sysfs.h" +#include "originator.h" +#include <linux/slab.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include "unicast.h" + + +static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); +static void bat_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info); +static u32 bat_get_msglevel(struct net_device *dev); +static void bat_set_msglevel(struct net_device *dev, u32 value); +static u32 bat_get_link(struct net_device *dev); + +static const struct ethtool_ops bat_ethtool_ops = { + .get_settings = bat_get_settings, + .get_drvinfo = bat_get_drvinfo, + .get_msglevel = bat_get_msglevel, + .set_msglevel = bat_set_msglevel, + .get_link = bat_get_link, +}; + +int my_skb_head_push(struct sk_buff *skb, unsigned int len) +{ + int result; + + /** + * TODO: We must check if we can release all references to non-payload + * data using skb_header_release in our skbs to allow skb_cow_header to + * work optimally. This means that those skbs are not allowed to read + * or write any data which is before the current position of skb->data + * after that call and thus allow other skbs with the same data buffer + * to write freely in that area. 
+ */ + result = skb_cow_head(skb, len); + if (result < 0) + return result; + + skb_push(skb, len); + return 0; +} + +static void softif_neigh_free_ref(struct softif_neigh *softif_neigh) +{ + if (atomic_dec_and_test(&softif_neigh->refcount)) + kfree_rcu(softif_neigh, rcu); +} + +static void softif_neigh_vid_free_rcu(struct rcu_head *rcu) +{ + struct softif_neigh_vid *softif_neigh_vid; + struct softif_neigh *softif_neigh; + struct hlist_node *node, *node_tmp; + struct bat_priv *bat_priv; + + softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu); + bat_priv = softif_neigh_vid->bat_priv; + + spin_lock_bh(&bat_priv->softif_neigh_lock); + hlist_for_each_entry_safe(softif_neigh, node, node_tmp, + &softif_neigh_vid->softif_neigh_list, list) { + hlist_del_rcu(&softif_neigh->list); + softif_neigh_free_ref(softif_neigh); + } + spin_unlock_bh(&bat_priv->softif_neigh_lock); + + kfree(softif_neigh_vid); +} + +static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid) +{ + if (atomic_dec_and_test(&softif_neigh_vid->refcount)) + call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu); +} + +static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv, + short vid) +{ + struct softif_neigh_vid *softif_neigh_vid; + struct hlist_node *node; + + rcu_read_lock(); + hlist_for_each_entry_rcu(softif_neigh_vid, node, + &bat_priv->softif_neigh_vids, list) { + if (softif_neigh_vid->vid != vid) + continue; + + if (!atomic_inc_not_zero(&softif_neigh_vid->refcount)) + continue; + + goto out; + } + + softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC); + if (!softif_neigh_vid) + goto out; + + softif_neigh_vid->vid = vid; + softif_neigh_vid->bat_priv = bat_priv; + + /* initialize with 2 - caller decrements counter by one */ + atomic_set(&softif_neigh_vid->refcount, 2); + INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list); + INIT_HLIST_NODE(&softif_neigh_vid->list); + spin_lock_bh(&bat_priv->softif_neigh_vid_lock); + hlist_add_head_rcu(&softif_neigh_vid->list, + &bat_priv->softif_neigh_vids); + spin_unlock_bh(&bat_priv->softif_neigh_vid_lock); + +out: + rcu_read_unlock(); + return softif_neigh_vid; +} + +static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, + const uint8_t *addr, short vid) +{ + struct softif_neigh_vid *softif_neigh_vid; + struct softif_neigh *softif_neigh = NULL; + struct hlist_node *node; + + softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); + if (!softif_neigh_vid) + goto out; + + rcu_read_lock(); + hlist_for_each_entry_rcu(softif_neigh, node, + &softif_neigh_vid->softif_neigh_list, + list) { + if (!compare_eth(softif_neigh->addr, addr)) + continue; + + if (!atomic_inc_not_zero(&softif_neigh->refcount)) + continue; + + softif_neigh->last_seen = jiffies; + goto unlock; + } + + softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC); + if (!softif_neigh) + goto unlock; + + memcpy(softif_neigh->addr, addr, ETH_ALEN); + softif_neigh->last_seen = jiffies; + /* initialize with 2 - caller decrements counter by one */ + atomic_set(&softif_neigh->refcount, 2); + + INIT_HLIST_NODE(&softif_neigh->list); + spin_lock_bh(&bat_priv->softif_neigh_lock); + hlist_add_head_rcu(&softif_neigh->list, + &softif_neigh_vid->softif_neigh_list); + spin_unlock_bh(&bat_priv->softif_neigh_lock); + +unlock: + rcu_read_unlock(); +out: + if (softif_neigh_vid) + softif_neigh_vid_free_ref(softif_neigh_vid); + return softif_neigh; +} + +static struct softif_neigh *softif_neigh_get_selected( + struct softif_neigh_vid *softif_neigh_vid) +{ + 
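+ /* Lookup sketch for the RCU + refcount scheme used here: the
+ * selected neighbor is dereferenced under rcu_read_lock() and a
+ * reference is taken with atomic_inc_not_zero(); a refcount that has
+ * already dropped to zero means the object is about to be freed, so
+ * NULL is returned instead. A non-NULL result therefore stays valid
+ * until the caller drops it with softif_neigh_free_ref().
+ */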
struct softif_neigh *softif_neigh; + + rcu_read_lock(); + softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh); + + if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount)) + softif_neigh = NULL; + + rcu_read_unlock(); + return softif_neigh; +} + +static struct softif_neigh *softif_neigh_vid_get_selected( + struct bat_priv *bat_priv, + short vid) +{ + struct softif_neigh_vid *softif_neigh_vid; + struct softif_neigh *softif_neigh = NULL; + + softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); + if (!softif_neigh_vid) + goto out; + + softif_neigh = softif_neigh_get_selected(softif_neigh_vid); +out: + if (softif_neigh_vid) + softif_neigh_vid_free_ref(softif_neigh_vid); + return softif_neigh; +} + +static void softif_neigh_vid_select(struct bat_priv *bat_priv, + struct softif_neigh *new_neigh, + short vid) +{ + struct softif_neigh_vid *softif_neigh_vid; + struct softif_neigh *curr_neigh; + + softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); + if (!softif_neigh_vid) + goto out; + + spin_lock_bh(&bat_priv->softif_neigh_lock); + + if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount)) + new_neigh = NULL; + + curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh, + 1); + rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh); + + if ((curr_neigh) && (!new_neigh)) + bat_dbg(DBG_ROUTES, bat_priv, + "Removing mesh exit point on vid: %d (prev: %pM).\n", + vid, curr_neigh->addr); + else if ((curr_neigh) && (new_neigh)) + bat_dbg(DBG_ROUTES, bat_priv, + "Changing mesh exit point on vid: %d from %pM to %pM.\n", + vid, curr_neigh->addr, new_neigh->addr); + else if ((!curr_neigh) && (new_neigh)) + bat_dbg(DBG_ROUTES, bat_priv, + "Setting mesh exit point on vid: %d to %pM.\n", + vid, new_neigh->addr); + + if (curr_neigh) + softif_neigh_free_ref(curr_neigh); + + spin_unlock_bh(&bat_priv->softif_neigh_lock); + +out: + if (softif_neigh_vid) + softif_neigh_vid_free_ref(softif_neigh_vid); +} + +static void softif_neigh_vid_deselect(struct bat_priv *bat_priv, + struct softif_neigh_vid *softif_neigh_vid) +{ + struct softif_neigh *curr_neigh; + struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp; + struct hard_iface *primary_if = NULL; + struct hlist_node *node; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + /* find new softif_neigh immediately to avoid temporary loops */ + rcu_read_lock(); + curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh); + + hlist_for_each_entry_rcu(softif_neigh_tmp, node, + &softif_neigh_vid->softif_neigh_list, + list) { + if (softif_neigh_tmp == curr_neigh) + continue; + + /* we got a neighbor but its mac is 'bigger' than ours */ + if (memcmp(primary_if->net_dev->dev_addr, + softif_neigh_tmp->addr, ETH_ALEN) < 0) + continue; + + if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount)) + continue; + + softif_neigh = softif_neigh_tmp; + goto unlock; + } + +unlock: + rcu_read_unlock(); +out: + softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid); + + if (primary_if) + hardif_free_ref(primary_if); + if (softif_neigh) + softif_neigh_free_ref(softif_neigh); +} + +int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) +{ + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct softif_neigh_vid *softif_neigh_vid; + struct softif_neigh *softif_neigh; + struct hard_iface *primary_if; + struct hlist_node *node, *node_tmp; + struct softif_neigh *curr_softif_neigh; + int ret = 0, 
last_seen_secs, last_seen_msecs; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - please specify interfaces to enable it\n", + net_dev->name); + goto out; + } + + if (primary_if->if_status != IF_ACTIVE) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - primary interface not active\n", + net_dev->name); + goto out; + } + + seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); + + rcu_read_lock(); + hlist_for_each_entry_rcu(softif_neigh_vid, node, + &bat_priv->softif_neigh_vids, list) { + seq_printf(seq, " %-15s %s on vid: %d\n", + "Originator", "last-seen", softif_neigh_vid->vid); + + curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid); + + hlist_for_each_entry_rcu(softif_neigh, node_tmp, + &softif_neigh_vid->softif_neigh_list, + list) { + last_seen_secs = jiffies_to_msecs(jiffies - + softif_neigh->last_seen) / 1000; + last_seen_msecs = jiffies_to_msecs(jiffies - + softif_neigh->last_seen) % 1000; + seq_printf(seq, "%s %pM %3i.%03is\n", + curr_softif_neigh == softif_neigh + ? "=>" : " ", softif_neigh->addr, + last_seen_secs, last_seen_msecs); + } + + if (curr_softif_neigh) + softif_neigh_free_ref(curr_softif_neigh); + + seq_printf(seq, "\n"); + } + rcu_read_unlock(); + +out: + if (primary_if) + hardif_free_ref(primary_if); + return ret; +} + +void softif_neigh_purge(struct bat_priv *bat_priv) +{ + struct softif_neigh *softif_neigh, *curr_softif_neigh; + struct softif_neigh_vid *softif_neigh_vid; + struct hlist_node *node, *node_tmp, *node_tmp2; + int do_deselect; + + rcu_read_lock(); + hlist_for_each_entry_rcu(softif_neigh_vid, node, + &bat_priv->softif_neigh_vids, list) { + if (!atomic_inc_not_zero(&softif_neigh_vid->refcount)) + continue; + + curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid); + do_deselect = 0; + + spin_lock_bh(&bat_priv->softif_neigh_lock); + hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2, + &softif_neigh_vid->softif_neigh_list, + list) { + if ((!has_timed_out(softif_neigh->last_seen, + SOFTIF_NEIGH_TIMEOUT)) && + (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)) + continue; + + if (curr_softif_neigh == softif_neigh) { + bat_dbg(DBG_ROUTES, bat_priv, + "Current mesh exit point on vid: %d '%pM' vanished.\n", + softif_neigh_vid->vid, + softif_neigh->addr); + do_deselect = 1; + } + + hlist_del_rcu(&softif_neigh->list); + softif_neigh_free_ref(softif_neigh); + } + spin_unlock_bh(&bat_priv->softif_neigh_lock); + + /* soft_neigh_vid_deselect() needs to acquire the + * softif_neigh_lock */ + if (do_deselect) + softif_neigh_vid_deselect(bat_priv, softif_neigh_vid); + + if (curr_softif_neigh) + softif_neigh_free_ref(curr_softif_neigh); + + softif_neigh_vid_free_ref(softif_neigh_vid); + } + rcu_read_unlock(); + + spin_lock_bh(&bat_priv->softif_neigh_vid_lock); + hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp, + &bat_priv->softif_neigh_vids, list) { + if (!hlist_empty(&softif_neigh_vid->softif_neigh_list)) + continue; + + hlist_del_rcu(&softif_neigh_vid->list); + softif_neigh_vid_free_ref(softif_neigh_vid); + } + spin_unlock_bh(&bat_priv->softif_neigh_vid_lock); + +} + +static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, + short vid) +{ + struct bat_priv *bat_priv = netdev_priv(dev); + struct ethhdr *ethhdr = (struct ethhdr *)skb->data; + struct batman_ogm_packet *batman_ogm_packet; + struct softif_neigh *softif_neigh = NULL; + struct hard_iface *primary_if = NULL; + struct softif_neigh *curr_softif_neigh = NULL; + 
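+ /* A batman OGM arriving on the soft interface means that another
+ * mesh node is reachable on the same local link (e.g. both nodes
+ * plugged into one switch). Only one of them may act as the exit
+ * point into the mesh, so the code below elects the node with the
+ * numerically smallest primary MAC address (for example
+ * 02:00:00:00:00:01 wins against 02:00:00:00:00:02) and remembers
+ * the winner as the selected softif neighbor for this vid.
+ */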
+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) + batman_ogm_packet = (struct batman_ogm_packet *) + (skb->data + ETH_HLEN + VLAN_HLEN); + else + batman_ogm_packet = (struct batman_ogm_packet *) + (skb->data + ETH_HLEN); + + if (batman_ogm_packet->header.version != COMPAT_VERSION) + goto out; + + if (batman_ogm_packet->header.packet_type != BAT_OGM) + goto out; + + if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) + goto out; + + if (is_my_mac(batman_ogm_packet->orig)) + goto out; + + softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid); + if (!softif_neigh) + goto out; + + curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); + if (curr_softif_neigh == softif_neigh) + goto out; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + /* we got a neighbor but its mac is 'bigger' than ours */ + if (memcmp(primary_if->net_dev->dev_addr, + softif_neigh->addr, ETH_ALEN) < 0) + goto out; + + /* close own batX device and use softif_neigh as exit node */ + if (!curr_softif_neigh) { + softif_neigh_vid_select(bat_priv, softif_neigh, vid); + goto out; + } + + /* switch to new 'smallest neighbor' */ + if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0) + softif_neigh_vid_select(bat_priv, softif_neigh, vid); + +out: + kfree_skb(skb); + if (softif_neigh) + softif_neigh_free_ref(softif_neigh); + if (curr_softif_neigh) + softif_neigh_free_ref(curr_softif_neigh); + if (primary_if) + hardif_free_ref(primary_if); + return; +} + +static int interface_open(struct net_device *dev) +{ + netif_start_queue(dev); + return 0; +} + +static int interface_release(struct net_device *dev) +{ + netif_stop_queue(dev); + return 0; +} + +static struct net_device_stats *interface_stats(struct net_device *dev) +{ + struct bat_priv *bat_priv = netdev_priv(dev); + return &bat_priv->stats; +} + +static int interface_set_mac_addr(struct net_device *dev, void *p) +{ + struct bat_priv *bat_priv = netdev_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* only modify transtable if it has been initialized before */ + if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { + tt_local_remove(bat_priv, dev->dev_addr, + "mac address changed", false); + tt_local_add(dev, addr->sa_data, NULL_IFINDEX); + } + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; + return 0; +} + +static int interface_change_mtu(struct net_device *dev, int new_mtu) +{ + /* check ranges */ + if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev))) + return -EINVAL; + + dev->mtu = new_mtu; + + return 0; +} + +static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) +{ + struct ethhdr *ethhdr = (struct ethhdr *)skb->data; + struct bat_priv *bat_priv = netdev_priv(soft_iface); + struct hard_iface *primary_if = NULL; + struct bcast_packet *bcast_packet; + struct vlan_ethhdr *vhdr; + struct softif_neigh *curr_softif_neigh = NULL; + unsigned int header_len = 0; + int data_len = skb->len, ret; + short vid = -1; + bool do_bcast = false; + + if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) + goto dropped; + + soft_iface->trans_start = jiffies; + + switch (ntohs(ethhdr->h_proto)) { + case ETH_P_8021Q: + vhdr = (struct vlan_ethhdr *)skb->data; + vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; + + if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN) + break; + + /* fall through */ + case ETH_P_BATMAN: + softif_batman_recv(skb, soft_iface, vid); + goto end; + 
}
+
+ /**
+ * if we have another chosen mesh exit node in range
+ * it will transport the packets to the mesh
+ */
+ curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
+ if (curr_softif_neigh)
+ goto dropped;
+
+ /* Register the client MAC in the transtable */
+ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+
+ if (is_multicast_ether_addr(ethhdr->h_dest)) {
+ do_bcast = true;
+
+ switch (atomic_read(&bat_priv->gw_mode)) {
+ case GW_MODE_SERVER:
+ /* gateway servers should not send dhcp
+ * requests into the mesh */
+ ret = gw_is_dhcp_target(skb, &header_len);
+ if (ret)
+ goto dropped;
+ break;
+ case GW_MODE_CLIENT:
+ /* gateway clients should send dhcp requests
+ * via unicast to their gateway */
+ ret = gw_is_dhcp_target(skb, &header_len);
+ if (ret)
+ do_bcast = false;
+ break;
+ case GW_MODE_OFF:
+ default:
+ break;
+ }
+ }
+
+ /* ethernet packet should be broadcast */
+ if (do_bcast) {
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto dropped;
+
+ if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
+ goto dropped;
+
+ bcast_packet = (struct bcast_packet *)skb->data;
+ bcast_packet->header.version = COMPAT_VERSION;
+ bcast_packet->header.ttl = TTL;
+
+ /* batman packet type: broadcast */
+ bcast_packet->header.packet_type = BAT_BCAST;
+
+ /* hw address of first interface is the orig mac because only
+ * this mac is known throughout the mesh */
+ memcpy(bcast_packet->orig,
+ primary_if->net_dev->dev_addr, ETH_ALEN);
+
+ /* set broadcast sequence number */
+ bcast_packet->seqno =
+ htonl(atomic_inc_return(&bat_priv->bcast_seqno));
+
+ add_bcast_packet_to_list(bat_priv, skb, 1);
+
+ /* a copy is stored in the bcast list, therefore the
+ * original skb can be freed */
+ kfree_skb(skb);
+
+ /* unicast packet */
+ } else {
+ if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
+ ret = gw_out_of_range(bat_priv, skb, ethhdr);
+ if (ret)
+ goto dropped;
+ }
+
+ ret = unicast_send_skb(skb, bat_priv);
+ if (ret != 0)
+ goto dropped_freed;
+ }
+
+ bat_priv->stats.tx_packets++;
+ bat_priv->stats.tx_bytes += data_len;
+ goto end;
+
+dropped:
+ kfree_skb(skb);
+dropped_freed:
+ bat_priv->stats.tx_dropped++;
+end:
+ if (curr_softif_neigh)
+ softif_neigh_free_ref(curr_softif_neigh);
+ if (primary_if)
+ hardif_free_ref(primary_if);
+ return NETDEV_TX_OK;
+}
+
+void interface_rx(struct net_device *soft_iface,
+ struct sk_buff *skb, struct hard_iface *recv_if,
+ int hdr_size)
+{
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ struct unicast_packet *unicast_packet;
+ struct ethhdr *ethhdr;
+ struct vlan_ethhdr *vhdr;
+ struct softif_neigh *curr_softif_neigh = NULL;
+ short vid = -1;
+ int ret;
+
+ /* check if enough space is available for pulling, and pull */
+ if (!pskb_may_pull(skb, hdr_size))
+ goto dropped;
+
+ skb_pull_rcsum(skb, hdr_size);
+ skb_reset_mac_header(skb);
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ switch (ntohs(ethhdr->h_proto)) {
+ case ETH_P_8021Q:
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+
+ if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN)
+ break;
+
+ /* fall through */
+ case ETH_P_BATMAN:
+ goto dropped;
+ }
+
+ /**
+ * if we have another chosen mesh exit node in range
+ * it will transport the packets to the non-mesh network
+ */
+ curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
+ if (curr_softif_neigh) {
+ skb_push(skb, hdr_size);
+ unicast_packet = (struct unicast_packet *)skb->data;
+
+ if
((unicast_packet->header.packet_type != BAT_UNICAST) && + (unicast_packet->header.packet_type != BAT_UNICAST_FRAG)) + goto dropped; + + skb_reset_mac_header(skb); + + memcpy(unicast_packet->dest, + curr_softif_neigh->addr, ETH_ALEN); + ret = route_unicast_packet(skb, recv_if); + if (ret == NET_RX_DROP) + goto dropped; + + goto out; + } + + /* skb->dev & skb->pkt_type are set here */ + if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) + goto dropped; + skb->protocol = eth_type_trans(skb, soft_iface); + + /* should not be necessary anymore as we use skb_pull_rcsum() + * TODO: please verify this and remove this TODO + * -- Dec 21st 2009, Simon Wunderlich */ + +/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/ + + bat_priv->stats.rx_packets++; + bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr); + + soft_iface->last_rx = jiffies; + + if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) + goto dropped; + + netif_rx(skb); + goto out; + +dropped: + kfree_skb(skb); +out: + if (curr_softif_neigh) + softif_neigh_free_ref(curr_softif_neigh); + return; +} + +static const struct net_device_ops bat_netdev_ops = { + .ndo_open = interface_open, + .ndo_stop = interface_release, + .ndo_get_stats = interface_stats, + .ndo_set_mac_address = interface_set_mac_addr, + .ndo_change_mtu = interface_change_mtu, + .ndo_start_xmit = interface_tx, + .ndo_validate_addr = eth_validate_addr +}; + +static void interface_setup(struct net_device *dev) +{ + struct bat_priv *priv = netdev_priv(dev); + + ether_setup(dev); + + dev->netdev_ops = &bat_netdev_ops; + dev->destructor = free_netdev; + dev->tx_queue_len = 0; + + /** + * can't call min_mtu, because the needed variables + * have not been initialized yet + */ + dev->mtu = ETH_DATA_LEN; + /* reserve more space in the skbuff for our header */ + dev->hard_header_len = BAT_HEADER_LEN; + + /* generate random address */ + eth_hw_addr_random(dev); + + SET_ETHTOOL_OPS(dev, &bat_ethtool_ops); + + memset(priv, 0, sizeof(*priv)); +} + +struct net_device *softif_create(const char *name) +{ + struct net_device *soft_iface; + struct bat_priv *bat_priv; + int ret; + + soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup); + + if (!soft_iface) + goto out; + + ret = register_netdevice(soft_iface); + if (ret < 0) { + pr_err("Unable to register the batman interface '%s': %i\n", + name, ret); + goto free_soft_iface; + } + + bat_priv = netdev_priv(soft_iface); + + atomic_set(&bat_priv->aggregated_ogms, 1); + atomic_set(&bat_priv->bonding, 0); + atomic_set(&bat_priv->ap_isolation, 0); + atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE); + atomic_set(&bat_priv->gw_mode, GW_MODE_OFF); + atomic_set(&bat_priv->gw_sel_class, 20); + atomic_set(&bat_priv->gw_bandwidth, 41); + atomic_set(&bat_priv->orig_interval, 1000); + atomic_set(&bat_priv->hop_penalty, 10); + atomic_set(&bat_priv->log_level, 0); + atomic_set(&bat_priv->fragmentation, 1); + atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN); + atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN); + + atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); + atomic_set(&bat_priv->bcast_seqno, 1); + atomic_set(&bat_priv->ttvn, 0); + atomic_set(&bat_priv->tt_local_changes, 0); + atomic_set(&bat_priv->tt_ogm_append_cnt, 0); + + bat_priv->tt_buff = NULL; + bat_priv->tt_buff_len = 0; + bat_priv->tt_poss_change = false; + + bat_priv->primary_if = NULL; + bat_priv->num_ifaces = 0; + + ret = bat_algo_select(bat_priv, bat_routing_algo); + if (ret < 0) + goto unreg_soft_iface; + + ret = sysfs_add_meshif(soft_iface); + if 
(ret < 0) + goto unreg_soft_iface; + + ret = debugfs_add_meshif(soft_iface); + if (ret < 0) + goto unreg_sysfs; + + ret = mesh_init(soft_iface); + if (ret < 0) + goto unreg_debugfs; + + return soft_iface; + +unreg_debugfs: + debugfs_del_meshif(soft_iface); +unreg_sysfs: + sysfs_del_meshif(soft_iface); +unreg_soft_iface: + unregister_netdevice(soft_iface); + return NULL; + +free_soft_iface: + free_netdev(soft_iface); +out: + return NULL; +} + +void softif_destroy(struct net_device *soft_iface) +{ + debugfs_del_meshif(soft_iface); + sysfs_del_meshif(soft_iface); + mesh_free(soft_iface); + unregister_netdevice(soft_iface); +} + +int softif_is_valid(const struct net_device *net_dev) +{ + if (net_dev->netdev_ops->ndo_start_xmit == interface_tx) + return 1; + + return 0; +} + +/* ethtool */ +static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + cmd->supported = 0; + cmd->advertising = 0; + ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_TP; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_DISABLE; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + + return 0; +} + +static void bat_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, "B.A.T.M.A.N. advanced"); + strcpy(info->version, SOURCE_VERSION); + strcpy(info->fw_version, "N/A"); + strcpy(info->bus_info, "batman"); +} + +static u32 bat_get_msglevel(struct net_device *dev) +{ + return -EOPNOTSUPP; +} + +static void bat_set_msglevel(struct net_device *dev, u32 value) +{ +} + +static u32 bat_get_link(struct net_device *dev) +{ + return 1; +} diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h new file mode 100644 index 00000000..756eab5b --- /dev/null +++ b/net/batman-adv/soft-interface.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_ +#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ + +int my_skb_head_push(struct sk_buff *skb, unsigned int len); +int softif_neigh_seq_print_text(struct seq_file *seq, void *offset); +void softif_neigh_purge(struct bat_priv *bat_priv); +void interface_rx(struct net_device *soft_iface, + struct sk_buff *skb, struct hard_iface *recv_if, + int hdr_size); +struct net_device *softif_create(const char *name); +void softif_destroy(struct net_device *soft_iface); +int softif_is_valid(const struct net_device *net_dev); + +#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c new file mode 100644 index 00000000..f014bf83 --- /dev/null +++ b/net/batman-adv/translation-table.c @@ -0,0 +1,1888 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "translation-table.h" +#include "soft-interface.h" +#include "hard-interface.h" +#include "send.h" +#include "hash.h" +#include "originator.h" +#include "routing.h" + +#include <linux/crc16.h> + +static void _tt_global_del(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + const char *message); +static void tt_purge(struct work_struct *work); + +/* returns 1 if they are the same mac addr */ +static int compare_tt(const struct hlist_node *node, const void *data2) +{ + const void *data1 = container_of(node, struct tt_common_entry, + hash_entry); + + return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); +} + +static void tt_start_timer(struct bat_priv *bat_priv) +{ + INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge); + queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, + msecs_to_jiffies(5000)); +} + +static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash, + const void *data) +{ + struct hlist_head *head; + struct hlist_node *node; + struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL; + uint32_t index; + + if (!hash) + return NULL; + + index = choose_orig(data, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { + if (!compare_eth(tt_common_entry, data)) + continue; + + if (!atomic_inc_not_zero(&tt_common_entry->refcount)) + continue; + + tt_common_entry_tmp = tt_common_entry; + break; + } + rcu_read_unlock(); + + return tt_common_entry_tmp; +} + +static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, + const void *data) +{ + struct tt_common_entry *tt_common_entry; + struct tt_local_entry *tt_local_entry = NULL; + + tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data); + if (tt_common_entry) + tt_local_entry = container_of(tt_common_entry, + struct tt_local_entry, common); + return tt_local_entry; +} + +static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, + const void *data) +{ + struct tt_common_entry *tt_common_entry; + struct tt_global_entry *tt_global_entry = NULL; + + tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data); + if (tt_common_entry) + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, common); + return tt_global_entry; + +} + +static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry) +{ + if (atomic_dec_and_test(&tt_local_entry->common.refcount)) + kfree_rcu(tt_local_entry, common.rcu); +} + +static void tt_global_entry_free_rcu(struct rcu_head *rcu) +{ + struct tt_common_entry *tt_common_entry; + struct tt_global_entry *tt_global_entry; + + tt_common_entry = container_of(rcu, struct tt_common_entry, rcu); + tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, + 
common); + + if (tt_global_entry->orig_node) + orig_node_free_ref(tt_global_entry->orig_node); + + kfree(tt_global_entry); +} + +static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) +{ + if (atomic_dec_and_test(&tt_global_entry->common.refcount)) + call_rcu(&tt_global_entry->common.rcu, + tt_global_entry_free_rcu); +} + +static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, + uint8_t flags) +{ + struct tt_change_node *tt_change_node; + + tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC); + + if (!tt_change_node) + return; + + tt_change_node->change.flags = flags; + memcpy(tt_change_node->change.addr, addr, ETH_ALEN); + + spin_lock_bh(&bat_priv->tt_changes_list_lock); + /* track the change in the OGMinterval list */ + list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list); + atomic_inc(&bat_priv->tt_local_changes); + spin_unlock_bh(&bat_priv->tt_changes_list_lock); + + atomic_set(&bat_priv->tt_ogm_append_cnt, 0); +} + +int tt_len(int changes_num) +{ + return changes_num * sizeof(struct tt_change); +} + +static int tt_local_init(struct bat_priv *bat_priv) +{ + if (bat_priv->tt_local_hash) + return 1; + + bat_priv->tt_local_hash = hash_new(1024); + + if (!bat_priv->tt_local_hash) + return 0; + + return 1; +} + +void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, + int ifindex) +{ + struct bat_priv *bat_priv = netdev_priv(soft_iface); + struct tt_local_entry *tt_local_entry = NULL; + struct tt_global_entry *tt_global_entry = NULL; + int hash_added; + + tt_local_entry = tt_local_hash_find(bat_priv, addr); + + if (tt_local_entry) { + tt_local_entry->last_seen = jiffies; + goto out; + } + + tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC); + if (!tt_local_entry) + goto out; + + bat_dbg(DBG_TT, bat_priv, + "Creating new local tt entry: %pM (ttvn: %d)\n", addr, + (uint8_t)atomic_read(&bat_priv->ttvn)); + + memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); + tt_local_entry->common.flags = NO_FLAGS; + if (is_wifi_iface(ifindex)) + tt_local_entry->common.flags |= TT_CLIENT_WIFI; + atomic_set(&tt_local_entry->common.refcount, 2); + tt_local_entry->last_seen = jiffies; + + /* the batman interface mac address should never be purged */ + if (compare_eth(addr, soft_iface->dev_addr)) + tt_local_entry->common.flags |= TT_CLIENT_NOPURGE; + + /* The local entry has to be marked as NEW to avoid to send it in + * a full table response going out before the next ttvn increment + * (consistency check) */ + tt_local_entry->common.flags |= TT_CLIENT_NEW; + + hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig, + &tt_local_entry->common, + &tt_local_entry->common.hash_entry); + + if (unlikely(hash_added != 0)) { + /* remove the reference for the hash */ + tt_local_entry_free_ref(tt_local_entry); + goto out; + } + + tt_local_event(bat_priv, addr, tt_local_entry->common.flags); + + /* remove address from global hash if present */ + tt_global_entry = tt_global_hash_find(bat_priv, addr); + + /* Check whether it is a roaming! 
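+ * (the address just added locally is still announced as a global
+ * client by another originator, i.e. the client has roamed to us)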
*/ + if (tt_global_entry) { + /* This node is probably going to update its tt table */ + tt_global_entry->orig_node->tt_poss_change = true; + /* The global entry has to be marked as ROAMING and has to be + * kept for consistency purpose */ + tt_global_entry->common.flags |= TT_CLIENT_ROAM; + tt_global_entry->roam_at = jiffies; + send_roam_adv(bat_priv, tt_global_entry->common.addr, + tt_global_entry->orig_node); + } +out: + if (tt_local_entry) + tt_local_entry_free_ref(tt_local_entry); + if (tt_global_entry) + tt_global_entry_free_ref(tt_global_entry); +} + +int tt_changes_fill_buffer(struct bat_priv *bat_priv, + unsigned char *buff, int buff_len) +{ + int count = 0, tot_changes = 0; + struct tt_change_node *entry, *safe; + + if (buff_len > 0) + tot_changes = buff_len / tt_len(1); + + spin_lock_bh(&bat_priv->tt_changes_list_lock); + atomic_set(&bat_priv->tt_local_changes, 0); + + list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, + list) { + if (count < tot_changes) { + memcpy(buff + tt_len(count), + &entry->change, sizeof(struct tt_change)); + count++; + } + list_del(&entry->list); + kfree(entry); + } + spin_unlock_bh(&bat_priv->tt_changes_list_lock); + + /* Keep the buffer for possible tt_request */ + spin_lock_bh(&bat_priv->tt_buff_lock); + kfree(bat_priv->tt_buff); + bat_priv->tt_buff_len = 0; + bat_priv->tt_buff = NULL; + /* We check whether this new OGM has no changes due to size + * problems */ + if (buff_len > 0) { + /** + * if kmalloc() fails we will reply with the full table + * instead of providing the diff + */ + bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC); + if (bat_priv->tt_buff) { + memcpy(bat_priv->tt_buff, buff, buff_len); + bat_priv->tt_buff_len = buff_len; + } + } + spin_unlock_bh(&bat_priv->tt_buff_lock); + + return tot_changes; +} + +int tt_local_seq_print_text(struct seq_file *seq, void *offset) +{ + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct hashtable_t *hash = bat_priv->tt_local_hash; + struct tt_common_entry *tt_common_entry; + struct hard_iface *primary_if; + struct hlist_node *node; + struct hlist_head *head; + uint32_t i; + int ret = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - please specify interfaces to enable it\n", + net_dev->name); + goto out; + } + + if (primary_if->if_status != IF_ACTIVE) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - primary interface not active\n", + net_dev->name); + goto out; + } + + seq_printf(seq, + "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n", + net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn)); + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tt_common_entry, node, + head, hash_entry) { + seq_printf(seq, " * %pM [%c%c%c%c%c]\n", + tt_common_entry->addr, + (tt_common_entry->flags & + TT_CLIENT_ROAM ? 'R' : '.'), + (tt_common_entry->flags & + TT_CLIENT_NOPURGE ? 'P' : '.'), + (tt_common_entry->flags & + TT_CLIENT_NEW ? 'N' : '.'), + (tt_common_entry->flags & + TT_CLIENT_PENDING ? 'X' : '.'), + (tt_common_entry->flags & + TT_CLIENT_WIFI ? 
'W' : '.'));
+ }
+ rcu_read_unlock();
+ }
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+ return ret;
+}
+
+static void tt_local_set_pending(struct bat_priv *bat_priv,
+ struct tt_local_entry *tt_local_entry,
+ uint16_t flags, const char *message)
+{
+ tt_local_event(bat_priv, tt_local_entry->common.addr,
+ tt_local_entry->common.flags | flags);
+
+ /* The local client has to be marked as "pending to be removed" but has
+ * to be kept in the table in order to send it in a full table
+ * response issued before the next ttvn increment (consistency check) */
+ tt_local_entry->common.flags |= TT_CLIENT_PENDING;
+
+ bat_dbg(DBG_TT, bat_priv,
+ "Local tt entry (%pM) pending to be removed: %s\n",
+ tt_local_entry->common.addr, message);
+}
+
+void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
+ const char *message, bool roaming)
+{
+ struct tt_local_entry *tt_local_entry = NULL;
+
+ tt_local_entry = tt_local_hash_find(bat_priv, addr);
+ if (!tt_local_entry)
+ goto out;
+
+ tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
+ (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
+out:
+ if (tt_local_entry)
+ tt_local_entry_free_ref(tt_local_entry);
+}
+
+static void tt_local_purge(struct bat_priv *bat_priv)
+{
+ struct hashtable_t *hash = bat_priv->tt_local_hash;
+ struct tt_local_entry *tt_local_entry;
+ struct tt_common_entry *tt_common_entry;
+ struct hlist_node *node, *node_tmp;
+ struct hlist_head *head;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ uint32_t i;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
+ head, hash_entry) {
+ tt_local_entry = container_of(tt_common_entry,
+ struct tt_local_entry,
+ common);
+ if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
+ continue;
+
+ /* entry already marked for deletion */
+ if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
+ continue;
+
+ if (!has_timed_out(tt_local_entry->last_seen,
+ TT_LOCAL_TIMEOUT))
+ continue;
+
+ tt_local_set_pending(bat_priv, tt_local_entry,
+ TT_CLIENT_DEL, "timed out");
+ }
+ spin_unlock_bh(list_lock);
+ }
+
+}
+
+static void tt_local_table_free(struct bat_priv *bat_priv)
+{
+ struct hashtable_t *hash;
+ spinlock_t *list_lock; /* protects write access to the hash lists */
+ struct tt_common_entry *tt_common_entry;
+ struct tt_local_entry *tt_local_entry;
+ struct hlist_node *node, *node_tmp;
+ struct hlist_head *head;
+ uint32_t i;
+
+ if (!bat_priv->tt_local_hash)
+ return;
+
+ hash = bat_priv->tt_local_hash;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+ list_lock = &hash->list_locks[i];
+
+ spin_lock_bh(list_lock);
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
+ head, hash_entry) {
+ hlist_del_rcu(node);
+ tt_local_entry = container_of(tt_common_entry,
+ struct tt_local_entry,
+ common);
+ tt_local_entry_free_ref(tt_local_entry);
+ }
+ spin_unlock_bh(list_lock);
+ }
+
+ hash_destroy(hash);
+
+ bat_priv->tt_local_hash = NULL;
+}
+
+static int tt_global_init(struct bat_priv *bat_priv)
+{
+ if (bat_priv->tt_global_hash)
+ return 1;
+
+ bat_priv->tt_global_hash = hash_new(1024);
+
+ if (!bat_priv->tt_global_hash)
+ return 0;
+
+ return 1;
+}
+
+static void tt_changes_list_free(struct bat_priv *bat_priv)
+{
+ struct tt_change_node *entry, *safe;
+
+ spin_lock_bh(&bat_priv->tt_changes_list_lock);
+
+ list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+ list) {
+
list_del(&entry->list); + kfree(entry); + } + + atomic_set(&bat_priv->tt_local_changes, 0); + spin_unlock_bh(&bat_priv->tt_changes_list_lock); +} + +/* caller must hold orig_node refcount */ +int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_addr, uint8_t ttvn, bool roaming, + bool wifi) +{ + struct tt_global_entry *tt_global_entry; + struct orig_node *orig_node_tmp; + int ret = 0; + int hash_added; + + tt_global_entry = tt_global_hash_find(bat_priv, tt_addr); + + if (!tt_global_entry) { + tt_global_entry = + kmalloc(sizeof(*tt_global_entry), + GFP_ATOMIC); + if (!tt_global_entry) + goto out; + + memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN); + tt_global_entry->common.flags = NO_FLAGS; + atomic_set(&tt_global_entry->common.refcount, 2); + /* Assign the new orig_node */ + atomic_inc(&orig_node->refcount); + tt_global_entry->orig_node = orig_node; + tt_global_entry->ttvn = ttvn; + tt_global_entry->roam_at = 0; + + hash_added = hash_add(bat_priv->tt_global_hash, compare_tt, + choose_orig, &tt_global_entry->common, + &tt_global_entry->common.hash_entry); + + if (unlikely(hash_added != 0)) { + /* remove the reference for the hash */ + tt_global_entry_free_ref(tt_global_entry); + goto out_remove; + } + atomic_inc(&orig_node->tt_size); + } else { + if (tt_global_entry->orig_node != orig_node) { + atomic_dec(&tt_global_entry->orig_node->tt_size); + orig_node_tmp = tt_global_entry->orig_node; + atomic_inc(&orig_node->refcount); + tt_global_entry->orig_node = orig_node; + orig_node_free_ref(orig_node_tmp); + atomic_inc(&orig_node->tt_size); + } + tt_global_entry->common.flags = NO_FLAGS; + tt_global_entry->ttvn = ttvn; + tt_global_entry->roam_at = 0; + } + + if (wifi) + tt_global_entry->common.flags |= TT_CLIENT_WIFI; + + bat_dbg(DBG_TT, bat_priv, + "Creating new global tt entry: %pM (via %pM)\n", + tt_global_entry->common.addr, orig_node->orig); + +out_remove: + /* remove address from local hash if present */ + tt_local_remove(bat_priv, tt_global_entry->common.addr, + "global tt received", roaming); + ret = 1; +out: + if (tt_global_entry) + tt_global_entry_free_ref(tt_global_entry); + return ret; +} + +int tt_global_seq_print_text(struct seq_file *seq, void *offset) +{ + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct hashtable_t *hash = bat_priv->tt_global_hash; + struct tt_common_entry *tt_common_entry; + struct tt_global_entry *tt_global_entry; + struct hard_iface *primary_if; + struct hlist_node *node; + struct hlist_head *head; + uint32_t i; + int ret = 0; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - please specify interfaces to enable it\n", + net_dev->name); + goto out; + } + + if (primary_if->if_status != IF_ACTIVE) { + ret = seq_printf(seq, + "BATMAN mesh %s disabled - primary interface not active\n", + net_dev->name); + goto out; + } + + seq_printf(seq, + "Globally announced TT entries received via the mesh %s\n", + net_dev->name); + seq_printf(seq, " %-13s %s %-15s %s %s\n", + "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags"); + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tt_common_entry, node, + head, hash_entry) { + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, + common); + seq_printf(seq, + " * %pM (%3u) via %pM (%3u) [%c%c]\n", + tt_global_entry->common.addr, + 
tt_global_entry->ttvn, + tt_global_entry->orig_node->orig, + (uint8_t) atomic_read( + &tt_global_entry->orig_node-> + last_ttvn), + (tt_global_entry->common.flags & + TT_CLIENT_ROAM ? 'R' : '.'), + (tt_global_entry->common.flags & + TT_CLIENT_WIFI ? 'W' : '.')); + } + rcu_read_unlock(); + } +out: + if (primary_if) + hardif_free_ref(primary_if); + return ret; +} + +static void _tt_global_del(struct bat_priv *bat_priv, + struct tt_global_entry *tt_global_entry, + const char *message) +{ + if (!tt_global_entry) + goto out; + + bat_dbg(DBG_TT, bat_priv, + "Deleting global tt entry %pM (via %pM): %s\n", + tt_global_entry->common.addr, tt_global_entry->orig_node->orig, + message); + + atomic_dec(&tt_global_entry->orig_node->tt_size); + + hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig, + tt_global_entry->common.addr); +out: + if (tt_global_entry) + tt_global_entry_free_ref(tt_global_entry); +} + +void tt_global_del(struct bat_priv *bat_priv, + struct orig_node *orig_node, const unsigned char *addr, + const char *message, bool roaming) +{ + struct tt_global_entry *tt_global_entry = NULL; + struct tt_local_entry *tt_local_entry = NULL; + + tt_global_entry = tt_global_hash_find(bat_priv, addr); + if (!tt_global_entry || tt_global_entry->orig_node != orig_node) + goto out; + + if (!roaming) + goto out_del; + + /* if we are deleting a global entry due to a roam + * event, there are two possibilities: + * 1) the client roamed from node A to node B => we mark + * it with TT_CLIENT_ROAM, we start a timer and we + * wait for node B to claim it. In case of timeout + * the entry is purged. + * 2) the client roamed to us => we can directly delete + * the global entry, since it is useless now. */ + tt_local_entry = tt_local_hash_find(bat_priv, + tt_global_entry->common.addr); + if (!tt_local_entry) { + tt_global_entry->common.flags |= TT_CLIENT_ROAM; + tt_global_entry->roam_at = jiffies; + goto out; + } + +out_del: + _tt_global_del(bat_priv, tt_global_entry, message); + +out: + if (tt_global_entry) + tt_global_entry_free_ref(tt_global_entry); + if (tt_local_entry) + tt_local_entry_free_ref(tt_local_entry); +} + +void tt_global_del_orig(struct bat_priv *bat_priv, + struct orig_node *orig_node, const char *message) +{ + struct tt_global_entry *tt_global_entry; + struct tt_common_entry *tt_common_entry; + uint32_t i; + struct hashtable_t *hash = bat_priv->tt_global_hash; + struct hlist_node *node, *safe; + struct hlist_head *head; + spinlock_t *list_lock; /* protects write access to the hash lists */ + + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(tt_common_entry, node, safe, + head, hash_entry) { + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, + common); + if (tt_global_entry->orig_node == orig_node) { + bat_dbg(DBG_TT, bat_priv, + "Deleting global tt entry %pM (via %pM): %s\n", + tt_global_entry->common.addr, + tt_global_entry->orig_node->orig, + message); + hlist_del_rcu(node); + tt_global_entry_free_ref(tt_global_entry); + } + } + spin_unlock_bh(list_lock); + } + atomic_set(&orig_node->tt_size, 0); + orig_node->tt_initialised = false; +} + +static void tt_global_roam_purge(struct bat_priv *bat_priv) +{ + struct hashtable_t *hash = bat_priv->tt_global_hash; + struct tt_common_entry *tt_common_entry; + struct tt_global_entry *tt_global_entry; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + spinlock_t *list_lock; /* 
protects write access to the hash lists */ + uint32_t i; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, + head, hash_entry) { + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, + common); + if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM)) + continue; + if (!has_timed_out(tt_global_entry->roam_at, + TT_CLIENT_ROAM_TIMEOUT)) + continue; + + bat_dbg(DBG_TT, bat_priv, + "Deleting global tt entry (%pM): Roaming timeout\n", + tt_global_entry->common.addr); + atomic_dec(&tt_global_entry->orig_node->tt_size); + hlist_del_rcu(node); + tt_global_entry_free_ref(tt_global_entry); + } + spin_unlock_bh(list_lock); + } + +} + +static void tt_global_table_free(struct bat_priv *bat_priv) +{ + struct hashtable_t *hash; + spinlock_t *list_lock; /* protects write access to the hash lists */ + struct tt_common_entry *tt_common_entry; + struct tt_global_entry *tt_global_entry; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + uint32_t i; + + if (!bat_priv->tt_global_hash) + return; + + hash = bat_priv->tt_global_hash; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, + head, hash_entry) { + hlist_del_rcu(node); + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, + common); + tt_global_entry_free_ref(tt_global_entry); + } + spin_unlock_bh(list_lock); + } + + hash_destroy(hash); + + bat_priv->tt_global_hash = NULL; +} + +static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry, + struct tt_global_entry *tt_global_entry) +{ + bool ret = false; + + if (tt_local_entry->common.flags & TT_CLIENT_WIFI && + tt_global_entry->common.flags & TT_CLIENT_WIFI) + ret = true; + + return ret; +} + +struct orig_node *transtable_search(struct bat_priv *bat_priv, + const uint8_t *src, const uint8_t *addr) +{ + struct tt_local_entry *tt_local_entry = NULL; + struct tt_global_entry *tt_global_entry = NULL; + struct orig_node *orig_node = NULL; + + if (src && atomic_read(&bat_priv->ap_isolation)) { + tt_local_entry = tt_local_hash_find(bat_priv, src); + if (!tt_local_entry) + goto out; + } + + tt_global_entry = tt_global_hash_find(bat_priv, addr); + if (!tt_global_entry) + goto out; + + /* check whether the clients should not communicate due to AP + * isolation */ + if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry)) + goto out; + + if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) + goto out; + + orig_node = tt_global_entry->orig_node; + +out: + if (tt_global_entry) + tt_global_entry_free_ref(tt_global_entry); + if (tt_local_entry) + tt_local_entry_free_ref(tt_local_entry); + + return orig_node; +} + +/* Calculates the checksum of the local table of a given orig_node */ +uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) +{ + uint16_t total = 0, total_one; + struct hashtable_t *hash = bat_priv->tt_global_hash; + struct tt_common_entry *tt_common_entry; + struct tt_global_entry *tt_global_entry; + struct hlist_node *node; + struct hlist_head *head; + uint32_t i; + int j; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tt_common_entry, node, + head, hash_entry) { + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, + common); + 
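+ /* Checksum sketch: each announced client contributes the CRC16 of
+ * its 6 byte MAC address and all contributions are folded together
+ * with XOR, e.g. crc16(A) ^ crc16(B) for clients A and B. XOR makes
+ * the result independent of the iteration order and thus of the
+ * local hash layout.
+ */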
if (compare_eth(tt_global_entry->orig_node,
+ orig_node)) {
+ /* Roaming clients are in the global table for
+ * consistency only. They must not be
+ * taken into account when computing the
+ * global crc */
+ if (tt_common_entry->flags & TT_CLIENT_ROAM)
+ continue;
+ total_one = 0;
+ for (j = 0; j < ETH_ALEN; j++)
+ total_one = crc16_byte(total_one,
+ tt_common_entry->addr[j]);
+ total ^= total_one;
+ }
+ }
+ rcu_read_unlock();
+ }
+
+ return total;
+}
+
+/* Calculates the checksum of the local table */
+uint16_t tt_local_crc(struct bat_priv *bat_priv)
+{
+ uint16_t total = 0, total_one;
+ struct hashtable_t *hash = bat_priv->tt_local_hash;
+ struct tt_common_entry *tt_common_entry;
+ struct hlist_node *node;
+ struct hlist_head *head;
+ uint32_t i;
+ int j;
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tt_common_entry, node,
+ head, hash_entry) {
+ /* clients that have not yet been committed must not
+ * be taken into account when computing the CRC */
+ if (tt_common_entry->flags & TT_CLIENT_NEW)
+ continue;
+ total_one = 0;
+ for (j = 0; j < ETH_ALEN; j++)
+ total_one = crc16_byte(total_one,
+ tt_common_entry->addr[j]);
+ total ^= total_one;
+ }
+ rcu_read_unlock();
+ }
+
+ return total;
+}
+
+static void tt_req_list_free(struct bat_priv *bat_priv)
+{
+ struct tt_req_node *node, *safe;
+
+ spin_lock_bh(&bat_priv->tt_req_list_lock);
+
+ list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ spin_unlock_bh(&bat_priv->tt_req_list_lock);
+}
+
+void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ const unsigned char *tt_buff, uint8_t tt_num_changes)
+{
+ uint16_t tt_buff_len = tt_len(tt_num_changes);
+
+ /* Replace the old buffer only if I received something in the
+ * last OGM (the OGM could carry no changes) */
+ spin_lock_bh(&orig_node->tt_buff_lock);
+ if (tt_buff_len > 0) {
+ kfree(orig_node->tt_buff);
+ orig_node->tt_buff_len = 0;
+ orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
+ if (orig_node->tt_buff) {
+ memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
+ orig_node->tt_buff_len = tt_buff_len;
+ }
+ }
+ spin_unlock_bh(&orig_node->tt_buff_lock);
+}
+
+static void tt_req_purge(struct bat_priv *bat_priv)
+{
+ struct tt_req_node *node, *safe;
+
+ spin_lock_bh(&bat_priv->tt_req_list_lock);
+ list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+ if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
+ list_del(&node->list);
+ kfree(node);
+ }
+ }
+ spin_unlock_bh(&bat_priv->tt_req_list_lock);
+}
+
+/* returns the pointer to the new tt_req_node struct if no request
+ * has already been issued for this orig_node, NULL otherwise */
+static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
+ struct orig_node *orig_node)
+{
+ struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
+
+ spin_lock_bh(&bat_priv->tt_req_list_lock);
+ list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
+ if (compare_eth(tt_req_node_tmp, orig_node) &&
+ !has_timed_out(tt_req_node_tmp->issued_at,
+ TT_REQUEST_TIMEOUT))
+ goto unlock;
+ }
+
+ tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
+ if (!tt_req_node)
+ goto unlock;
+
+ memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
+ tt_req_node->issued_at = jiffies;
+
+ list_add(&tt_req_node->list, &bat_priv->tt_req_list);
+unlock:
+ spin_unlock_bh(&bat_priv->tt_req_list_lock);
+ return tt_req_node;
+}
+
+/* data_ptr is useless here, but has to be
kept to respect the prototype */ +static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr) +{ + const struct tt_common_entry *tt_common_entry = entry_ptr; + + if (tt_common_entry->flags & TT_CLIENT_NEW) + return 0; + return 1; +} + +static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr) +{ + const struct tt_common_entry *tt_common_entry = entry_ptr; + const struct tt_global_entry *tt_global_entry; + const struct orig_node *orig_node = data_ptr; + + if (tt_common_entry->flags & TT_CLIENT_ROAM) + return 0; + + tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, + common); + + return (tt_global_entry->orig_node == orig_node); +} + +static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, + struct hashtable_t *hash, + struct hard_iface *primary_if, + int (*valid_cb)(const void *, + const void *), + void *cb_data) +{ + struct tt_common_entry *tt_common_entry; + struct tt_query_packet *tt_response; + struct tt_change *tt_change; + struct hlist_node *node; + struct hlist_head *head; + struct sk_buff *skb = NULL; + uint16_t tt_tot, tt_count; + ssize_t tt_query_size = sizeof(struct tt_query_packet); + uint32_t i; + + if (tt_query_size + tt_len > primary_if->soft_iface->mtu) { + tt_len = primary_if->soft_iface->mtu - tt_query_size; + tt_len -= tt_len % sizeof(struct tt_change); + } + tt_tot = tt_len / sizeof(struct tt_change); + + skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN); + if (!skb) + goto out; + + skb_reserve(skb, ETH_HLEN); + tt_response = (struct tt_query_packet *)skb_put(skb, + tt_query_size + tt_len); + tt_response->ttvn = ttvn; + + tt_change = (struct tt_change *)(skb->data + tt_query_size); + tt_count = 0; + + rcu_read_lock(); + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + hlist_for_each_entry_rcu(tt_common_entry, node, + head, hash_entry) { + if (tt_count == tt_tot) + break; + + if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data))) + continue; + + memcpy(tt_change->addr, tt_common_entry->addr, + ETH_ALEN); + tt_change->flags = NO_FLAGS; + + tt_count++; + tt_change++; + } + } + rcu_read_unlock(); + + /* store in the message the number of entries we have successfully + * copied */ + tt_response->tt_data = htons(tt_count); + +out: + return skb; +} + +static int send_tt_request(struct bat_priv *bat_priv, + struct orig_node *dst_orig_node, + uint8_t ttvn, uint16_t tt_crc, bool full_table) +{ + struct sk_buff *skb = NULL; + struct tt_query_packet *tt_request; + struct neigh_node *neigh_node = NULL; + struct hard_iface *primary_if; + struct tt_req_node *tt_req_node = NULL; + int ret = 1; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + /* The new tt_req will be issued only if I'm not waiting for a + * reply from the same orig_node yet */ + tt_req_node = new_tt_req_node(bat_priv, dst_orig_node); + if (!tt_req_node) + goto out; + + skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN); + if (!skb) + goto out; + + skb_reserve(skb, ETH_HLEN); + + tt_request = (struct tt_query_packet *)skb_put(skb, + sizeof(struct tt_query_packet)); + + tt_request->header.packet_type = BAT_TT_QUERY; + tt_request->header.version = COMPAT_VERSION; + memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN); + memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN); + tt_request->header.ttl = TTL; + tt_request->ttvn = ttvn; + tt_request->tt_data = tt_crc; + tt_request->flags = TT_REQUEST; + + if (full_table) + tt_request->flags |= TT_FULL_TABLE; + + 
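+ /* The request carries the ttvn being asked for and the CRC received
+ * via OGM (in tt_data) so the receiver can verify that it still
+ * holds matching data; the packet itself is unicast hop by hop via
+ * the currently selected router, i.e. the best next hop towards the
+ * destination.
+ */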
neigh_node = orig_node_get_router(dst_orig_node); + if (!neigh_node) + goto out; + + bat_dbg(DBG_TT, bat_priv, + "Sending TT_REQUEST to %pM via %pM [%c]\n", + dst_orig_node->orig, neigh_node->addr, + (full_table ? 'F' : '.')); + + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + ret = 0; + +out: + if (neigh_node) + neigh_node_free_ref(neigh_node); + if (primary_if) + hardif_free_ref(primary_if); + if (ret) + kfree_skb(skb); + if (ret && tt_req_node) { + spin_lock_bh(&bat_priv->tt_req_list_lock); + list_del(&tt_req_node->list); + spin_unlock_bh(&bat_priv->tt_req_list_lock); + kfree(tt_req_node); + } + return ret; +} + +static bool send_other_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_request) +{ + struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL; + struct neigh_node *neigh_node = NULL; + struct hard_iface *primary_if = NULL; + uint8_t orig_ttvn, req_ttvn, ttvn; + int ret = false; + unsigned char *tt_buff; + bool full_table; + uint16_t tt_len, tt_tot; + struct sk_buff *skb = NULL; + struct tt_query_packet *tt_response; + + bat_dbg(DBG_TT, bat_priv, + "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n", + tt_request->src, tt_request->ttvn, tt_request->dst, + (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); + + /* Let's get the orig node of the REAL destination */ + req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst); + if (!req_dst_orig_node) + goto out; + + res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src); + if (!res_dst_orig_node) + goto out; + + neigh_node = orig_node_get_router(res_dst_orig_node); + if (!neigh_node) + goto out; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); + req_ttvn = tt_request->ttvn; + + /* I don't have the requested data */ + if (orig_ttvn != req_ttvn || + tt_request->tt_data != req_dst_orig_node->tt_crc) + goto out; + + /* If the full table has been explicitly requested */ + if (tt_request->flags & TT_FULL_TABLE || + !req_dst_orig_node->tt_buff) + full_table = true; + else + full_table = false; + + /* In this version, fragmentation is not implemented, then + * I'll send only one packet with as much TT entries as I can */ + if (!full_table) { + spin_lock_bh(&req_dst_orig_node->tt_buff_lock); + tt_len = req_dst_orig_node->tt_buff_len; + tt_tot = tt_len / sizeof(struct tt_change); + + skb = dev_alloc_skb(sizeof(struct tt_query_packet) + + tt_len + ETH_HLEN); + if (!skb) + goto unlock; + + skb_reserve(skb, ETH_HLEN); + tt_response = (struct tt_query_packet *)skb_put(skb, + sizeof(struct tt_query_packet) + tt_len); + tt_response->ttvn = req_ttvn; + tt_response->tt_data = htons(tt_tot); + + tt_buff = skb->data + sizeof(struct tt_query_packet); + /* Copy the last orig_node's OGM buffer */ + memcpy(tt_buff, req_dst_orig_node->tt_buff, + req_dst_orig_node->tt_buff_len); + + spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); + } else { + tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) * + sizeof(struct tt_change); + ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); + + skb = tt_response_fill_table(tt_len, ttvn, + bat_priv->tt_global_hash, + primary_if, tt_global_valid_entry, + req_dst_orig_node); + if (!skb) + goto out; + + tt_response = (struct tt_query_packet *)skb->data; + } + + tt_response->header.packet_type = BAT_TT_QUERY; + tt_response->header.version = COMPAT_VERSION; + tt_response->header.ttl = TTL; + memcpy(tt_response->src, req_dst_orig_node->orig, 
ETH_ALEN); + memcpy(tt_response->dst, tt_request->src, ETH_ALEN); + tt_response->flags = TT_RESPONSE; + + if (full_table) + tt_response->flags |= TT_FULL_TABLE; + + bat_dbg(DBG_TT, bat_priv, + "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n", + res_dst_orig_node->orig, neigh_node->addr, + req_dst_orig_node->orig, req_ttvn); + + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + ret = true; + goto out; + +unlock: + spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); + +out: + if (res_dst_orig_node) + orig_node_free_ref(res_dst_orig_node); + if (req_dst_orig_node) + orig_node_free_ref(req_dst_orig_node); + if (neigh_node) + neigh_node_free_ref(neigh_node); + if (primary_if) + hardif_free_ref(primary_if); + if (!ret) + kfree_skb(skb); + return ret; +} + +static bool send_my_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_request) +{ + struct orig_node *orig_node = NULL; + struct neigh_node *neigh_node = NULL; + struct hard_iface *primary_if = NULL; + uint8_t my_ttvn, req_ttvn, ttvn; + int ret = false; + unsigned char *tt_buff; + bool full_table; + uint16_t tt_len, tt_tot; + struct sk_buff *skb = NULL; + struct tt_query_packet *tt_response; + + bat_dbg(DBG_TT, bat_priv, + "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n", + tt_request->src, tt_request->ttvn, + (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); + + my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); + req_ttvn = tt_request->ttvn; + + orig_node = orig_hash_find(bat_priv, tt_request->src); + if (!orig_node) + goto out; + + neigh_node = orig_node_get_router(orig_node); + if (!neigh_node) + goto out; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + /* If the full table has been explicitly requested or the gap + * is too big, send the whole local translation table */ + if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn || + !bat_priv->tt_buff) + full_table = true; + else + full_table = false; + + /* In this version, fragmentation is not implemented, so + * I'll send only one packet with as many TT entries as I can */ + if (!full_table) { + spin_lock_bh(&bat_priv->tt_buff_lock); + tt_len = bat_priv->tt_buff_len; + tt_tot = tt_len / sizeof(struct tt_change); + + skb = dev_alloc_skb(sizeof(struct tt_query_packet) + + tt_len + ETH_HLEN); + if (!skb) + goto unlock; + + skb_reserve(skb, ETH_HLEN); + tt_response = (struct tt_query_packet *)skb_put(skb, + sizeof(struct tt_query_packet) + tt_len); + tt_response->ttvn = req_ttvn; + tt_response->tt_data = htons(tt_tot); + + tt_buff = skb->data + sizeof(struct tt_query_packet); + memcpy(tt_buff, bat_priv->tt_buff, + bat_priv->tt_buff_len); + spin_unlock_bh(&bat_priv->tt_buff_lock); + } else { + tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) * + sizeof(struct tt_change); + ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); + + skb = tt_response_fill_table(tt_len, ttvn, + bat_priv->tt_local_hash, + primary_if, tt_local_valid_entry, + NULL); + if (!skb) + goto out; + + tt_response = (struct tt_query_packet *)skb->data; + } + + tt_response->header.packet_type = BAT_TT_QUERY; + tt_response->header.version = COMPAT_VERSION; + tt_response->header.ttl = TTL; + memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN); + memcpy(tt_response->dst, tt_request->src, ETH_ALEN); + tt_response->flags = TT_RESPONSE; + + if (full_table) + tt_response->flags |= TT_FULL_TABLE; + + bat_dbg(DBG_TT, bat_priv, + "Sending TT_RESPONSE to %pM via %pM [%c]\n", + orig_node->orig, neigh_node->addr, + (tt_response->flags
& TT_FULL_TABLE ? 'F' : '.')); + + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + ret = true; + goto out; + +unlock: + spin_unlock_bh(&bat_priv->tt_buff_lock); +out: + if (orig_node) + orig_node_free_ref(orig_node); + if (neigh_node) + neigh_node_free_ref(neigh_node); + if (primary_if) + hardif_free_ref(primary_if); + if (!ret) + kfree_skb(skb); + /* This packet was for me, so it doesn't need to be re-routed */ + return true; +} + +bool send_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_request) +{ + if (is_my_mac(tt_request->dst)) + return send_my_tt_response(bat_priv, tt_request); + else + return send_other_tt_response(bat_priv, tt_request); +} + +static void _tt_update_changes(struct bat_priv *bat_priv, + struct orig_node *orig_node, + struct tt_change *tt_change, + uint16_t tt_num_changes, uint8_t ttvn) +{ + int i; + + for (i = 0; i < tt_num_changes; i++) { + if ((tt_change + i)->flags & TT_CLIENT_DEL) + tt_global_del(bat_priv, orig_node, + (tt_change + i)->addr, + "tt removed by changes", + (tt_change + i)->flags & TT_CLIENT_ROAM); + else + if (!tt_global_add(bat_priv, orig_node, + (tt_change + i)->addr, ttvn, false, + (tt_change + i)->flags & + TT_CLIENT_WIFI)) + /* In case of a problem while storing a + * global_entry, we stop the updating + * procedure without committing the + * ttvn change. This avoids sending + * corrupted data in reply to a later + * tt_request + */ + return; + } + orig_node->tt_initialised = true; +} + +static void tt_fill_gtable(struct bat_priv *bat_priv, + struct tt_query_packet *tt_response) +{ + struct orig_node *orig_node = NULL; + + orig_node = orig_hash_find(bat_priv, tt_response->src); + if (!orig_node) + goto out; + + /* Purge the old table first... */ + tt_global_del_orig(bat_priv, orig_node, "Received full table"); + + _tt_update_changes(bat_priv, orig_node, + (struct tt_change *)(tt_response + 1), + tt_response->tt_data, tt_response->ttvn); + + spin_lock_bh(&orig_node->tt_buff_lock); + kfree(orig_node->tt_buff); + orig_node->tt_buff_len = 0; + orig_node->tt_buff = NULL; + spin_unlock_bh(&orig_node->tt_buff_lock); + + atomic_set(&orig_node->last_ttvn, tt_response->ttvn); + +out: + if (orig_node) + orig_node_free_ref(orig_node); +} + +static void tt_update_changes(struct bat_priv *bat_priv, + struct orig_node *orig_node, + uint16_t tt_num_changes, uint8_t ttvn, + struct tt_change *tt_change) +{ + _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes, + ttvn); + + tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change, + tt_num_changes); + atomic_set(&orig_node->last_ttvn, ttvn); +} + +bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) +{ + struct tt_local_entry *tt_local_entry = NULL; + bool ret = false; + + tt_local_entry = tt_local_hash_find(bat_priv, addr); + if (!tt_local_entry) + goto out; + /* Check if the client has been logically deleted (but is kept for + * consistency purposes) */ + if (tt_local_entry->common.flags & TT_CLIENT_PENDING) + goto out; + ret = true; +out: + if (tt_local_entry) + tt_local_entry_free_ref(tt_local_entry); + return ret; +} + +void handle_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_response) +{ + struct tt_req_node *node, *safe; + struct orig_node *orig_node = NULL; + + bat_dbg(DBG_TT, bat_priv, + "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n", + tt_response->src, tt_response->ttvn, tt_response->tt_data, + (tt_response->flags & TT_FULL_TABLE ?
'F' : '.')); + + orig_node = orig_hash_find(bat_priv, tt_response->src); + if (!orig_node) + goto out; + + if (tt_response->flags & TT_FULL_TABLE) + tt_fill_gtable(bat_priv, tt_response); + else + tt_update_changes(bat_priv, orig_node, tt_response->tt_data, + tt_response->ttvn, + (struct tt_change *)(tt_response + 1)); + + /* Delete the tt_req_node from pending tt_requests list */ + spin_lock_bh(&bat_priv->tt_req_list_lock); + list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { + if (!compare_eth(node->addr, tt_response->src)) + continue; + list_del(&node->list); + kfree(node); + } + spin_unlock_bh(&bat_priv->tt_req_list_lock); + + /* Recalculate the CRC for this orig_node and store it */ + orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); + /* Roaming phase is over: tables are in sync again. I can + * unset the flag */ + orig_node->tt_poss_change = false; +out: + if (orig_node) + orig_node_free_ref(orig_node); +} + +int tt_init(struct bat_priv *bat_priv) +{ + if (!tt_local_init(bat_priv)) + return 0; + + if (!tt_global_init(bat_priv)) + return 0; + + tt_start_timer(bat_priv); + + return 1; +} + +static void tt_roam_list_free(struct bat_priv *bat_priv) +{ + struct tt_roam_node *node, *safe; + + spin_lock_bh(&bat_priv->tt_roam_list_lock); + + list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { + list_del(&node->list); + kfree(node); + } + + spin_unlock_bh(&bat_priv->tt_roam_list_lock); +} + +static void tt_roam_purge(struct bat_priv *bat_priv) +{ + struct tt_roam_node *node, *safe; + + spin_lock_bh(&bat_priv->tt_roam_list_lock); + list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { + if (!has_timed_out(node->first_time, ROAMING_MAX_TIME)) + continue; + + list_del(&node->list); + kfree(node); + } + spin_unlock_bh(&bat_priv->tt_roam_list_lock); +} + +/* This function checks whether the client has already reached the + * maximum number of possible roaming phases. If so, the ROAMING_ADV + * will not be sent. + * + * returns true if the ROAMING_ADV can be sent, false otherwise */ +static bool tt_check_roam_count(struct bat_priv *bat_priv, + uint8_t *client) +{ + struct tt_roam_node *tt_roam_node; + bool ret = false; + + spin_lock_bh(&bat_priv->tt_roam_list_lock); + /* Look for an existing roaming entry for this client: the ROAMING_ADV + * will be issued only if the roaming counter has not been exhausted + * within ROAMING_MAX_TIME yet */ + list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) { + if (!compare_eth(tt_roam_node->addr, client)) + continue; + + if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME)) + continue; + + if (!atomic_dec_not_zero(&tt_roam_node->counter)) + /* Sorry, you roamed too many times!
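+			 * The roaming counter (initialised to + * ROAMING_MAX_COUNT - 1) has reached zero within + * ROAMING_MAX_TIME.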
*/ + goto unlock; + ret = true; + break; + } + + if (!ret) { + tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC); + if (!tt_roam_node) + goto unlock; + + tt_roam_node->first_time = jiffies; + atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1); + memcpy(tt_roam_node->addr, client, ETH_ALEN); + + list_add(&tt_roam_node->list, &bat_priv->tt_roam_list); + ret = true; + } + +unlock: + spin_unlock_bh(&bat_priv->tt_roam_list_lock); + return ret; +} + +void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, + struct orig_node *orig_node) +{ + struct neigh_node *neigh_node = NULL; + struct sk_buff *skb = NULL; + struct roam_adv_packet *roam_adv_packet; + int ret = 1; + struct hard_iface *primary_if; + + /* before going on we have to check whether the client has + * already roamed to us too many times */ + if (!tt_check_roam_count(bat_priv, client)) + goto out; + + skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN); + if (!skb) + goto out; + + skb_reserve(skb, ETH_HLEN); + + roam_adv_packet = (struct roam_adv_packet *)skb_put(skb, + sizeof(struct roam_adv_packet)); + + roam_adv_packet->header.packet_type = BAT_ROAM_ADV; + roam_adv_packet->header.version = COMPAT_VERSION; + roam_adv_packet->header.ttl = TTL; + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); + hardif_free_ref(primary_if); + memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN); + memcpy(roam_adv_packet->client, client, ETH_ALEN); + + neigh_node = orig_node_get_router(orig_node); + if (!neigh_node) + goto out; + + bat_dbg(DBG_TT, bat_priv, + "Sending ROAMING_ADV to %pM (client %pM) via %pM\n", + orig_node->orig, client, neigh_node->addr); + + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + ret = 0; + +out: + if (neigh_node) + neigh_node_free_ref(neigh_node); + if (ret) + kfree_skb(skb); + return; +} + +static void tt_purge(struct work_struct *work) +{ + struct delayed_work *delayed_work = + container_of(work, struct delayed_work, work); + struct bat_priv *bat_priv = + container_of(delayed_work, struct bat_priv, tt_work); + + tt_local_purge(bat_priv); + tt_global_roam_purge(bat_priv); + tt_req_purge(bat_priv); + tt_roam_purge(bat_priv); + + tt_start_timer(bat_priv); +} + +void tt_free(struct bat_priv *bat_priv) +{ + cancel_delayed_work_sync(&bat_priv->tt_work); + + tt_local_table_free(bat_priv); + tt_global_table_free(bat_priv); + tt_req_list_free(bat_priv); + tt_changes_list_free(bat_priv); + tt_roam_list_free(bat_priv); + + kfree(bat_priv->tt_buff); +} + +/* This function will enable or disable the specified flags for all the entries + * in the given hash table and returns the number of modified entries */ +static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags, + bool enable) +{ + uint32_t i; + uint16_t changed_num = 0; + struct hlist_head *head; + struct hlist_node *node; + struct tt_common_entry *tt_common_entry; + + if (!hash) + goto out; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tt_common_entry, node, + head, hash_entry) { + if (enable) { + if ((tt_common_entry->flags & flags) == flags) + continue; + tt_common_entry->flags |= flags; + } else { + if (!(tt_common_entry->flags & flags)) + continue; + tt_common_entry->flags &= ~flags; + } + changed_num++; + } + rcu_read_unlock(); + } +out: + return changed_num; +} + +/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */ 
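+/* (i.e. clients that were logically deleted but kept in the table until + * the deletion could be committed together with the ttvn increment) */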
+static void tt_local_purge_pending_clients(struct bat_priv *bat_priv) +{ + struct hashtable_t *hash = bat_priv->tt_local_hash; + struct tt_common_entry *tt_common_entry; + struct tt_local_entry *tt_local_entry; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + spinlock_t *list_lock; /* protects write access to the hash lists */ + uint32_t i; + + if (!hash) + return; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + list_lock = &hash->list_locks[i]; + + spin_lock_bh(list_lock); + hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, + head, hash_entry) { + if (!(tt_common_entry->flags & TT_CLIENT_PENDING)) + continue; + + bat_dbg(DBG_TT, bat_priv, + "Deleting local tt entry (%pM): pending\n", + tt_common_entry->addr); + + atomic_dec(&bat_priv->num_local_tt); + hlist_del_rcu(node); + tt_local_entry = container_of(tt_common_entry, + struct tt_local_entry, + common); + tt_local_entry_free_ref(tt_local_entry); + } + spin_unlock_bh(list_lock); + } +} + +void tt_commit_changes(struct bat_priv *bat_priv) +{ + uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash, + TT_CLIENT_NEW, false); + /* all the reset entries now have to be counted as local entries */ + atomic_add(changed_num, &bat_priv->num_local_tt); + tt_local_purge_pending_clients(bat_priv); + + /* Increment the TTVN only once per OGM interval */ + atomic_inc(&bat_priv->ttvn); + bat_priv->tt_poss_change = false; +} + +bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst) +{ + struct tt_local_entry *tt_local_entry = NULL; + struct tt_global_entry *tt_global_entry = NULL; + bool ret = false; + + if (!atomic_read(&bat_priv->ap_isolation)) + goto out; + + tt_local_entry = tt_local_hash_find(bat_priv, dst); + if (!tt_local_entry) + goto out; + + tt_global_entry = tt_global_hash_find(bat_priv, src); + if (!tt_global_entry) + goto out; + + if (!_is_ap_isolated(tt_local_entry, tt_global_entry)) + goto out; + + ret = true; + +out: + if (tt_global_entry) + tt_global_entry_free_ref(tt_global_entry); + if (tt_local_entry) + tt_local_entry_free_ref(tt_local_entry); + return ret; +} + +void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_buff, uint8_t tt_num_changes, + uint8_t ttvn, uint16_t tt_crc) +{ + uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); + bool full_table = true; + + /* orig table not initialised AND first diff is in the OGM OR the ttvn + * increased by one -> we can apply the attached changes */ + if ((!orig_node->tt_initialised && ttvn == 1) || + ttvn - orig_ttvn == 1) { + /* the OGM could not contain the changes due to their size or + * because they have already been sent TT_OGM_APPEND_MAX times. + * In this case, send a tt request */ + if (!tt_num_changes) { + full_table = false; + goto request_table; + } + + tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn, + (struct tt_change *)tt_buff); + + /* Even if we received the precomputed crc with the OGM, we + * prefer to recompute it to spot any possible inconsistency + * in the global table */ + orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); + + /* The ttvn alone is not enough to guarantee consistency + * because a single value could represent different states + * (due to the wrap around). Thus a node has to check whether + * the resulting table (after applying the changes) is still + * consistent or not. E.g.
a node could disconnect while its + * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case + * checking the CRC value is mandatory to detect the + * inconsistency */ + if (orig_node->tt_crc != tt_crc) + goto request_table; + + /* Roaming phase is over: tables are in sync again. I can + * unset the flag */ + orig_node->tt_poss_change = false; + } else { + /* if we missed more than one change or our tables are not + * in sync anymore -> request fresh tt data */ + if (!orig_node->tt_initialised || ttvn != orig_ttvn || + orig_node->tt_crc != tt_crc) { +request_table: + bat_dbg(DBG_TT, bat_priv, + "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n", + orig_node->orig, ttvn, orig_ttvn, tt_crc, + orig_node->tt_crc, tt_num_changes); + send_tt_request(bat_priv, orig_node, ttvn, tt_crc, + full_table); + return; + } + } +} diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h new file mode 100644 index 00000000..c753633b --- /dev/null +++ b/net/batman-adv/translation-table.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ +#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ + +int tt_len(int changes_num); +int tt_changes_fill_buffer(struct bat_priv *bat_priv, + unsigned char *buff, int buff_len); +int tt_init(struct bat_priv *bat_priv); +void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, + int ifindex); +void tt_local_remove(struct bat_priv *bat_priv, + const uint8_t *addr, const char *message, bool roaming); +int tt_local_seq_print_text(struct seq_file *seq, void *offset); +void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_buff, int tt_buff_len); +int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *addr, uint8_t ttvn, bool roaming, + bool wifi); +int tt_global_seq_print_text(struct seq_file *seq, void *offset); +void tt_global_del_orig(struct bat_priv *bat_priv, + struct orig_node *orig_node, const char *message); +void tt_global_del(struct bat_priv *bat_priv, + struct orig_node *orig_node, const unsigned char *addr, + const char *message, bool roaming); +struct orig_node *transtable_search(struct bat_priv *bat_priv, + const uint8_t *src, const uint8_t *addr); +void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_buff, uint8_t tt_num_changes); +uint16_t tt_local_crc(struct bat_priv *bat_priv); +uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node); +void tt_free(struct bat_priv *bat_priv); +bool send_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_request); +bool is_my_client(struct 
bat_priv *bat_priv, const uint8_t *addr); +void handle_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_response); +void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, + struct orig_node *orig_node); +void tt_commit_changes(struct bat_priv *bat_priv); +bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst); +void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_buff, uint8_t tt_num_changes, + uint8_t ttvn, uint16_t tt_crc); + +#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h new file mode 100644 index 00000000..302efb52 --- /dev/null +++ b/net/batman-adv/types.h @@ -0,0 +1,367 @@ +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + + + +#ifndef _NET_BATMAN_ADV_TYPES_H_ +#define _NET_BATMAN_ADV_TYPES_H_ + +#include "packet.h" +#include "bitarray.h" + +#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \ + ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \ + sizeof(struct unicast_packet) : \ + sizeof(struct bcast_packet)))) + + +struct hard_iface { + struct list_head list; + int16_t if_num; + char if_status; + struct net_device *net_dev; + atomic_t seqno; + atomic_t frag_seqno; + unsigned char *packet_buff; + int packet_len; + struct kobject *hardif_obj; + atomic_t refcount; + struct packet_type batman_adv_ptype; + struct net_device *soft_iface; + struct rcu_head rcu; +}; + +/** + * orig_node - structure for orig_list maintaining nodes of mesh + * @primary_addr: hosts primary interface address + * @last_valid: when last packet from this node was received + * @bcast_seqno_reset: time when the broadcast seqno window was reset + * @batman_seqno_reset: time when the batman seqno window was reset + * @gw_flags: flags related to gateway class + * @flags: for now only VIS_SERVER flag + * @last_real_seqno: last and best known sequence number + * @last_ttl: ttl of last received packet + * @last_bcast_seqno: last broadcast sequence number received by this host + * + * @candidates: how many candidates are available + * @selected: next bonding candidate + */ +struct orig_node { + uint8_t orig[ETH_ALEN]; + uint8_t primary_addr[ETH_ALEN]; + struct neigh_node __rcu *router; /* rcu protected pointer */ + unsigned long *bcast_own; + uint8_t *bcast_own_sum; + unsigned long last_valid; + unsigned long bcast_seqno_reset; + unsigned long batman_seqno_reset; + uint8_t gw_flags; + uint8_t flags; + atomic_t last_ttvn; /* last seen translation table version number */ + uint16_t tt_crc; + unsigned char *tt_buff; + int16_t tt_buff_len; + spinlock_t tt_buff_lock; /* protects tt_buff */ + atomic_t tt_size; + bool tt_initialised; + /* The tt_poss_change flag is used to detect an ongoing roaming phase. 
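+	 * (i.e. a client formerly served by this orig_node has just roamed + * to me).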
+ * If true, then I sent a Roaming_adv to this orig_node and I have to + * inspect every packet directed to it to check whether it is still + * the true destination or not. This flag will be reset to false as + * soon as I receive a new TTVN from this orig_node */ + bool tt_poss_change; + uint32_t last_real_seqno; + uint8_t last_ttl; + unsigned long bcast_bits[NUM_WORDS]; + uint32_t last_bcast_seqno; + struct hlist_head neigh_list; + struct list_head frag_list; + spinlock_t neigh_list_lock; /* protects neigh_list and router */ + atomic_t refcount; + struct rcu_head rcu; + struct hlist_node hash_entry; + struct bat_priv *bat_priv; + unsigned long last_frag_packet; + /* ogm_cnt_lock protects: bcast_own, bcast_own_sum, + * neigh_node->real_bits, neigh_node->real_packet_count */ + spinlock_t ogm_cnt_lock; + /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */ + spinlock_t bcast_seqno_lock; + spinlock_t tt_list_lock; /* protects tt_list */ + atomic_t bond_candidates; + struct list_head bond_list; +}; + +struct gw_node { + struct hlist_node list; + struct orig_node *orig_node; + unsigned long deleted; + atomic_t refcount; + struct rcu_head rcu; +}; + +/** + * neigh_node + * @last_valid: when last packet via this neighbor was received + */ +struct neigh_node { + struct hlist_node list; + uint8_t addr[ETH_ALEN]; + uint8_t real_packet_count; + uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE]; + uint8_t tq_index; + uint8_t tq_avg; + uint8_t last_ttl; + struct list_head bonding_list; + unsigned long last_valid; + unsigned long real_bits[NUM_WORDS]; + atomic_t refcount; + struct rcu_head rcu; + struct orig_node *orig_node; + struct hard_iface *if_incoming; + spinlock_t tq_lock; /* protects: tq_recv, tq_index */ +}; + + +struct bat_priv { + atomic_t mesh_state; + struct net_device_stats stats; + atomic_t aggregated_ogms; /* boolean */ + atomic_t bonding; /* boolean */ + atomic_t fragmentation; /* boolean */ + atomic_t ap_isolation; /* boolean */ + atomic_t vis_mode; /* VIS_TYPE_* */ + atomic_t gw_mode; /* GW_MODE_* */ + atomic_t gw_sel_class; /* uint */ + atomic_t gw_bandwidth; /* gw bandwidth */ + atomic_t orig_interval; /* uint */ + atomic_t hop_penalty; /* uint */ + atomic_t log_level; /* uint */ + atomic_t bcast_seqno; + atomic_t bcast_queue_left; + atomic_t batman_queue_left; + atomic_t ttvn; /* translation table version number */ + atomic_t tt_ogm_append_cnt; + atomic_t tt_local_changes; /* changes registered in a OGM interval */ + /* The tt_poss_change flag is used to detect an ongoing roaming phase. + * If true, then I received a Roaming_adv and I have to inspect every + * packet directed to me to check whether I am still the true + * destination or not. 
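+	 * Such packets may then have to be rerouted towards the client's + * new serving node.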
This flag will be reset to false as soon as I + * increase my TTVN */ + bool tt_poss_change; + char num_ifaces; + struct debug_log *debug_log; + struct kobject *mesh_obj; + struct dentry *debug_dir; + struct hlist_head forw_bat_list; + struct hlist_head forw_bcast_list; + struct hlist_head gw_list; + struct hlist_head softif_neigh_vids; + struct list_head tt_changes_list; /* tracks changes in a OGM int */ + struct list_head vis_send_list; + struct hashtable_t *orig_hash; + struct hashtable_t *tt_local_hash; + struct hashtable_t *tt_global_hash; + struct list_head tt_req_list; /* list of pending tt_requests */ + struct list_head tt_roam_list; + struct hashtable_t *vis_hash; + spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ + spinlock_t forw_bcast_list_lock; /* protects */ + spinlock_t tt_changes_list_lock; /* protects tt_changes */ + spinlock_t tt_req_list_lock; /* protects tt_req_list */ + spinlock_t tt_roam_list_lock; /* protects tt_roam_list */ + spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ + spinlock_t vis_hash_lock; /* protects vis_hash */ + spinlock_t vis_list_lock; /* protects vis_info::recv_list */ + spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ + spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */ + atomic_t num_local_tt; + /* Checksum of the local table, recomputed before sending a new OGM */ + atomic_t tt_crc; + unsigned char *tt_buff; + int16_t tt_buff_len; + spinlock_t tt_buff_lock; /* protects tt_buff */ + struct delayed_work tt_work; + struct delayed_work orig_work; + struct delayed_work vis_work; + struct gw_node __rcu *curr_gw; /* rcu protected pointer */ + atomic_t gw_reselect; + struct hard_iface __rcu *primary_if; /* rcu protected pointer */ + struct vis_info *my_vis_info; + struct bat_algo_ops *bat_algo_ops; +}; + +struct socket_client { + struct list_head queue_list; + unsigned int queue_len; + unsigned char index; + spinlock_t lock; /* protects queue_list, queue_len, index */ + wait_queue_head_t queue_wait; + struct bat_priv *bat_priv; +}; + +struct socket_packet { + struct list_head list; + size_t icmp_len; + struct icmp_packet_rr icmp_packet; +}; + +struct tt_common_entry { + uint8_t addr[ETH_ALEN]; + struct hlist_node hash_entry; + uint16_t flags; + atomic_t refcount; + struct rcu_head rcu; +}; + +struct tt_local_entry { + struct tt_common_entry common; + unsigned long last_seen; +}; + +struct tt_global_entry { + struct tt_common_entry common; + struct orig_node *orig_node; + uint8_t ttvn; + unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ +}; + +struct tt_change_node { + struct list_head list; + struct tt_change change; +}; + +struct tt_req_node { + uint8_t addr[ETH_ALEN]; + unsigned long issued_at; + struct list_head list; +}; + +struct tt_roam_node { + uint8_t addr[ETH_ALEN]; + atomic_t counter; + unsigned long first_time; + struct list_head list; +}; + +/** + * forw_packet - structure for forw_list maintaining packets to be + * send/forwarded + */ +struct forw_packet { + struct hlist_node list; + unsigned long send_time; + uint8_t own; + struct sk_buff *skb; + uint16_t packet_len; + uint32_t direct_link_flags; + uint8_t num_packets; + struct delayed_work delayed_work; + struct hard_iface *if_incoming; +}; + +/* While scanning for vis-entries of a particular vis-originator + * this list collects its interfaces to create a subgraph/cluster + * out of them later + */ +struct if_list_entry { + uint8_t addr[ETH_ALEN]; + bool primary; + struct hlist_node list; +}; + +struct 
debug_log { + char log_buff[LOG_BUF_LEN]; + unsigned long log_start; + unsigned long log_end; + spinlock_t lock; /* protects log_buff, log_start and log_end */ + wait_queue_head_t queue_wait; +}; + +struct frag_packet_list_entry { + struct list_head list; + uint16_t seqno; + struct sk_buff *skb; +}; + +struct vis_info { + unsigned long first_seen; + /* list of server-neighbors we received a vis-packet + * from. we should not reply to them. */ + struct list_head recv_list; + struct list_head send_list; + struct kref refcount; + struct hlist_node hash_entry; + struct bat_priv *bat_priv; + /* this packet might be part of the vis send queue. */ + struct sk_buff *skb_packet; + /* vis_info may follow here*/ +} __packed; + +struct vis_info_entry { + uint8_t src[ETH_ALEN]; + uint8_t dest[ETH_ALEN]; + uint8_t quality; /* quality = 0 client */ +} __packed; + +struct recvlist_node { + struct list_head list; + uint8_t mac[ETH_ALEN]; +}; + +struct softif_neigh_vid { + struct hlist_node list; + struct bat_priv *bat_priv; + short vid; + atomic_t refcount; + struct softif_neigh __rcu *softif_neigh; + struct rcu_head rcu; + struct hlist_head softif_neigh_list; +}; + +struct softif_neigh { + struct hlist_node list; + uint8_t addr[ETH_ALEN]; + unsigned long last_seen; + atomic_t refcount; + struct rcu_head rcu; +}; + +struct bat_algo_ops { + struct hlist_node list; + char *name; + /* init OGM when hard-interface is enabled */ + void (*bat_ogm_init)(struct hard_iface *hard_iface); + /* init primary OGM when primary interface is selected */ + void (*bat_ogm_init_primary)(struct hard_iface *hard_iface); + /* init mac addresses of the OGM belonging to this hard-interface */ + void (*bat_ogm_update_mac)(struct hard_iface *hard_iface); + /* prepare a new outgoing OGM for the send queue */ + void (*bat_ogm_schedule)(struct hard_iface *hard_iface, + int tt_num_changes); + /* send scheduled OGM */ + void (*bat_ogm_emit)(struct forw_packet *forw_packet); + /* receive incoming OGM */ + void (*bat_ogm_receive)(struct hard_iface *if_incoming, + struct sk_buff *skb); +}; + +#endif /* _NET_BATMAN_ADV_TYPES_H_ */ diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c new file mode 100644 index 00000000..676f6a62 --- /dev/null +++ b/net/batman-adv/unicast.c @@ -0,0 +1,356 @@ +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: + * + * Andreas Langer + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "unicast.h" +#include "send.h" +#include "soft-interface.h" +#include "gateway_client.h" +#include "originator.h" +#include "hash.h" +#include "translation-table.h" +#include "routing.h" +#include "hard-interface.h" + + +static struct sk_buff *frag_merge_packet(struct list_head *head, + struct frag_packet_list_entry *tfp, + struct sk_buff *skb) +{ + struct unicast_frag_packet *up = + (struct unicast_frag_packet *)skb->data; + struct sk_buff *tmp_skb; + struct unicast_packet *unicast_packet; + int hdr_len = sizeof(*unicast_packet); + int uni_diff = sizeof(*up) - hdr_len; + + /* set skb to the first part and tmp_skb to the second part */ + if (up->flags & UNI_FRAG_HEAD) { + tmp_skb = tfp->skb; + } else { + tmp_skb = skb; + skb = tfp->skb; + } + + if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0) + goto err; + + skb_pull(tmp_skb, sizeof(*up)); + if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) + goto err; + + /* move free entry to end */ + tfp->skb = NULL; + tfp->seqno = 0; + list_move_tail(&tfp->list, head); + + memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len); + kfree_skb(tmp_skb); + + memmove(skb->data + uni_diff, skb->data, hdr_len); + unicast_packet = (struct unicast_packet *)skb_pull(skb, uni_diff); + unicast_packet->header.packet_type = BAT_UNICAST; + + return skb; + +err: + /* free buffered skb, skb will be freed later */ + kfree_skb(tfp->skb); + return NULL; +} + +static void frag_create_entry(struct list_head *head, struct sk_buff *skb) +{ + struct frag_packet_list_entry *tfp; + struct unicast_frag_packet *up = + (struct unicast_frag_packet *)skb->data; + + /* free and oldest packets stand at the end */ + tfp = list_entry((head)->prev, typeof(*tfp), list); + kfree_skb(tfp->skb); + + tfp->seqno = ntohs(up->seqno); + tfp->skb = skb; + list_move(&tfp->list, head); + return; +} + +static int frag_create_buffer(struct list_head *head) +{ + int i; + struct frag_packet_list_entry *tfp; + + for (i = 0; i < FRAG_BUFFER_SIZE; i++) { + tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC); + if (!tfp) { + frag_list_free(head); + return -ENOMEM; + } + tfp->skb = NULL; + tfp->seqno = 0; + INIT_LIST_HEAD(&tfp->list); + list_add(&tfp->list, head); + } + + return 0; +} + +static struct frag_packet_list_entry *frag_search_packet(struct list_head *head, + const struct unicast_frag_packet *up) +{ + struct frag_packet_list_entry *tfp; + struct unicast_frag_packet *tmp_up = NULL; + uint16_t search_seqno; + + if (up->flags & UNI_FRAG_HEAD) + search_seqno = ntohs(up->seqno)+1; + else + search_seqno = ntohs(up->seqno)-1; + + list_for_each_entry(tfp, head, list) { + + if (!tfp->skb) + continue; + + if (tfp->seqno == ntohs(up->seqno)) + goto mov_tail; + + tmp_up = (struct unicast_frag_packet *)tfp->skb->data; + + if (tfp->seqno == search_seqno) { + + if ((tmp_up->flags & UNI_FRAG_HEAD) != + (up->flags & UNI_FRAG_HEAD)) + return tfp; + else + goto mov_tail; + } + } + return NULL; + +mov_tail: + list_move_tail(&tfp->list, head); + return NULL; +} + +void frag_list_free(struct list_head *head) +{ + struct frag_packet_list_entry *pf, *tmp_pf; + + if (!list_empty(head)) { + + list_for_each_entry_safe(pf, tmp_pf, head, list) { + kfree_skb(pf->skb); + list_del(&pf->list); + kfree(pf); + } + } + return; +} + +/* frag_reassemble_skb(): + 
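+ * try to reassemble a fragment pair buffered in the per-originator + * fragment list (see frag_search_packet() and frag_merge_packet()).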
* returns NET_RX_DROP if the operation failed - skb is left intact + * returns NET_RX_SUCCESS if the fragment was buffered (skb_new will be NULL) + * or the skb could be reassembled (skb_new will point to the new packet and + * skb was freed) + */ +int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, + struct sk_buff **new_skb) +{ + struct orig_node *orig_node; + struct frag_packet_list_entry *tmp_frag_entry; + int ret = NET_RX_DROP; + struct unicast_frag_packet *unicast_packet = + (struct unicast_frag_packet *)skb->data; + + *new_skb = NULL; + + orig_node = orig_hash_find(bat_priv, unicast_packet->orig); + if (!orig_node) + goto out; + + orig_node->last_frag_packet = jiffies; + + if (list_empty(&orig_node->frag_list) && + frag_create_buffer(&orig_node->frag_list)) { + pr_debug("couldn't create frag buffer\n"); + goto out; + } + + tmp_frag_entry = frag_search_packet(&orig_node->frag_list, + unicast_packet); + + if (!tmp_frag_entry) { + frag_create_entry(&orig_node->frag_list, skb); + ret = NET_RX_SUCCESS; + goto out; + } + + *new_skb = frag_merge_packet(&orig_node->frag_list, tmp_frag_entry, + skb); + /* if not, merge failed */ + if (*new_skb) + ret = NET_RX_SUCCESS; + +out: + if (orig_node) + orig_node_free_ref(orig_node); + return ret; +} + +int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, + struct hard_iface *hard_iface, const uint8_t dstaddr[]) +{ + struct unicast_packet tmp_uc, *unicast_packet; + struct hard_iface *primary_if; + struct sk_buff *frag_skb; + struct unicast_frag_packet *frag1, *frag2; + int uc_hdr_len = sizeof(*unicast_packet); + int ucf_hdr_len = sizeof(*frag1); + int data_len = skb->len - uc_hdr_len; + int large_tail = 0, ret = NET_RX_DROP; + uint16_t seqno; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto dropped; + + frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len); + if (!frag_skb) + goto dropped; + skb_reserve(frag_skb, ucf_hdr_len); + + unicast_packet = (struct unicast_packet *)skb->data; + memcpy(&tmp_uc, unicast_packet, uc_hdr_len); + skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len); + + if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 || + my_skb_head_push(frag_skb, ucf_hdr_len) < 0) + goto drop_frag; + + frag1 = (struct unicast_frag_packet *)skb->data; + frag2 = (struct unicast_frag_packet *)frag_skb->data; + + memcpy(frag1, &tmp_uc, sizeof(tmp_uc)); + + frag1->header.ttl--; + frag1->header.version = COMPAT_VERSION; + frag1->header.packet_type = BAT_UNICAST_FRAG; + + memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN); + memcpy(frag2, frag1, sizeof(*frag2)); + + if (data_len & 1) + large_tail = UNI_FRAG_LARGETAIL; + + frag1->flags = UNI_FRAG_HEAD | large_tail; + frag2->flags = large_tail; + + seqno = atomic_add_return(2, &hard_iface->frag_seqno); + frag1->seqno = htons(seqno - 1); + frag2->seqno = htons(seqno); + + send_skb_packet(skb, hard_iface, dstaddr); + send_skb_packet(frag_skb, hard_iface, dstaddr); + ret = NET_RX_SUCCESS; + goto out; + +drop_frag: + kfree_skb(frag_skb); +dropped: + kfree_skb(skb); +out: + if (primary_if) + hardif_free_ref(primary_if); + return ret; +} + +int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) +{ + struct ethhdr *ethhdr = (struct ethhdr *)skb->data; + struct unicast_packet *unicast_packet; + struct orig_node *orig_node; + struct neigh_node *neigh_node; + int data_len = skb->len; + int ret = 1; + + /* get routing information */ + if (is_multicast_ether_addr(ethhdr->h_dest)) { + orig_node = 
gw_get_selected_orig(bat_priv); + if (orig_node) + goto find_router; + } + + /* check for tt host - increases orig_node refcount. + * returns NULL in case of AP isolation */ + orig_node = transtable_search(bat_priv, ethhdr->h_source, + ethhdr->h_dest); + +find_router: + /** + * find_router(): + * - if orig_node is NULL it returns NULL + * - increases neigh_node's refcount if found. + */ + neigh_node = find_router(bat_priv, orig_node, NULL); + + if (!neigh_node) + goto out; + + if (my_skb_head_push(skb, sizeof(*unicast_packet)) < 0) + goto out; + + unicast_packet = (struct unicast_packet *)skb->data; + + unicast_packet->header.version = COMPAT_VERSION; + /* batman packet type: unicast */ + unicast_packet->header.packet_type = BAT_UNICAST; + /* set unicast ttl */ + unicast_packet->header.ttl = TTL; + /* copy the destination for faster routing */ + memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); + /* set the destination tt version number */ + unicast_packet->ttvn = + (uint8_t)atomic_read(&orig_node->last_ttvn); + + if (atomic_read(&bat_priv->fragmentation) && + data_len + sizeof(*unicast_packet) > + neigh_node->if_incoming->net_dev->mtu) { + /* frag_send_skb() will decrease the ttl again, so raise it + * here to compensate */ + unicast_packet->header.ttl++; + ret = frag_send_skb(skb, bat_priv, + neigh_node->if_incoming, neigh_node->addr); + goto out; + } + + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + ret = 0; + goto out; + +out: + if (neigh_node) + neigh_node_free_ref(neigh_node); + if (orig_node) + orig_node_free_ref(orig_node); + if (ret == 1) + kfree_skb(skb); + return ret; +} diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h new file mode 100644 index 00000000..a9faf6b1 --- /dev/null +++ b/net/batman-adv/unicast.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: + * + * Andreas Langer + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#ifndef _NET_BATMAN_ADV_UNICAST_H_ +#define _NET_BATMAN_ADV_UNICAST_H_ + +#include "packet.h" + +#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */ +#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */ + +int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, + struct sk_buff **new_skb); +void frag_list_free(struct list_head *head); +int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); +int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, + struct hard_iface *hard_iface, const uint8_t dstaddr[]); + +static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu) +{ + const struct unicast_frag_packet *unicast_packet; + int uneven_correction = 0; + unsigned int merged_size; + + unicast_packet = (struct unicast_frag_packet *)skb->data; + + if (unicast_packet->flags & UNI_FRAG_LARGETAIL) { + if (unicast_packet->flags & UNI_FRAG_HEAD) + uneven_correction = 1; + else + uneven_correction = -1; + } + + merged_size = (skb->len - sizeof(*unicast_packet)) * 2; + merged_size += sizeof(struct unicast_packet) + uneven_correction; + + return merged_size <= mtu; +} + +#endif /* _NET_BATMAN_ADV_UNICAST_H_ */ diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c new file mode 100644 index 00000000..c4a5b8ca --- /dev/null +++ b/net/batman-adv/vis.c @@ -0,0 +1,975 @@ +/* + * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA + * + */ + +#include "main.h" +#include "send.h" +#include "translation-table.h" +#include "vis.h" +#include "soft-interface.h" +#include "hard-interface.h" +#include "hash.h" +#include "originator.h" + +#define MAX_VIS_PACKET_SIZE 1000 + +static void start_vis_timer(struct bat_priv *bat_priv); + +/* free the info */ +static void free_info(struct kref *ref) +{ + struct vis_info *info = container_of(ref, struct vis_info, refcount); + struct bat_priv *bat_priv = info->bat_priv; + struct recvlist_node *entry, *tmp; + + list_del_init(&info->send_list); + spin_lock_bh(&bat_priv->vis_list_lock); + list_for_each_entry_safe(entry, tmp, &info->recv_list, list) { + list_del(&entry->list); + kfree(entry); + } + + spin_unlock_bh(&bat_priv->vis_list_lock); + kfree_skb(info->skb_packet); + kfree(info); +} + +/* Compare two vis packets, used by the hashing algorithm */ +static int vis_info_cmp(const struct hlist_node *node, const void *data2) +{ + const struct vis_info *d1, *d2; + const struct vis_packet *p1, *p2; + + d1 = container_of(node, struct vis_info, hash_entry); + d2 = data2; + p1 = (struct vis_packet *)d1->skb_packet->data; + p2 = (struct vis_packet *)d2->skb_packet->data; + return compare_eth(p1->vis_orig, p2->vis_orig); +} + +/* hash function to choose an entry in a hash table of given size */ +/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ +static uint32_t vis_info_choose(const void *data, uint32_t size) +{ + const struct vis_info *vis_info = data; + const struct vis_packet *packet; + const unsigned char *key; + uint32_t hash = 0; + size_t i; + + packet = (struct vis_packet *)vis_info->skb_packet->data; + key = packet->vis_orig; + for (i = 0; i < ETH_ALEN; i++) { + hash += key[i]; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash % size; +} + +static struct vis_info *vis_hash_find(struct bat_priv *bat_priv, + const void *data) +{ + struct hashtable_t *hash = bat_priv->vis_hash; + struct hlist_head *head; + struct hlist_node *node; + struct vis_info *vis_info, *vis_info_tmp = NULL; + uint32_t index; + + if (!hash) + return NULL; + + index = vis_info_choose(data, hash->size); + head = &hash->table[index]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) { + if (!vis_info_cmp(node, data)) + continue; + + vis_info_tmp = vis_info; + break; + } + rcu_read_unlock(); + + return vis_info_tmp; +} + +/* insert interface to the list of interfaces of one originator, if it + * does not already exist in the list */ +static void vis_data_insert_interface(const uint8_t *interface, + struct hlist_head *if_list, + bool primary) +{ + struct if_list_entry *entry; + struct hlist_node *pos; + + hlist_for_each_entry(entry, pos, if_list, list) { + if (compare_eth(entry->addr, interface)) + return; + } + + /* it's a new address, add it to the list */ + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return; + memcpy(entry->addr, interface, ETH_ALEN); + entry->primary = primary; + hlist_add_head(&entry->list, if_list); +} + +static ssize_t vis_data_read_prim_sec(char *buff, + const struct hlist_head *if_list) +{ + struct if_list_entry *entry; + struct hlist_node *pos; + size_t len = 0; + + hlist_for_each_entry(entry, pos, if_list, list) { + if 
(entry->primary) + len += sprintf(buff + len, "PRIMARY, "); + else + len += sprintf(buff + len, "SEC %pM, ", entry->addr); + } + + return len; +} + +static size_t vis_data_count_prim_sec(struct hlist_head *if_list) +{ + struct if_list_entry *entry; + struct hlist_node *pos; + size_t count = 0; + + hlist_for_each_entry(entry, pos, if_list, list) { + if (entry->primary) + count += 9; + else + count += 23; + } + + return count; +} + +/* read an entry */ +static ssize_t vis_data_read_entry(char *buff, + const struct vis_info_entry *entry, + const uint8_t *src, bool primary) +{ + /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ + if (primary && entry->quality == 0) + return sprintf(buff, "TT %pM, ", entry->dest); + else if (compare_eth(entry->src, src)) + return sprintf(buff, "TQ %pM %d, ", entry->dest, + entry->quality); + + return 0; +} + +int vis_seq_print_text(struct seq_file *seq, void *offset) +{ + struct hard_iface *primary_if; + struct hlist_node *node; + struct hlist_head *head; + struct vis_info *info; + struct vis_packet *packet; + struct vis_info_entry *entries; + struct net_device *net_dev = (struct net_device *)seq->private; + struct bat_priv *bat_priv = netdev_priv(net_dev); + struct hashtable_t *hash = bat_priv->vis_hash; + HLIST_HEAD(vis_if_list); + struct if_list_entry *entry; + struct hlist_node *pos, *n; + uint32_t i; + int j, ret = 0; + int vis_server = atomic_read(&bat_priv->vis_mode); + size_t buff_pos, buf_size; + char *buff; + int compare; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + if (vis_server == VIS_TYPE_CLIENT_UPDATE) + goto out; + + buf_size = 1; + /* Estimate length */ + spin_lock_bh(&bat_priv->vis_hash_lock); + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(info, node, head, hash_entry) { + packet = (struct vis_packet *)info->skb_packet->data; + entries = (struct vis_info_entry *) + ((char *)packet + sizeof(*packet)); + + for (j = 0; j < packet->entries; j++) { + if (entries[j].quality == 0) + continue; + compare = + compare_eth(entries[j].src, packet->vis_orig); + vis_data_insert_interface(entries[j].src, + &vis_if_list, + compare); + } + + hlist_for_each_entry(entry, pos, &vis_if_list, list) { + buf_size += 18 + 26 * packet->entries; + + /* add primary/secondary records */ + if (compare_eth(entry->addr, packet->vis_orig)) + buf_size += + vis_data_count_prim_sec(&vis_if_list); + + buf_size += 1; + } + + hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, + list) { + hlist_del(&entry->list); + kfree(entry); + } + } + rcu_read_unlock(); + } + + buff = kmalloc(buf_size, GFP_ATOMIC); + if (!buff) { + spin_unlock_bh(&bat_priv->vis_hash_lock); + ret = -ENOMEM; + goto out; + } + buff[0] = '\0'; + buff_pos = 0; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(info, node, head, hash_entry) { + packet = (struct vis_packet *)info->skb_packet->data; + entries = (struct vis_info_entry *) + ((char *)packet + sizeof(*packet)); + + for (j = 0; j < packet->entries; j++) { + if (entries[j].quality == 0) + continue; + compare = + compare_eth(entries[j].src, packet->vis_orig); + vis_data_insert_interface(entries[j].src, + &vis_if_list, + compare); + } + + hlist_for_each_entry(entry, pos, &vis_if_list, list) { + buff_pos += sprintf(buff + buff_pos, "%pM,", + entry->addr); + + for (j = 0; j < packet->entries; j++) + buff_pos += vis_data_read_entry( + buff + buff_pos, + &entries[j], + entry->addr, + 
entry->primary); + + /* add primary/secondary records */ + if (compare_eth(entry->addr, packet->vis_orig)) + buff_pos += + vis_data_read_prim_sec(buff + buff_pos, + &vis_if_list); + + buff_pos += sprintf(buff + buff_pos, "\n"); + } + + hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, + list) { + hlist_del(&entry->list); + kfree(entry); + } + } + rcu_read_unlock(); + } + + spin_unlock_bh(&bat_priv->vis_hash_lock); + + seq_printf(seq, "%s", buff); + kfree(buff); + +out: + if (primary_if) + hardif_free_ref(primary_if); + return ret; +} + +/* add the info packet to the send list, if it was not + * already linked in. */ +static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info) +{ + if (list_empty(&info->send_list)) { + kref_get(&info->refcount); + list_add_tail(&info->send_list, &bat_priv->vis_send_list); + } +} + +/* delete the info packet from the send list, if it was + * linked in. */ +static void send_list_del(struct vis_info *info) +{ + if (!list_empty(&info->send_list)) { + list_del_init(&info->send_list); + kref_put(&info->refcount, free_info); + } +} + +/* tries to add one entry to the receive list. */ +static void recv_list_add(struct bat_priv *bat_priv, + struct list_head *recv_list, const char *mac) +{ + struct recvlist_node *entry; + + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return; + + memcpy(entry->mac, mac, ETH_ALEN); + spin_lock_bh(&bat_priv->vis_list_lock); + list_add_tail(&entry->list, recv_list); + spin_unlock_bh(&bat_priv->vis_list_lock); +} + +/* returns 1 if this mac is in the recv_list */ +static int recv_list_is_in(struct bat_priv *bat_priv, + const struct list_head *recv_list, const char *mac) +{ + const struct recvlist_node *entry; + + spin_lock_bh(&bat_priv->vis_list_lock); + list_for_each_entry(entry, recv_list, list) { + if (compare_eth(entry->mac, mac)) { + spin_unlock_bh(&bat_priv->vis_list_lock); + return 1; + } + } + spin_unlock_bh(&bat_priv->vis_list_lock); + return 0; +} + +/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old, + * broken.. ). vis hash must be locked outside. is_new is set when the packet + * is newer than old entries in the hash. */ +static struct vis_info *add_packet(struct bat_priv *bat_priv, + struct vis_packet *vis_packet, + int vis_info_len, int *is_new, + int make_broadcast) +{ + struct vis_info *info, *old_info; + struct vis_packet *search_packet, *old_packet; + struct vis_info search_elem; + struct vis_packet *packet; + int hash_added; + + *is_new = 0; + /* sanity check */ + if (!bat_priv->vis_hash) + return NULL; + + /* see if the packet is already in vis_hash */ + search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet)); + if (!search_elem.skb_packet) + return NULL; + search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet, + sizeof(*search_packet)); + + memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN); + old_info = vis_hash_find(bat_priv, &search_elem); + kfree_skb(search_elem.skb_packet); + + if (old_info) { + old_packet = (struct vis_packet *)old_info->skb_packet->data; + if (!seq_after(ntohl(vis_packet->seqno), + ntohl(old_packet->seqno))) { + if (old_packet->seqno == vis_packet->seqno) { + recv_list_add(bat_priv, &old_info->recv_list, + vis_packet->sender_orig); + return old_info; + } else { + /* newer packet is already in hash. 
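+				 * The received packet is older and is + * dropped.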
*/ + return NULL; + } + } + /* remove old entry */ + hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, + old_info); + send_list_del(old_info); + kref_put(&old_info->refcount, free_info); + } + + info = kmalloc(sizeof(*info), GFP_ATOMIC); + if (!info) + return NULL; + + info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len + + sizeof(struct ethhdr)); + if (!info->skb_packet) { + kfree(info); + return NULL; + } + skb_reserve(info->skb_packet, sizeof(struct ethhdr)); + packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet) + + vis_info_len); + + kref_init(&info->refcount); + INIT_LIST_HEAD(&info->send_list); + INIT_LIST_HEAD(&info->recv_list); + info->first_seen = jiffies; + info->bat_priv = bat_priv; + memcpy(packet, vis_packet, sizeof(*packet) + vis_info_len); + + /* initialize and add new packet. */ + *is_new = 1; + + /* Make it a broadcast packet, if required */ + if (make_broadcast) + memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); + + /* repair if entries is longer than packet. */ + if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len) + packet->entries = vis_info_len / sizeof(struct vis_info_entry); + + recv_list_add(bat_priv, &info->recv_list, packet->sender_orig); + + /* try to add it */ + hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, + info, &info->hash_entry); + if (hash_added != 0) { + /* did not work (for some reason) */ + kref_put(&info->refcount, free_info); + info = NULL; + } + + return info; +} + +/* handle the server sync packet, forward if needed. */ +void receive_server_sync_packet(struct bat_priv *bat_priv, + struct vis_packet *vis_packet, + int vis_info_len) +{ + struct vis_info *info; + int is_new, make_broadcast; + int vis_server = atomic_read(&bat_priv->vis_mode); + + make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC); + + spin_lock_bh(&bat_priv->vis_hash_lock); + info = add_packet(bat_priv, vis_packet, vis_info_len, + &is_new, make_broadcast); + if (!info) + goto end; + + /* only if we are server ourselves and packet is newer than the one in + * hash.*/ + if (vis_server == VIS_TYPE_SERVER_SYNC && is_new) + send_list_add(bat_priv, info); +end: + spin_unlock_bh(&bat_priv->vis_hash_lock); +} + +/* handle an incoming client update packet and schedule forward if needed. */ +void receive_client_update_packet(struct bat_priv *bat_priv, + struct vis_packet *vis_packet, + int vis_info_len) +{ + struct vis_info *info; + struct vis_packet *packet; + int is_new; + int vis_server = atomic_read(&bat_priv->vis_mode); + int are_target = 0; + + /* clients shall not broadcast. */ + if (is_broadcast_ether_addr(vis_packet->target_orig)) + return; + + /* Are we the target for this VIS packet? */ + if (vis_server == VIS_TYPE_SERVER_SYNC && + is_my_mac(vis_packet->target_orig)) + are_target = 1; + + spin_lock_bh(&bat_priv->vis_hash_lock); + info = add_packet(bat_priv, vis_packet, vis_info_len, + &is_new, are_target); + + if (!info) + goto end; + /* note that outdated packets will be dropped at this point. */ + + packet = (struct vis_packet *)info->skb_packet->data; + + /* send only if we're the target server or ... */ + if (are_target && is_new) { + packet->vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */ + send_list_add(bat_priv, info); + + /* ... we're not the recipient (and thus need to forward). 
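+	 * The packet keeps its client update type and is simply scheduled + * for another hop towards target_orig.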
*/ + } else if (!is_my_mac(packet->target_orig)) { + send_list_add(bat_priv, info); + } + +end: + spin_unlock_bh(&bat_priv->vis_hash_lock); +} + +/* Walk the originators and find the VIS server with the best tq. Set the packet + * address to its address and return the best_tq. + * + * Must be called with the originator hash locked */ +static int find_best_vis_server(struct bat_priv *bat_priv, + struct vis_info *info) +{ + struct hashtable_t *hash = bat_priv->orig_hash; + struct neigh_node *router; + struct hlist_node *node; + struct hlist_head *head; + struct orig_node *orig_node; + struct vis_packet *packet; + int best_tq = -1; + uint32_t i; + + packet = (struct vis_packet *)info->skb_packet->data; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { + router = orig_node_get_router(orig_node); + if (!router) + continue; + + if ((orig_node->flags & VIS_SERVER) && + (router->tq_avg > best_tq)) { + best_tq = router->tq_avg; + memcpy(packet->target_orig, orig_node->orig, + ETH_ALEN); + } + neigh_node_free_ref(router); + } + rcu_read_unlock(); + } + + return best_tq; +} + +/* Return true if the vis packet is full. */ +static bool vis_packet_full(const struct vis_info *info) +{ + const struct vis_packet *packet; + packet = (struct vis_packet *)info->skb_packet->data; + + if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry) + < packet->entries + 1) + return true; + return false; +} + +/* generates a packet of own vis data, + * returns 0 on success, -1 if no packet could be generated */ +static int generate_vis_packet(struct bat_priv *bat_priv) +{ + struct hashtable_t *hash = bat_priv->orig_hash; + struct hlist_node *node; + struct hlist_head *head; + struct orig_node *orig_node; + struct neigh_node *router; + struct vis_info *info = bat_priv->my_vis_info; + struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; + struct vis_info_entry *entry; + struct tt_common_entry *tt_common_entry; + int best_tq = -1; + uint32_t i; + + info->first_seen = jiffies; + packet->vis_type = atomic_read(&bat_priv->vis_mode); + + memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); + packet->header.ttl = TTL; + packet->seqno = htonl(ntohl(packet->seqno) + 1); + packet->entries = 0; + skb_trim(info->skb_packet, sizeof(*packet)); + + if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) { + best_tq = find_best_vis_server(bat_priv, info); + + if (best_tq < 0) + return -1; + } + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { + router = orig_node_get_router(orig_node); + if (!router) + continue; + + if (!compare_eth(router->addr, orig_node->orig)) + goto next; + + if (router->if_incoming->if_status != IF_ACTIVE) + goto next; + + if (router->tq_avg < 1) + goto next; + + /* fill one entry into buffer. 
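+ * Each entry appended via skb_put() is one fixed-size link record.
+ * Its layout, as used here (packet.h holds the authoritative
+ * definition), is roughly:
+ *
+ *   struct vis_info_entry {
+ *           uint8_t src[ETH_ALEN];  // our receiving interface address
+ *           uint8_t dest[ETH_ALEN]; // originator this link points to
+ *           uint8_t quality;        // tq_avg, or 0 for a TT entry
+ *   } __packed;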
*/ + entry = (struct vis_info_entry *) + skb_put(info->skb_packet, sizeof(*entry)); + memcpy(entry->src, + router->if_incoming->net_dev->dev_addr, + ETH_ALEN); + memcpy(entry->dest, orig_node->orig, ETH_ALEN); + entry->quality = router->tq_avg; + packet->entries++; + +next: + neigh_node_free_ref(router); + + if (vis_packet_full(info)) + goto unlock; + } + rcu_read_unlock(); + } + + hash = bat_priv->tt_local_hash; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(tt_common_entry, node, head, + hash_entry) { + entry = (struct vis_info_entry *) + skb_put(info->skb_packet, + sizeof(*entry)); + memset(entry->src, 0, ETH_ALEN); + memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN); + entry->quality = 0; /* 0 means TT */ + packet->entries++; + + if (vis_packet_full(info)) + goto unlock; + } + rcu_read_unlock(); + } + + return 0; + +unlock: + rcu_read_unlock(); + return 0; +} + +/* free old vis packets. Must be called with this vis_hash_lock + * held */ +static void purge_vis_packets(struct bat_priv *bat_priv) +{ + uint32_t i; + struct hashtable_t *hash = bat_priv->vis_hash; + struct hlist_node *node, *node_tmp; + struct hlist_head *head; + struct vis_info *info; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + hlist_for_each_entry_safe(info, node, node_tmp, + head, hash_entry) { + /* never purge own data. */ + if (info == bat_priv->my_vis_info) + continue; + + if (has_timed_out(info->first_seen, VIS_TIMEOUT)) { + hlist_del(node); + send_list_del(info); + kref_put(&info->refcount, free_info); + } + } + } +} + +static void broadcast_vis_packet(struct bat_priv *bat_priv, + struct vis_info *info) +{ + struct neigh_node *router; + struct hashtable_t *hash = bat_priv->orig_hash; + struct hlist_node *node; + struct hlist_head *head; + struct orig_node *orig_node; + struct vis_packet *packet; + struct sk_buff *skb; + struct hard_iface *hard_iface; + uint8_t dstaddr[ETH_ALEN]; + uint32_t i; + + + packet = (struct vis_packet *)info->skb_packet->data; + + /* send to all routers in range. */ + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { + /* if it's a vis server and reachable, send it. */ + if (!(orig_node->flags & VIS_SERVER)) + continue; + + router = orig_node_get_router(orig_node); + if (!router) + continue; + + /* don't send it if we already received the packet from + * this node. 
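+ * This recv_list check is what keeps the flood from looping:
+ * an originator we already received this packet from is never
+ * sent the same packet again, so server-sync broadcasts cannot
+ * bounce back and forth between two vis servers.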
*/ + if (recv_list_is_in(bat_priv, &info->recv_list, + orig_node->orig)) { + neigh_node_free_ref(router); + continue; + } + + memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); + hard_iface = router->if_incoming; + memcpy(dstaddr, router->addr, ETH_ALEN); + + neigh_node_free_ref(router); + + skb = skb_clone(info->skb_packet, GFP_ATOMIC); + if (skb) + send_skb_packet(skb, hard_iface, dstaddr); + + } + rcu_read_unlock(); + } +} + +static void unicast_vis_packet(struct bat_priv *bat_priv, + struct vis_info *info) +{ + struct orig_node *orig_node; + struct neigh_node *router = NULL; + struct sk_buff *skb; + struct vis_packet *packet; + + packet = (struct vis_packet *)info->skb_packet->data; + + orig_node = orig_hash_find(bat_priv, packet->target_orig); + if (!orig_node) + goto out; + + router = orig_node_get_router(orig_node); + if (!router) + goto out; + + skb = skb_clone(info->skb_packet, GFP_ATOMIC); + if (skb) + send_skb_packet(skb, router->if_incoming, router->addr); + +out: + if (router) + neigh_node_free_ref(router); + if (orig_node) + orig_node_free_ref(orig_node); +} + +/* only send one vis packet. called from send_vis_packets() */ +static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info) +{ + struct hard_iface *primary_if; + struct vis_packet *packet; + + primary_if = primary_if_get_selected(bat_priv); + if (!primary_if) + goto out; + + packet = (struct vis_packet *)info->skb_packet->data; + if (packet->header.ttl < 2) { + pr_debug("Error - can't send vis packet: ttl exceeded\n"); + goto out; + } + + memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN); + packet->header.ttl--; + + if (is_broadcast_ether_addr(packet->target_orig)) + broadcast_vis_packet(bat_priv, info); + else + unicast_vis_packet(bat_priv, info); + packet->header.ttl++; /* restore TTL */ + +out: + if (primary_if) + hardif_free_ref(primary_if); +} + +/* called from timer; send (and maybe generate) vis packet. */ +static void send_vis_packets(struct work_struct *work) +{ + struct delayed_work *delayed_work = + container_of(work, struct delayed_work, work); + struct bat_priv *bat_priv = + container_of(delayed_work, struct bat_priv, vis_work); + struct vis_info *info; + + spin_lock_bh(&bat_priv->vis_hash_lock); + purge_vis_packets(bat_priv); + + if (generate_vis_packet(bat_priv) == 0) { + /* schedule if generation was successful */ + send_list_add(bat_priv, bat_priv->my_vis_info); + } + + while (!list_empty(&bat_priv->vis_send_list)) { + info = list_first_entry(&bat_priv->vis_send_list, + typeof(*info), send_list); + + kref_get(&info->refcount); + spin_unlock_bh(&bat_priv->vis_hash_lock); + + send_vis_packet(bat_priv, info); + + spin_lock_bh(&bat_priv->vis_hash_lock); + send_list_del(info); + kref_put(&info->refcount, free_info); + } + spin_unlock_bh(&bat_priv->vis_hash_lock); + start_vis_timer(bat_priv); +} + +/* init the vis server. this may only be called when if_list is already + * initialized (e.g. 
bat0 is initialized, interfaces have been added) */ +int vis_init(struct bat_priv *bat_priv) +{ + struct vis_packet *packet; + int hash_added; + + if (bat_priv->vis_hash) + return 1; + + spin_lock_bh(&bat_priv->vis_hash_lock); + + bat_priv->vis_hash = hash_new(256); + if (!bat_priv->vis_hash) { + pr_err("Can't initialize vis_hash\n"); + goto err; + } + + bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC); + if (!bat_priv->my_vis_info) + goto err; + + bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) + + MAX_VIS_PACKET_SIZE + + sizeof(struct ethhdr)); + if (!bat_priv->my_vis_info->skb_packet) + goto free_info; + + skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr)); + packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet, + sizeof(*packet)); + + /* prefill the vis info */ + bat_priv->my_vis_info->first_seen = jiffies - + msecs_to_jiffies(VIS_INTERVAL); + INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list); + INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list); + kref_init(&bat_priv->my_vis_info->refcount); + bat_priv->my_vis_info->bat_priv = bat_priv; + packet->header.version = COMPAT_VERSION; + packet->header.packet_type = BAT_VIS; + packet->header.ttl = TTL; + packet->seqno = 0; + packet->entries = 0; + + INIT_LIST_HEAD(&bat_priv->vis_send_list); + + hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, + bat_priv->my_vis_info, + &bat_priv->my_vis_info->hash_entry); + if (hash_added != 0) { + pr_err("Can't add own vis packet into hash\n"); + /* not in hash, need to remove it manually. */ + kref_put(&bat_priv->my_vis_info->refcount, free_info); + goto err; + } + + spin_unlock_bh(&bat_priv->vis_hash_lock); + start_vis_timer(bat_priv); + return 1; + +free_info: + kfree(bat_priv->my_vis_info); + bat_priv->my_vis_info = NULL; +err: + spin_unlock_bh(&bat_priv->vis_hash_lock); + vis_quit(bat_priv); + return 0; +} + +/* Decrease the reference count on a hash item info */ +static void free_info_ref(struct hlist_node *node, void *arg) +{ + struct vis_info *info; + + info = container_of(node, struct vis_info, hash_entry); + send_list_del(info); + kref_put(&info->refcount, free_info); +} + +/* shutdown vis-server */ +void vis_quit(struct bat_priv *bat_priv) +{ + if (!bat_priv->vis_hash) + return; + + cancel_delayed_work_sync(&bat_priv->vis_work); + + spin_lock_bh(&bat_priv->vis_hash_lock); + /* properly remove, kill timers ... */ + hash_delete(bat_priv->vis_hash, free_info_ref, NULL); + bat_priv->vis_hash = NULL; + bat_priv->my_vis_info = NULL; + spin_unlock_bh(&bat_priv->vis_hash_lock); +} + +/* schedule packets for (re)transmission */ +static void start_vis_timer(struct bat_priv *bat_priv) +{ + INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets); + queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work, + msecs_to_jiffies(VIS_INTERVAL)); +} diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h new file mode 100644 index 00000000..ee2e46e5 --- /dev/null +++ b/net/batman-adv/vis.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors: + * + * Simon Wunderlich, Marek Lindner + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_VIS_H_
+#define _NET_BATMAN_ADV_VIS_H_
+
+#define VIS_TIMEOUT 200000 /* timeout of vis packets
+ * in milliseconds */
+
+int vis_seq_print_text(struct seq_file *seq, void *offset);
+void receive_server_sync_packet(struct bat_priv *bat_priv,
+ struct vis_packet *vis_packet,
+ int vis_info_len);
+void receive_client_update_packet(struct bat_priv *bat_priv,
+ struct vis_packet *vis_packet,
+ int vis_info_len);
+int vis_init(struct bat_priv *bat_priv);
+void vis_quit(struct bat_priv *bat_priv);
+
+#endif /* _NET_BATMAN_ADV_VIS_H_ */
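
The vis component's lifecycle as defined by this diff: vis_init() builds the
vis hash plus the node's own vis_info and arms the timer, send_vis_packets()
re-arms itself through start_vis_timer(), and vis_quit() cancels the work and
tears the hash down. A minimal caller sketch follows (the mesh bring-up path
and its names are assumed for illustration; only vis_init() and vis_quit()
come from this diff):

        /* illustrative only: wiring vis into an assumed mesh init path */
        static int mesh_bringup_sketch(struct bat_priv *bat_priv)
        {
                if (!vis_init(bat_priv)) /* returns 1 on success, 0 on error */
                        return -ENOMEM;
                return 0;
        }

        static void mesh_teardown_sketch(struct bat_priv *bat_priv)
        {
                /* safe even after a failed vis_init(): vis_quit()
                 * returns early when vis_hash is NULL
                 */
                vis_quit(bat_priv);
        }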