linux/net/ax25/af_ax25.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Darryl Miles G7LED (dlm@g7led.demon.co.uk)
 * Copyright (C) Steven Whitehouse GW7RRM (stevew@acm.org)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
 * Copyright (C) Hans Alblas PE1AYX (hans@esrac.ele.tue.nl)
 * Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
 */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>
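
/*
 * ax25_list holds every active AX.25 control block; additions, removals
 * and traversals of the list are serialised by ax25_list_lock.
 */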
HLIST_HEAD(ax25_list);
DEFINE_SPINLOCK(ax25_list_lock);
static const struct proto_ops ax25_proto_ops;
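
/*
 * Drop the ax25_cb reference held on behalf of a socket that is being
 * freed.
 */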
static void ax25_free_sock(struct sock *sk)
{
	ax25_cb_put(sk_to_ax25(sk));
}
/*
 * Socket removal during an interrupt is now safe.
 */
static void ax25_cb_del(ax25_cb *ax25)
{
	spin_lock_bh(&ax25_list_lock);
	if (!hlist_unhashed(&ax25->ax25_node)) {
		hlist_del_init(&ax25->ax25_node);
		ax25_cb_put(ax25);
	}
	spin_unlock_bh(&ax25_list_lock);
}
/*
 * Kill all bound sockets on a dropped device.
 */
static void ax25_kill_by_device(struct net_device *dev)
{
	ax25_dev *ax25_dev;
	ax25_cb *s;
	struct sock *sk;

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		return;
	ax25_dev->device_up = false;

	spin_lock_bh(&ax25_list_lock);
again:
	ax25_for_each(s, &ax25_list) {
		if (s->ax25_dev == ax25_dev) {
			sk = s->sk;
			if (!sk) {
				spin_unlock_bh(&ax25_list_lock);
				ax25_disconnect(s, ENETUNREACH);
				s->ax25_dev = NULL;
				ax25_cb_del(s);
				spin_lock_bh(&ax25_list_lock);
				goto again;
			}
			sock_hold(sk);
			spin_unlock_bh(&ax25_list_lock);
			lock_sock(sk);
			ax25_disconnect(s, ENETUNREACH);
			s->ax25_dev = NULL;
			if (sk->sk_socket) {
				netdev_put(ax25_dev->dev,
					   &ax25_dev->dev_tracker);
				ax25_dev_put(ax25_dev);
			}
			ax25_cb_del(s);
			release_sock(sk);
			spin_lock_bh(&ax25_list_lock);
			sock_put(sk);
			/* The entry could have been deleted from the
			 * list meanwhile and thus the next pointer is
			 * no longer valid. Play it safe and restart
			 * the scan. Forward progress is ensured
			 * because we set s->ax25_dev to NULL and we
			 * are never passed a NULL 'dev' argument.
			 */
			goto again;
		}
	}
	spin_unlock_bh(&ax25_list_lock);
}
/*
 * Handle device status changes.
 */
static int ax25_device_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	/* Reject non AX.25 devices */
	if (dev->type != ARPHRD_AX25)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		ax25_dev_device_up(dev);
		break;
	case NETDEV_DOWN:
		ax25_kill_by_device(dev);
		ax25_rt_device_down(dev);
		ax25_dev_device_down(dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
/*
 * Add a socket to the bound sockets list.
 */
void ax25_cb_add(ax25_cb *ax25)
{
	spin_lock_bh(&ax25_list_lock);
	ax25_cb_hold(ax25);
	hlist_add_head(&ax25->ax25_node, &ax25_list);
	spin_unlock_bh(&ax25_list_lock);
}
/*
 * Find a socket that wants to accept the SABM we have just
 * received.
 */
struct sock *ax25_find_listener(ax25_address *addr, int digi,
				struct net_device *dev, int type)
{
	ax25_cb *s;

	spin_lock(&ax25_list_lock);
	ax25_for_each(s, &ax25_list) {
		if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
			continue;
		if (s->sk && !ax25cmp(&s->source_addr, addr) &&
		    s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) {
			/* If device is null we match any device */
			if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
				sock_hold(s->sk);
				spin_unlock(&ax25_list_lock);
				return s->sk;
			}
		}
	}

	spin_unlock(&ax25_list_lock);
	return NULL;
}
/*
 * Find an AX.25 socket given both ends.
 */
struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
	int type)
{
	struct sock *sk = NULL;
	ax25_cb *s;

	spin_lock(&ax25_list_lock);
	ax25_for_each(s, &ax25_list) {
		if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
		    !ax25cmp(&s->dest_addr, dest_addr) &&
		    s->sk->sk_type == type) {
			sk = s->sk;
			sock_hold(sk);
			break;
		}
	}

	spin_unlock(&ax25_list_lock);

	return sk;
}
/*
 * Find an AX.25 control block given both ends. It will only pick up
 * floating AX.25 control blocks or non Raw socket bound control blocks.
 */
ax25_cb *ax25_find_cb(const ax25_address *src_addr, ax25_address *dest_addr,
	ax25_digi *digi, struct net_device *dev)
{
	ax25_cb *s;

	spin_lock_bh(&ax25_list_lock);
ax25_for_each(s, &ax25_list) {
if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
continue;
if (s->ax25_dev == NULL)
continue;
if (ax25cmp(&s->source_addr, src_addr) == 0 && ax25cmp(&s->dest_addr, dest_addr) == 0 && s->ax25_dev->dev == dev) {
if (digi != NULL && digi->ndigi != 0) {
if (s->digipeat == NULL)
continue;
if (ax25digicmp(s->digipeat, digi) != 0)
continue;
} else {
if (s->digipeat != NULL && s->digipeat->ndigi != 0)
continue;
}
ax25_cb_hold(s);
spin_unlock_bh(&ax25_list_lock);
return s;
}
}
spin_unlock_bh(&ax25_list_lock);
return NULL;
}
EXPORT_SYMBOL(ax25_find_cb);
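/*
 * Deliver a copy of a received frame to every matching raw socket.
 * The frame is cloned for each SOCK_RAW socket bound to the same
 * source address, protocol and device that still has receive buffer
 * space; clones that cannot be queued are freed.
 */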
void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
{
ax25_cb *s;
struct sk_buff *copy;
spin_lock(&ax25_list_lock);
ax25_for_each(s, &ax25_list) {
if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
s->sk->sk_type == SOCK_RAW &&
s->sk->sk_protocol == proto &&
s->ax25_dev->dev == skb->dev &&
atomic_read(&s->sk->sk_rmem_alloc) <= s->sk->sk_rcvbuf) {
if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL)
continue;
if (sock_queue_rcv_skb(s->sk, copy) != 0)
kfree_skb(copy);
}
}
spin_unlock(&ax25_list_lock);
}
/*
* Deferred destroy.
*/
void ax25_destroy_socket(ax25_cb *);
/*
* Handler for deferred kills.
*/
static void ax25_destroy_timer(struct timer_list *t)
{
ax25_cb *ax25 = from_timer(ax25, t, dtimer);
struct sock *sk;
sk = ax25->sk;
bh_lock_sock(sk);
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
sock_put(sk);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void ax25_destroy_socket(ax25_cb *ax25)
{
struct sk_buff *skb;
ax25_cb_del(ax25);
ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
ax25_clear_queues(ax25); /* Flush the queues */
if (ax25->sk != NULL) {
while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) {
if (skb->sk != ax25->sk) {
/* A pending connection */
ax25_cb *sax25 = sk_to_ax25(skb->sk);
/* Queue the unaccepted socket for death */
sock_orphan(skb->sk);
/* 9A4GL: hack to release unaccepted sockets */
skb->sk->sk_state = TCP_LISTEN;
ax25_start_heartbeat(sax25);
sax25->state = AX25_STATE_0;
}
kfree_skb(skb);
}
skb_queue_purge(&ax25->sk->sk_write_queue);
}
if (ax25->sk != NULL) {
if (sk_has_allocations(ax25->sk)) {
/* Defer: outstanding buffers */
timer_setup(&ax25->dtimer, ax25_destroy_timer, 0);
ax25->dtimer.expires = jiffies + 2 * HZ;
add_timer(&ax25->dtimer);
} else {
struct sock *sk = ax25->sk;
ax25->sk = NULL;
sock_put(sk);
}
} else {
ax25_cb_put(ax25);
}
}
/*
* dl1bke 960311: set parameters for existing AX.25 connections,
* includes a KILL command to abort any connection.
* VERY useful for debugging ;-)
*/
static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
{
struct ax25_ctl_struct ax25_ctl;
ax25_digi digi;
ax25_dev *ax25_dev;
ax25_cb *ax25;
unsigned int k;
int ret = 0;
if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
return -EFAULT;
if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
return -EINVAL;
if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
return -EINVAL;
ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr);
if (!ax25_dev)
return -ENODEV;
digi.ndigi = ax25_ctl.digi_count;
for (k = 0; k < digi.ndigi; k++)
digi.calls[k] = ax25_ctl.digi_addr[k];
ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev);
if (!ax25) {
ax25_dev_put(ax25_dev);
return -ENOTCONN;
}
switch (ax25_ctl.cmd) {
case AX25_KILL:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
#ifdef CONFIG_AX25_DAMA_SLAVE
if (ax25_dev->dama.slave && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
ax25_dama_off(ax25);
#endif
ax25_disconnect(ax25, ENETRESET);
break;
case AX25_WINDOW:
if (ax25->modulus == AX25_MODULUS) {
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
goto einval_put;
} else {
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
goto einval_put;
}
ax25->window = ax25_ctl.arg;
break;
case AX25_T1:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->rtt = (ax25_ctl.arg * HZ) / 2;
ax25->t1 = ax25_ctl.arg * HZ;
break;
case AX25_T2:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->t2 = ax25_ctl.arg * HZ;
break;
case AX25_N2:
if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
goto einval_put;
ax25->n2count = 0;
ax25->n2 = ax25_ctl.arg;
break;
case AX25_T3:
if (ax25_ctl.arg > ULONG_MAX / HZ)
goto einval_put;
ax25->t3 = ax25_ctl.arg * HZ;
break;
case AX25_IDLE:
if (ax25_ctl.arg > ULONG_MAX / (60 * HZ))
goto einval_put;
ax25->idle = ax25_ctl.arg * 60 * HZ;
break;
case AX25_PACLEN:
if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
goto einval_put;
ax25->paclen = ax25_ctl.arg;
break;
default:
goto einval_put;
}
out_put:
ax25_dev_put(ax25_dev);
ax25_cb_put(ax25);
return ret;
einval_put:
ret = -EINVAL;
goto out_put;
}
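/*
 * Copy the per-device AX.25 parameters (timers, retry and window
 * settings, modulus) from the device's value table into a control block.
 */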
static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
{
ax25->rtt = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2;
ax25->t1 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]);
ax25->t2 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T2]);
ax25->t3 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T3]);
ax25->n2 = ax25_dev->values[AX25_VALUES_N2];
ax25->paclen = ax25_dev->values[AX25_VALUES_PACLEN];
ax25->idle = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_IDLE]);
ax25->backoff = ax25_dev->values[AX25_VALUES_BACKOFF];
if (ax25_dev->values[AX25_VALUES_AXDEFMODE]) {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
}
}
/*
* Fill in a created AX.25 control block with the default
* values for a particular device.
*/
void ax25_fillin_cb(ax25_cb *ax25, ax25_dev *ax25_dev)
{
ax25->ax25_dev = ax25_dev;
if (ax25->ax25_dev != NULL) {
ax25_fillin_cb_from_dev(ax25, ax25_dev);
return;
}
/*
* No device, use kernel / AX.25 spec default values
*/
ax25->rtt = msecs_to_jiffies(AX25_DEF_T1) / 2;
ax25->t1 = msecs_to_jiffies(AX25_DEF_T1);
ax25->t2 = msecs_to_jiffies(AX25_DEF_T2);
ax25->t3 = msecs_to_jiffies(AX25_DEF_T3);
ax25->n2 = AX25_DEF_N2;
ax25->paclen = AX25_DEF_PACLEN;
ax25->idle = msecs_to_jiffies(AX25_DEF_IDLE);
ax25->backoff = AX25_DEF_BACKOFF;
if (AX25_DEF_AXDEFMODE) {
ax25->modulus = AX25_EMODULUS;
ax25->window = AX25_DEF_EWINDOW;
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = AX25_DEF_WINDOW;
}
}
/*
* Create an empty AX.25 control block.
*/
ax25_cb *ax25_create_cb(void)
{
ax25_cb *ax25;
if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
return NULL;
refcount_set(&ax25->refcount, 1);
skb_queue_head_init(&ax25->write_queue);
skb_queue_head_init(&ax25->frag_queue);
skb_queue_head_init(&ax25->ack_queue);
skb_queue_head_init(&ax25->reseq_queue);
ax25_setup_timers(ax25);
ax25_fillin_cb(ax25, NULL);
ax25->state = AX25_STATE_0;
return ax25;
}
/*
* Handling for system calls applied via the various interfaces to an
* AX25 socket object
*/
static int ax25_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
ax25_cb *ax25;
struct net_device *dev;
char devname[IFNAMSIZ];
unsigned int opt;
int res = 0;
if (level != SOL_AX25)
return -ENOPROTOOPT;
if (optlen < sizeof(unsigned int))
return -EINVAL;
if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
return -EFAULT;
lock_sock(sk);
ax25 = sk_to_ax25(sk);
switch (optname) {
case AX25_WINDOW:
if (ax25->modulus == AX25_MODULUS) {
if (opt < 1 || opt > 7) {
res = -EINVAL;
break;
}
} else {
if (opt < 1 || opt > 63) {
res = -EINVAL;
break;
}
}
ax25->window = opt;
break;
case AX25_T1:
if (opt < 1 || opt > UINT_MAX / HZ) {
res = -EINVAL;
break;
}
ax25->rtt = (opt * HZ) >> 1;
ax25->t1 = opt * HZ;
break;
case AX25_T2:
if (opt < 1 || opt > UINT_MAX / HZ) {
res = -EINVAL;
break;
}
ax25->t2 = opt * HZ;
break;
case AX25_N2:
if (opt < 1 || opt > 31) {
res = -EINVAL;
break;
}
ax25->n2 = opt;
break;
case AX25_T3:
if (opt < 1 || opt > UINT_MAX / HZ) {
res = -EINVAL;
break;
}
ax25->t3 = opt * HZ;
break;
case AX25_IDLE:
if (opt > UINT_MAX / (60 * HZ)) {
res = -EINVAL;
break;
}
ax25->idle = opt * 60 * HZ;
break;
case AX25_BACKOFF:
if (opt > 2) {
res = -EINVAL;
break;
}
ax25->backoff = opt;
break;
case AX25_EXTSEQ:
ax25->modulus = opt ? AX25_EMODULUS : AX25_MODULUS;
break;
case AX25_PIDINCL:
ax25->pidincl = opt ? 1 : 0;
break;
case AX25_IAMDIGI:
ax25->iamdigi = opt ? 1 : 0;
break;
case AX25_PACLEN:
if (opt < 16 || opt > 65535) {
res = -EINVAL;
break;
}
ax25->paclen = opt;
break;
case SO_BINDTODEVICE:
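/*
 * Bind the socket to a named AX.25 device: copy at most IFNAMSIZ - 1
 * bytes of the device name from userspace, look the device up and
 * refill the control block with that device's defaults. Illustrative
 * userspace call (the device name "ax0" is only an example):
 *   setsockopt(fd, SOL_AX25, SO_BINDTODEVICE, "ax0", sizeof("ax0"));
 */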
if (optlen > IFNAMSIZ - 1)
optlen = IFNAMSIZ - 1;
memset(devname, 0, sizeof(devname));
if (copy_from_sockptr(devname, optval, optlen)) {
res = -EFAULT;
break;
}
if (sk->sk_type == SOCK_SEQPACKET &&
(sock->state != SS_UNCONNECTED ||
sk->sk_state == TCP_LISTEN)) {
res = -EADDRNOTAVAIL;
break;
}
rtnl_lock();
dev = __dev_get_by_name(&init_net, devname);
if (!dev) {
rtnl_unlock();
res = -ENODEV;
break;
}
ax25->ax25_dev = ax25_dev_ax25dev(dev);
if (!ax25->ax25_dev) {
rtnl_unlock();
res = -ENODEV;
break;
}
ax25_fillin_cb(ax25, ax25->ax25_dev);
rtnl_unlock();
break;
default:
res = -ENOPROTOOPT;
}
release_sock(sk);
return res;
}
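/*
 * Read back a single AX.25 socket option. Timer values are returned
 * in seconds (minutes for AX25_IDLE); SO_BINDTODEVICE returns the name
 * of the bound device, or an empty string if none is bound.
 */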
static int ax25_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
ax25_cb *ax25;
struct ax25_dev *ax25_dev;
char devname[IFNAMSIZ];
void *valptr;
int val = 0;
int maxlen, length;
if (level != SOL_AX25)
return -ENOPROTOOPT;
if (get_user(maxlen, optlen))
return -EFAULT;
if (maxlen < 1)
return -EFAULT;
valptr = &val;
length = min_t(unsigned int, maxlen, sizeof(int));
lock_sock(sk);
ax25 = sk_to_ax25(sk);
switch (optname) {
case AX25_WINDOW:
val = ax25->window;
break;
case AX25_T1:
val = ax25->t1 / HZ;
break;
case AX25_T2:
val = ax25->t2 / HZ;
break;
case AX25_N2:
val = ax25->n2;
break;
case AX25_T3:
val = ax25->t3 / HZ;
break;
case AX25_IDLE:
val = ax25->idle / (60 * HZ);
break;
case AX25_BACKOFF:
val = ax25->backoff;
break;
case AX25_EXTSEQ:
val = (ax25->modulus == AX25_EMODULUS);
break;
case AX25_PIDINCL:
val = ax25->pidincl;
break;
case AX25_IAMDIGI:
val = ax25->iamdigi;
break;
case AX25_PACLEN:
val = ax25->paclen;
break;
case SO_BINDTODEVICE:
ax25_dev = ax25->ax25_dev;
if (ax25_dev != NULL && ax25_dev->dev != NULL) {
strscpy(devname, ax25_dev->dev->name, sizeof(devname));
length = strlen(devname) + 1;
} else {
*devname = '\0';
length = 1;
}
valptr = devname;
break;
default:
release_sock(sk);
return -ENOPROTOOPT;
}
release_sock(sk);
if (put_user(length, optlen))
return -EFAULT;
return copy_to_user(optval, valptr, length) ? -EFAULT : 0;
}
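/*
 * Put a connected-mode (SOCK_SEQPACKET) socket into the listening
 * state; listening is not supported for other socket types.
 */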
static int ax25_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int res = 0;
lock_sock(sk);
if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_LISTEN) {
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
goto out;
}
res = -EOPNOTSUPP;
out:
release_sock(sk);
return res;
}
/*
* XXX: when creating ax25_sock we should update the .obj_size setting
* below.
*/
static struct proto ax25_proto = {
.name = "AX25",
.owner = THIS_MODULE,
.obj_size = sizeof(struct ax25_sock),
};
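/*
 * Create a new AX.25 socket. Protocol 0 (or PF_AX25) defaults to
 * AX25_P_TEXT, protocol values reserved for in-kernel users (IP, ARP,
 * NET/ROM, ROSE, segmentation) are refused, and SOCK_RAW requires
 * CAP_NET_RAW.
 */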
static int ax25_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
ax25_cb *ax25;
if (protocol < 0 || protocol > U8_MAX)
return -EINVAL;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
switch (sock->type) {
case SOCK_DGRAM:
if (protocol == 0 || protocol == PF_AX25)
protocol = AX25_P_TEXT;
break;
case SOCK_SEQPACKET:
switch (protocol) {
case 0:
case PF_AX25: /* For CLX */
protocol = AX25_P_TEXT;
break;
case AX25_P_SEGMENT:
#ifdef CONFIG_INET
case AX25_P_ARP:
case AX25_P_IP:
#endif
#ifdef CONFIG_NETROM
case AX25_P_NETROM:
#endif
#ifdef CONFIG_ROSE
case AX25_P_ROSE:
#endif
return -ESOCKTNOSUPPORT;
#ifdef CONFIG_NETROM_MODULE
case AX25_P_NETROM:
if (ax25_protocol_is_registered(AX25_P_NETROM))
return -ESOCKTNOSUPPORT;
break;
#endif
#ifdef CONFIG_ROSE_MODULE
case AX25_P_ROSE:
if (ax25_protocol_is_registered(AX25_P_ROSE))
return -ESOCKTNOSUPPORT;
break;
#endif
default:
break;
}
break;
case SOCK_RAW:
if (!capable(CAP_NET_RAW))
return -EPERM;
break;
default:
return -ESOCKTNOSUPPORT;
}
sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, kern);
if (sk == NULL)
return -ENOMEM;
ax25 = ax25_sk(sk)->cb = ax25_create_cb();
if (!ax25) {
sk_free(sk);
return -ENOMEM;
}
sock_init_data(sock, sk);
sk->sk_destruct = ax25_free_sock;
sock->ops = &ax25_proto_ops;
sk->sk_protocol = protocol;
ax25->sk = sk;
return 0;
}
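/*
 * Build the child socket for an incoming connection, copying the
 * listening socket's type, buffer sizes, AX.25 parameters and, if
 * present, its digipeater path.
 */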
struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
{
struct sock *sk;
ax25_cb *ax25, *oax25;
sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot, 0);
if (sk == NULL)
return NULL;
if ((ax25 = ax25_create_cb()) == NULL) {
sk_free(sk);
return NULL;
}
switch (osk->sk_type) {
case SOCK_DGRAM:
break;
case SOCK_SEQPACKET:
break;
default:
sk_free(sk);
ax25_cb_put(ax25);
return NULL;
}
sock_init_data(NULL, sk);
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
oax25 = sk_to_ax25(osk);
ax25->modulus = oax25->modulus;
ax25->backoff = oax25->backoff;
ax25->pidincl = oax25->pidincl;
ax25->iamdigi = oax25->iamdigi;
ax25->rtt = oax25->rtt;
ax25->t1 = oax25->t1;
ax25->t2 = oax25->t2;
ax25->t3 = oax25->t3;
ax25->n2 = oax25->n2;
ax25->idle = oax25->idle;
ax25->paclen = oax25->paclen;
ax25->window = oax25->window;
ax25->ax25_dev = ax25_dev;
ax25->source_addr = oax25->source_addr;
if (oax25->digipeat != NULL) {
ax25->digipeat = kmemdup(oax25->digipeat, sizeof(ax25_digi),
GFP_ATOMIC);
if (ax25->digipeat == NULL) {
sk_free(sk);
ax25_cb_put(ax25);
return NULL;
}
}
ax25_sk(sk)->cb = ax25;
sk->sk_destruct = ax25_free_sock;
ax25->sk = sk;
return sk;
}
static int ax25_release(struct socket *sock)
{
struct sock *sk = sock->sk;
ax25_cb *ax25;
ax25_dev *ax25_dev;
if (sk == NULL)
return 0;
sock_hold(sk);
lock_sock(sk);
sock_orphan(sk);
ax25 = sk_to_ax25(sk);
ax25_dev = ax25->ax25_dev;
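/*
 * Connected-mode sockets are torn down according to their state:
 * idle ones are destroyed immediately, ones still connecting or
 * disconnecting are disconnected first, and established ones send a
 * DISC and linger in AX25_STATE_2 until the link layer finishes.
 */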
if (sk->sk_type == SOCK_SEQPACKET) {
switch (ax25->state) {
case AX25_STATE_0:
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
release_sock(sk);
ax25_disconnect(ax25, 0);
lock_sock(sk);
}
ax25_destroy_socket(ax25);
break;
case AX25_STATE_1:
case AX25_STATE_2:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
release_sock(sk);
ax25_disconnect(ax25, 0);
lock_sock(sk);
if (!sock_flag(ax25->sk, SOCK_DESTROY))
ax25_destroy_socket(ax25);
break;
case AX25_STATE_3:
case AX25_STATE_4:
ax25_clear_queues(ax25);
ax25->n2count = 0;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_send_control(ax25,
AX25_DISC,
AX25_POLLON,
AX25_COMMAND);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
break;
#endif
}
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25->state = AX25_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
} else {
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
ax25_destroy_socket(ax25);
}
if (ax25_dev) {
if (!ax25_dev->device_up) {
del_timer_sync(&ax25->timer);
del_timer_sync(&ax25->t1timer);
del_timer_sync(&ax25->t2timer);
del_timer_sync(&ax25->t3timer);
del_timer_sync(&ax25->idletimer);
}
netdev_put(ax25_dev->dev, &ax25->dev_tracker);
ax25_dev_put(ax25_dev);
}
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
/*
* We support a funny extension here so you can (as root) give any callsign
* digipeated via a local address as source. This hack is obsolete now
* that we've implemented support for SO_BINDTODEVICE. It is however small
* and trivially backward compatible.
*/
static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
ax25_dev *ax25_dev = NULL;
ax25_uid_assoc *user;
ax25_address call;
ax25_cb *ax25;
int err = 0;
if (addr_len != sizeof(struct sockaddr_ax25) &&
addr_len != sizeof(struct full_sockaddr_ax25))
/* support for old structure may go away some time
* ax25_bind(): uses old (6 digipeater) socket structure.
*/
if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
(addr_len > sizeof(struct full_sockaddr_ax25)))
return -EINVAL;
if (addr->fsa_ax25.sax25_family != AF_AX25)
return -EINVAL;
user = ax25_findbyuid(current_euid());
if (user) {
call = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_ADMIN))
return -EACCES;
call = addr->fsa_ax25.sax25_call;
}
lock_sock(sk);
ax25 = sk_to_ax25(sk);
if (!sock_flag(sk, SOCK_ZAPPED)) {
err = -EINVAL;
goto out;
}
ax25->source_addr = call;
/*
* User already set interface with SO_BINDTODEVICE
*/
if (ax25->ax25_dev != NULL)
goto done;
if (addr_len > sizeof(struct sockaddr_ax25) && addr->fsa_ax25.sax25_ndigis == 1) {
if (ax25cmp(&addr->fsa_digipeater[0], &null_ax25_address) != 0 &&
(ax25_dev = ax25_addr_ax25dev(&addr->fsa_digipeater[0])) == NULL) {
err = -EADDRNOTAVAIL;
goto out;
}
} else {
if ((ax25_dev = ax25_addr_ax25dev(&addr->fsa_ax25.sax25_call)) == NULL) {
err = -EADDRNOTAVAIL;
goto out;
}
}
if (ax25_dev) {
ax25_fillin_cb(ax25, ax25_dev);
netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
}
done:
ax25_cb_add(ax25);
sock_reset_flag(sk, SOCK_ZAPPED);
out:
release_sock(sk);
return err;
}
/*
* FIXME: nonblock behaviour looks like it may have a bug.
*/
static int __must_check ax25_connect(struct socket *sock,
struct sockaddr *uaddr, int addr_len, int flags)
{
struct sock *sk = sock->sk;
ax25_cb *ax25 = sk_to_ax25(sk), *ax25t;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr;
ax25_digi *digi = NULL;
int ct = 0, err = 0;
/*
* some sanity checks. code further down depends on this
*/
if (addr_len == sizeof(struct sockaddr_ax25))
/* support for this will go away in early 2.5.x
* ax25_connect(): uses obsolete socket structure
*/
;
else if (addr_len != sizeof(struct full_sockaddr_ax25))
/* support for old structure may go away some time
* ax25_connect(): uses old (6 digipeater) socket structure.
*/
if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
(addr_len > sizeof(struct full_sockaddr_ax25)))
return -EINVAL;
if (fsa->fsa_ax25.sax25_family != AF_AX25)
return -EINVAL;
lock_sock(sk);
/* deal with restarts */
if (sock->state == SS_CONNECTING) {
switch (sk->sk_state) {
case TCP_SYN_SENT: /* still trying */
err = -EINPROGRESS;
goto out_release;
case TCP_ESTABLISHED: /* connection established */
sock->state = SS_CONNECTED;
goto out_release;
case TCP_CLOSE: /* connection refused */
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
}
if (sk->sk_state == TCP_ESTABLISHED && sk->sk_type == SOCK_SEQPACKET) {
err = -EISCONN; /* No reconnect on a seqpacket socket */
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
kfree(ax25->digipeat);
ax25->digipeat = NULL;
/*
* Handle digi-peaters to be used.
*/
if (addr_len > sizeof(struct sockaddr_ax25) &&
fsa->fsa_ax25.sax25_ndigis != 0) {
/* Valid number of digipeaters ? */
if (fsa->fsa_ax25.sax25_ndigis < 1 ||
fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
addr_len < sizeof(struct sockaddr_ax25) +
sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
err = -EINVAL;
goto out_release;
}
if ((digi = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) {
err = -ENOBUFS;
goto out_release;
}
digi->ndigi = fsa->fsa_ax25.sax25_ndigis;
digi->lastrepeat = -1;
while (ct < fsa->fsa_ax25.sax25_ndigis) {
if ((fsa->fsa_digipeater[ct].ax25_call[6] &
AX25_HBIT) && ax25->iamdigi) {
digi->repeated[ct] = 1;
digi->lastrepeat = ct;
} else {
digi->repeated[ct] = 0;
}
digi->calls[ct] = fsa->fsa_digipeater[ct];
ct++;
}
}
/*
* Must bind first - autobinding in this may or may not work. If
* the socket is already bound, check to see if the device has
* been filled in, error if it hasn't.
*/
if (sock_flag(sk, SOCK_ZAPPED)) {
/* check if we can remove this feature. It is broken. */
printk(KERN_WARNING "ax25_connect(): %s uses autobind, please contact jreuter@yaina.de\n",
current->comm);
if ((err = ax25_rt_autobind(ax25, &fsa->fsa_ax25.sax25_call)) < 0) {
kfree(digi);
goto out_release;
}
ax25_fillin_cb(ax25, ax25->ax25_dev);
ax25_cb_add(ax25);
} else {
if (ax25->ax25_dev == NULL) {
kfree(digi);
err = -EHOSTUNREACH;
goto out_release;
}
}
if (sk->sk_type == SOCK_SEQPACKET &&
(ax25t = ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call, digi,
ax25->ax25_dev->dev))) {
kfree(digi);
err = -EADDRINUSE; /* Already such a connection */
ax25_cb_put(ax25t);
goto out_release;
}
ax25->dest_addr = fsa->fsa_ax25.sax25_call;
ax25->digipeat = digi;
/* First the easy one */
if (sk->sk_type != SOCK_SEQPACKET) {
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
goto out_release;
}
/* Move to connecting socket, ax.25 lapb WAIT_UA.. */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_establish_data_link(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
if (ax25->ax25_dev->dama.slave)
ax25_ds_establish_data_link(ax25);
else
ax25_std_establish_data_link(ax25);
break;
#endif
}
ax25->state = AX25_STATE_1;
ax25_start_heartbeat(ax25);
/* Now the loop */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
/* Not in ABM, not in WAIT_UA -> failed */
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
err = 0;
out_release:
release_sock(sk);
return err;
}
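/*
 * Accept an incoming connection. The listen queue already holds
 * sockets created by ax25_make_new() when the SABM arrived, so this
 * only dequeues one (sleeping if necessary) and grafts it onto newsock.
 */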
static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if (sock->state != SS_UNCONNECTED)
return -EINVAL;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out;
}
/*
* The read queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
kfree_skb(skb);
sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
out:
release_sock(sk);
return err;
}
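/*
 * Report the socket's addresses. With peer != 0 this returns the
 * connected destination and its digipeater path; otherwise it returns
 * the bound source callsign with the device address as sole digipeater.
 */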
static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr;
struct sock *sk = sock->sk;
unsigned char ndigi, i;
ax25_cb *ax25;
int err = 0;
memset(fsa, 0, sizeof(*fsa));
lock_sock(sk);
ax25 = sk_to_ax25(sk);
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
fsa->fsa_ax25.sax25_family = AF_AX25;
fsa->fsa_ax25.sax25_call = ax25->dest_addr;
if (ax25->digipeat != NULL) {
ndigi = ax25->digipeat->ndigi;
fsa->fsa_ax25.sax25_ndigis = ndigi;
for (i = 0; i < ndigi; i++)
fsa->fsa_digipeater[i] =
ax25->digipeat->calls[i];
}
} else {
fsa->fsa_ax25.sax25_family = AF_AX25;
fsa->fsa_ax25.sax25_call = ax25->source_addr;
fsa->fsa_ax25.sax25_ndigis = 1;
if (ax25->ax25_dev != NULL) {
memcpy(&fsa->fsa_digipeater[0],
ax25->ax25_dev->dev->dev_addr, AX25_ADDR_LEN);
} else {
fsa->fsa_digipeater[0] = null_ax25_address;
}
}
err = sizeof (struct full_sockaddr_ax25);
out:
release_sock(sk);
return err;
}
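/*
 * Transmit user data. Connected-mode (SOCK_SEQPACKET) data is passed
 * to the LAPB machine via ax25_output(); datagram data is built into
 * a UI frame and sent straight out via ax25_queue_xmit().
 */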
static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
DECLARE_SOCKADDR(struct sockaddr_ax25 *, usax, msg->msg_name);
struct sock *sk = sock->sk;
struct sockaddr_ax25 sax;
struct sk_buff *skb;
ax25_digi dtmp, *dp;
ax25_cb *ax25;
size_t size;
int lv, err, addr_len = msg->msg_namelen;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
lock_sock(sk);
ax25 = sk_to_ax25(sk);
if (sock_flag(sk, SOCK_ZAPPED)) {
err = -EADDRNOTAVAIL;
goto out;
}
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
goto out;
}
if (ax25->ax25_dev == NULL) {
err = -ENETUNREACH;
goto out;
}
if (len > ax25->ax25_dev->dev->mtu) {
err = -EMSGSIZE;
goto out;
}
if (usax != NULL) {
if (usax->sax25_family != AF_AX25) {
err = -EINVAL;
goto out;
}
if (addr_len == sizeof(struct sockaddr_ax25))
/* ax25_sendmsg(): uses obsolete socket structure */
;
else if (addr_len != sizeof(struct full_sockaddr_ax25))
/* support for old structure may go away some time
* ax25_sendmsg(): uses old (6 digipeater)
* socket structure.
*/
if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
(addr_len > sizeof(struct full_sockaddr_ax25))) {
err = -EINVAL;
goto out;
}
if (addr_len > sizeof(struct sockaddr_ax25) && usax->sax25_ndigis != 0) {
int ct = 0;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
/* Valid number of digipeaters ? */
if (usax->sax25_ndigis < 1 ||
usax->sax25_ndigis > AX25_MAX_DIGIS ||
addr_len < sizeof(struct sockaddr_ax25) +
sizeof(ax25_address) * usax->sax25_ndigis) {
err = -EINVAL;
goto out;
}
dtmp.ndigi = usax->sax25_ndigis;
while (ct < usax->sax25_ndigis) {
dtmp.repeated[ct] = 0;
dtmp.calls[ct] = fsa->fsa_digipeater[ct];
ct++;
}
dtmp.lastrepeat = 0;
}
sax = *usax;
if (sk->sk_type == SOCK_SEQPACKET &&
ax25cmp(&ax25->dest_addr, &sax.sax25_call)) {
err = -EISCONN;
goto out;
}
if (usax->sax25_ndigis == 0)
dp = NULL;
else
dp = &dtmp;
} else {
/*
* FIXME: 1003.1g - if the socket is like this because
* it has become closed (not started closed) and is VC
* we ought to SIGPIPE, EPIPE
*/
if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
sax.sax25_family = AF_AX25;
sax.sax25_call = ax25->dest_addr;
dp = ax25->digipeat;
}
/* Build a packet */
/* Assume the worst case */
size = len + ax25->ax25_dev->dev->hard_header_len;
skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
skb_reserve(skb, size - len);
/* User data follows immediately after the AX.25 data */
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
err = -EFAULT;
kfree_skb(skb);
goto out;
}
skb_reset_network_header(skb);
/* Add the PID if one is not supplied by the user in the skb */
if (!ax25->pidincl)
*(u8 *)skb_push(skb, 1) = sk->sk_protocol;
if (sk->sk_type == SOCK_SEQPACKET) {
/* Connected mode sockets go via the LAPB machine */
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
err = -ENOTCONN;
goto out;
}
/* Shove it onto the queue and kick */
ax25_output(ax25, ax25->paclen, skb);
err = len;
goto out;
}
skb_push(skb, 1 + ax25_addr_size(dp));
/* Build the AX.25 header */
lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call,
dp, AX25_COMMAND, AX25_MODULUS);
skb_set_transport_header(skb, lv);
*skb_transport_header(skb) = AX25_UI;
/* Datagram frames go straight out of the door as UI */
ax25_queue_xmit(skb, ax25->ax25_dev->dev);
err = len;
out:
release_sock(sk);
return err;
}
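/*
 * ax25_recvmsg() deliberately open-codes skb_recv_datagram(): when the
 * receive queue is empty it drops the socket lock before sleeping in
 * __skb_wait_for_more_packets() and re-takes it once a frame arrives, so
 * a blocked read() cannot hold the lock and deadlock other callers (such
 * as ax25_bind()) that also need lock_sock().
 */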
static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb, *last;
struct sk_buff_head *sk_queue;
int copied;
int err = 0;
int off = 0;
long timeo;
lock_sock(sk);
/*
* This works for seqpacket too. The receiver has ordered the
* queue for us! We do one quick check first though
*/
if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
/* We need support for non-blocking reads. */
sk_queue = &sk->sk_receive_queue;
skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off, &err, &last);
/* If no packet is available, release_sock(sk) and try again. */
if (!skb) {
if (err != -EAGAIN)
goto out;
release_sock(sk);
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
while (timeo && !__skb_wait_for_more_packets(sk, sk_queue, &err,
&timeo, last)) {
skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off,
&err, &last);
if (skb)
break;
if (err != -EAGAIN)
goto done;
}
if (!skb)
goto done;
lock_sock(sk);
}
if (!sk_to_ax25(sk)->pidincl)
skb_pull(skb, 1); /* Remove PID */
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_msg(skb, 0, msg, copied);
if (msg->msg_name) {
ax25_digi digi;
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);
DECLARE_SOCKADDR(struct sockaddr_ax25 *, sax, msg->msg_name);
memset(sax, 0, sizeof(struct full_sockaddr_ax25));
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
&digi, NULL, NULL);
sax->sax25_family = AF_AX25;
/* We set this correctly, even though we may not let the
 * application know the digi calls further down (because it
 * did NOT ask to know them).  This could get political...
 */
sax->sax25_ndigis = digi.ndigi;
sax->sax25_call = src;
if (sax->sax25_ndigis != 0) {
int ct;
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax;
for (ct = 0; ct < digi.ndigi; ct++)
fsa->fsa_digipeater[ct] = digi.calls[ct];
}
msg->msg_namelen = sizeof(struct full_sockaddr_ax25);
}
skb_free_datagram(sk, skb);
err = copied;
out:
release_sock(sk);
done:
return err;
}
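/*
 * Userspace counterpart of ax25_recvmsg() above, as a sketch only (error
 * handling omitted; "s" is assumed to be a bound AF_AX25 datagram socket
 * as in the ax25_sendmsg() sketch):
 *
 *	#include <sys/socket.h>
 *	#include <netax25/ax25.h>
 *
 *	struct full_sockaddr_ax25 from;
 *	socklen_t fromlen = sizeof(from);
 *	char buf[512];
 *	ssize_t n = recvfrom(s, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 *
 *	// from.fsa_ax25.sax25_call is the sender; when sax25_ndigis is
 *	// non-zero, from.fsa_digipeater[] carries the digipeater path.
 */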
static int ax25_shutdown(struct socket *sk, int how)
{
/* FIXME - generate DM and RNR states */
return -EOPNOTSUPP;
}
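/*
 * ax25_ioctl() handles the AX.25 socket ioctls: TIOCOUTQ/TIOCINQ report
 * the send and receive queue sizes, the SIOCAX25* calls manage the
 * uid/callsign map, routes, forwarding and per-connection parameters,
 * and SIOCAX25GETINFO copies a struct ax25_info_struct snapshot of the
 * timers and state variables to the caller.
 *
 * Sketch of a userspace query (an illustration only, assuming the uapi
 * <linux/ax25.h> definitions of SIOCAX25GETINFO and struct
 * ax25_info_struct; "s" is a connected AF_AX25 socket):
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ax25.h>
 *
 *	struct ax25_info_struct info;
 *	if (ioctl(s, SIOCAX25GETINFO, &info) == 0)
 *		printf("state %u, retries %u/%u, window %u\n",
 *		       info.state, info.n2count, info.n2, info.window);
 */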
static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
int res = 0;
lock_sock(sk);
switch (cmd) {
case TIOCOUTQ: {
long amount;
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
res = put_user(amount, (int __user *)argp);
break;
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
res = put_user(amount, (int __user *) argp);
break;
}
case SIOCAX25ADDUID: /* Add a uid to the uid/call map table */
case SIOCAX25DELUID: /* Delete a uid from the uid/call map table */
case SIOCAX25GETUID: {
struct sockaddr_ax25 sax25;
if (copy_from_user(&sax25, argp, sizeof(sax25))) {
res = -EFAULT;
break;
}
res = ax25_uid_ioctl(cmd, &sax25);
break;
}
case SIOCAX25NOUID: { /* Set the default policy (default/bar) */
long amount;
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
if (get_user(amount, (long __user *)argp)) {
res = -EFAULT;
break;
}
if (amount < 0 || amount > AX25_NOUID_BLOCK) {
res = -EINVAL;
break;
}
ax25_uid_policy = amount;
res = 0;
break;
}
case SIOCADDRT:
case SIOCDELRT:
case SIOCAX25OPTRT:
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
res = ax25_rt_ioctl(cmd, argp);
break;
case SIOCAX25CTLCON:
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
res = ax25_ctl_ioctl(cmd, argp);
break;
case SIOCAX25GETINFO:
case SIOCAX25GETINFOOLD: {
ax25_cb *ax25 = sk_to_ax25(sk);
struct ax25_info_struct ax25_info;
ax25_info.t1 = ax25->t1 / HZ;
ax25_info.t2 = ax25->t2 / HZ;
ax25_info.t3 = ax25->t3 / HZ;
ax25_info.idle = ax25->idle / (60 * HZ);
ax25_info.n2 = ax25->n2;
ax25_info.t1timer = ax25_display_timer(&ax25->t1timer) / HZ;
ax25_info.t2timer = ax25_display_timer(&ax25->t2timer) / HZ;
ax25_info.t3timer = ax25_display_timer(&ax25->t3timer) / HZ;
ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ);
ax25_info.n2count = ax25->n2count;
ax25_info.state = ax25->state;
ax25_info.rcv_q = sk_rmem_alloc_get(sk);
ax25_info.snd_q = sk_wmem_alloc_get(sk);
ax25_info.vs = ax25->vs;
ax25_info.vr = ax25->vr;
ax25_info.va = ax25->va;
ax25_info.vs_max = ax25->vs; /* reserved */
ax25_info.paclen = ax25->paclen;
ax25_info.window = ax25->window;
/* old structure? */
if (cmd == SIOCAX25GETINFOOLD) {
static int warned = 0;
if (!warned) {
printk(KERN_INFO "%s uses old SIOCAX25GETINFO\n",
current->comm);
warned = 1;
}
if (copy_to_user(argp, &ax25_info, sizeof(struct ax25_info_struct_deprecated))) {
res = -EFAULT;
break;
}
} else {
if (copy_to_user(argp, &ax25_info, sizeof(struct ax25_info_struct))) {
res = -EFAULT;
break;
}
}
res = 0;
break;
}
case SIOCAX25ADDFWD:
case SIOCAX25DELFWD: {
struct ax25_fwd_struct ax25_fwd;
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
if (copy_from_user(&ax25_fwd, argp, sizeof(ax25_fwd))) {
res = -EFAULT;
break;
}
res = ax25_fwd_ioctl(cmd, &ax25_fwd);
break;
}
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
res = -EINVAL;
break;
default:
res = -ENOIOCTLCMD;
break;
}
release_sock(sk);
return res;
}
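/*
 * /proc/net/ax25 support: the seq_file iterators below walk ax25_list
 * under ax25_list_lock and ax25_info_show() prints one control block per
 * line in the format documented inside it.
 */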
#ifdef CONFIG_PROC_FS
static void *ax25_info_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_list_lock)
{
spin_lock_bh(&ax25_list_lock);
return seq_hlist_start(&ax25_list, *pos);
}
static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &ax25_list, pos);
}
static void ax25_info_stop(struct seq_file *seq, void *v)
__releases(ax25_list_lock)
{
spin_unlock_bh(&ax25_list_lock);
}
static int ax25_info_show(struct seq_file *seq, void *v)
{
ax25_cb *ax25 = hlist_entry(v, struct ax25_cb, ax25_node);
char buf[11];
int k;
/*
* New format:
* magic dev src_addr dest_addr,digi1,digi2,.. st vs vr va t1 t1 t2 t2 t3 t3 idle idle n2 n2 rtt window paclen Snd-Q Rcv-Q inode
*/
seq_printf(seq, "%p %s %s%s ",
ax25,
ax25->ax25_dev == NULL? "???" : ax25->ax25_dev->dev->name,
ax2asc(buf, &ax25->source_addr),
ax25->iamdigi? "*":"");
seq_printf(seq, "%s", ax2asc(buf, &ax25->dest_addr));
for (k=0; (ax25->digipeat != NULL) && (k < ax25->digipeat->ndigi); k++) {
seq_printf(seq, ",%s%s",
ax2asc(buf, &ax25->digipeat->calls[k]),
ax25->digipeat->repeated[k]? "*":"");
}
seq_printf(seq, " %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %d %d",
ax25->state,
ax25->vs, ax25->vr, ax25->va,
ax25_display_timer(&ax25->t1timer) / HZ, ax25->t1 / HZ,
ax25_display_timer(&ax25->t2timer) / HZ, ax25->t2 / HZ,
ax25_display_timer(&ax25->t3timer) / HZ, ax25->t3 / HZ,
ax25_display_timer(&ax25->idletimer) / (60 * HZ),
ax25->idle / (60 * HZ),
ax25->n2count, ax25->n2,
ax25->rtt / HZ,
ax25->window,
ax25->paclen);
if (ax25->sk != NULL) {
seq_printf(seq, " %d %d %lu\n",
sk_wmem_alloc_get(ax25->sk),
sk_rmem_alloc_get(ax25->sk),
sock_i_ino(ax25->sk));
} else {
seq_puts(seq, " * * *\n");
}
return 0;
}
static const struct seq_operations ax25_info_seqops = {
.start = ax25_info_start,
.next = ax25_info_next,
.stop = ax25_info_stop,
.show = ax25_info_show,
};
#endif
static const struct net_proto_family ax25_family_ops = {
.family = PF_AX25,
.create = ax25_create,
.owner = THIS_MODULE,
};
static const struct proto_ops ax25_proto_ops = {
.family = PF_AX25,
.owner = THIS_MODULE,
.release = ax25_release,
.bind = ax25_bind,
.connect = ax25_connect,
.socketpair = sock_no_socketpair,
.accept = ax25_accept,
.getname = ax25_getname,
.poll = datagram_poll,
.ioctl = ax25_ioctl,
.gettstamp = sock_gettstamp,
.listen = ax25_listen,
.shutdown = ax25_shutdown,
.setsockopt = ax25_setsockopt,
.getsockopt = ax25_getsockopt,
.sendmsg = ax25_sendmsg,
.recvmsg = ax25_recvmsg,
.mmap = sock_no_mmap,
};
/*
* Called by socket.c on kernel start up
*/
static struct packet_type ax25_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_AX25),
.func = ax25_kiss_rcv,
};
static struct notifier_block ax25_dev_notifier = {
.notifier_call = ax25_device_event,
};
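/*
 * Module init: register the AX.25 protocol and socket family, hook the
 * ETH_P_AX25 packet type and the netdevice notifier, and create the
 * /proc/net/ax25, ax25_route and ax25_calls entries.
 */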
static int __init ax25_init(void)
{
int rc = proto_register(&ax25_proto, 0);
if (rc != 0)
goto out;
sock_register(&ax25_family_ops);
dev_add_pack(&ax25_packet_type);
register_netdevice_notifier(&ax25_dev_notifier);
proc_create_seq("ax25_route", 0444, init_net.proc_net, &ax25_rt_seqops);
proc_create_seq("ax25", 0444, init_net.proc_net, &ax25_info_seqops);
proc_create_seq("ax25_calls", 0444, init_net.proc_net,
&ax25_uid_seqops);
out:
return rc;
}
module_init(ax25_init);
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio AX.25 link layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_AX25);
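/*
 * Module exit: remove the proc entries and unregister everything set up
 * by ax25_init(), then free the route, uid and device tables.
 */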
static void __exit ax25_exit(void)
{
remove_proc_entry("ax25_route", init_net.proc_net);
remove_proc_entry("ax25", init_net.proc_net);
remove_proc_entry("ax25_calls", init_net.proc_net);
unregister_netdevice_notifier(&ax25_dev_notifier);
dev_remove_pack(&ax25_packet_type);
sock_unregister(PF_AX25);
proto_unregister(&ax25_proto);
ax25_rt_free();
ax25_uid_free();
ax25_dev_free();
}
module_exit(ax25_exit);