linux/fs/ksmbd/transport_ipc.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2018 Samsung Electronics Co., Ltd.
*/
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/hashtable.h>
#include <net/net_namespace.h>
#include <net/genetlink.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include "vfs_cache.h"
#include "transport_ipc.h"
#include "server.h"
#include "smb_common.h"
#include "mgmt/user_config.h"
#include "mgmt/share_config.h"
#include "mgmt/user_session.h"
#include "mgmt/tree_connect.h"
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
#include "transport_rdma.h"
#define IPC_WAIT_TIMEOUT (2 * HZ)
#define IPC_MSG_HASH_BITS 3
static DEFINE_HASHTABLE(ipc_msg_table, IPC_MSG_HASH_BITS);
static DECLARE_RWSEM(ipc_msg_table_lock);
static DEFINE_MUTEX(startup_lock);
static DEFINE_IDA(ipc_ida);
static unsigned int ksmbd_tools_pid;
static bool ksmbd_ipc_validate_version(struct genl_info *m)
{
        if (m->genlhdr->version != KSMBD_GENL_VERSION) {
                pr_err("%s. ksmbd: %d, kernel module: %d. %s.\n",
                       "Daemon and kernel module version mismatch",
                       m->genlhdr->version,
                       KSMBD_GENL_VERSION,
                       "User-space ksmbd should terminate");
                return false;
        }
        return true;
}
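/*
 * ksmbd_ipc_msg is the envelope handed to the netlink layer: @type is the
 * KSMBD_EVENT_* id and @sz the length of the trailing payload. An
 * ipc_msg_table_entry is placed in ipc_msg_table (keyed by request handle)
 * for every outstanding request so handle_response() can find the sleeping
 * requester and hand over the response buffer.
 */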
struct ksmbd_ipc_msg {
        unsigned int type;
        unsigned int sz;
        unsigned char payload[];
};
struct ipc_msg_table_entry {
        unsigned int handle;
        unsigned int type;
        wait_queue_head_t wait;
        struct hlist_node ipc_table_hlist;
        void *response;
        unsigned int msg_sz;
};
static struct delayed_work ipc_timer_work;
static int handle_startup_event(struct sk_buff *skb, struct genl_info *info);
static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info);
static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
static int ksmbd_ipc_heartbeat_request(void);
static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX + 1] = {
[KSMBD_EVENT_UNSPEC] = {
.len = 0,
},
[KSMBD_EVENT_HEARTBEAT_REQUEST] = {
.len = sizeof(struct ksmbd_heartbeat),
},
[KSMBD_EVENT_STARTING_UP] = {
.len = sizeof(struct ksmbd_startup_request),
},
[KSMBD_EVENT_SHUTTING_DOWN] = {
.len = sizeof(struct ksmbd_shutdown_request),
},
[KSMBD_EVENT_LOGIN_REQUEST] = {
.len = sizeof(struct ksmbd_login_request),
},
[KSMBD_EVENT_LOGIN_RESPONSE] = {
.len = sizeof(struct ksmbd_login_response),
},
[KSMBD_EVENT_SHARE_CONFIG_REQUEST] = {
.len = sizeof(struct ksmbd_share_config_request),
},
[KSMBD_EVENT_SHARE_CONFIG_RESPONSE] = {
.len = sizeof(struct ksmbd_share_config_response),
},
[KSMBD_EVENT_TREE_CONNECT_REQUEST] = {
.len = sizeof(struct ksmbd_tree_connect_request),
},
[KSMBD_EVENT_TREE_CONNECT_RESPONSE] = {
.len = sizeof(struct ksmbd_tree_connect_response),
},
[KSMBD_EVENT_TREE_DISCONNECT_REQUEST] = {
.len = sizeof(struct ksmbd_tree_disconnect_request),
},
[KSMBD_EVENT_LOGOUT_REQUEST] = {
.len = sizeof(struct ksmbd_logout_request),
},
[KSMBD_EVENT_RPC_REQUEST] = {
},
[KSMBD_EVENT_RPC_RESPONSE] = {
},
[KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST] = {
},
[KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE] = {
},
};
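/*
 * Requests originate in the kernel, so receiving a *_REQUEST event from
 * user space is rejected via handle_unsupported_event(); only the
 * *_RESPONSE events and KSMBD_EVENT_STARTING_UP have real handlers.
 */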
static struct genl_ops ksmbd_genl_ops[] = {
{
.cmd = KSMBD_EVENT_UNSPEC,
.doit = handle_unsupported_event,
},
{
.cmd = KSMBD_EVENT_HEARTBEAT_REQUEST,
.doit = handle_unsupported_event,
},
{
.cmd = KSMBD_EVENT_STARTING_UP,
.doit = handle_startup_event,
},
{
.cmd = KSMBD_EVENT_SHUTTING_DOWN,
.doit = handle_unsupported_event,
},
{
.cmd = KSMBD_EVENT_LOGIN_REQUEST,
.doit = handle_unsupported_event,
},
{
.cmd = KSMBD_EVENT_LOGIN_RESPONSE,
.doit = handle_generic_event,
},
{
.cmd = KSMBD_EVENT_SHARE_CONFIG_REQUEST,
.doit = handle_unsupported_event,
},
{
.cmd = KSMBD_EVENT_SHARE_CONFIG_RESPONSE,
.doit = handle_generic_event,
},
{
.cmd = KSMBD_EVENT_TREE_CONNECT_REQUEST,
.doit = handle_unsupported_event,
},
{
.cmd = KSMBD_EVENT_TREE_CONNECT_RESPONSE,
.doit = handle_generic_event,
},
{
.cmd = KSMBD_EVENT_TREE_DISCONNECT_REQUEST,
.doit = handle_unsupported_event,
},
{
.cmd = KSMBD_EVENT_LOGOUT_REQUEST,
.doit = handle_unsupported_event,
},
{
.cmd = KSMBD_EVENT_RPC_REQUEST,
.doit = handle_unsupported_event,
},
{
.cmd = KSMBD_EVENT_RPC_RESPONSE,
.doit = handle_generic_event,
},
{
.cmd = KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
.doit = handle_unsupported_event,
},
{
.cmd = KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE,
.doit = handle_generic_event,
},
};
static struct genl_family ksmbd_genl_family = {
.name = KSMBD_GENL_NAME,
.version = KSMBD_GENL_VERSION,
.hdrsize = 0,
.maxattr = KSMBD_EVENT_MAX,
.netnsok = true,
.module = THIS_MODULE,
.ops = ksmbd_genl_ops,
.n_ops = ARRAY_SIZE(ksmbd_genl_ops),
};
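/*
 * Attach the attribute policy and relax per-op validation before the
 * family is registered in ksmbd_ipc_init().
 */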
static void ksmbd_nl_init_fixup(void)
{
        int i;
        for (i = 0; i < ARRAY_SIZE(ksmbd_genl_ops); i++)
                ksmbd_genl_ops[i].validate = GENL_DONT_VALIDATE_STRICT |
                                             GENL_DONT_VALIDATE_DUMP;
        ksmbd_genl_family.policy = ksmbd_nl_policy;
}
static int rpc_context_flags(struct ksmbd_session *sess)
{
if (user_guest(sess->user))
return KSMBD_RPC_RESTRICTED_CONTEXT;
return 0;
}
static void ipc_update_last_active(void)
{
if (server_conf.ipc_timeout)
server_conf.ipc_last_active = jiffies;
}
static struct ksmbd_ipc_msg *ipc_msg_alloc(size_t sz)
{
struct ksmbd_ipc_msg *msg;
size_t msg_sz = sz + sizeof(struct ksmbd_ipc_msg);
msg = kvzalloc(msg_sz, GFP_KERNEL);
if (msg)
msg->sz = sz;
return msg;
}
static void ipc_msg_free(struct ksmbd_ipc_msg *msg)
{
kvfree(msg);
}
static void ipc_msg_handle_free(int handle)
{
if (handle >= 0)
ksmbd_release_id(&ipc_ida, handle);
}
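/*
 * Look up the waiter registered under the handle that prefixes every
 * response payload, copy the payload into a freshly allocated buffer and
 * wake the thread sleeping in ipc_msg_send_request(). A mismatch between
 * response type and request type + 1 is only logged.
 */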
static int handle_response(int type, void *payload, size_t sz)
{
        unsigned int handle = *(unsigned int *)payload;
        struct ipc_msg_table_entry *entry;
        int ret = 0;
        ipc_update_last_active();
        down_read(&ipc_msg_table_lock);
        hash_for_each_possible(ipc_msg_table, entry, ipc_table_hlist, handle) {
                if (handle != entry->handle)
                        continue;
                entry->response = NULL;
                /*
                 * Response message type value should be equal to
                 * request message type + 1.
                 */
                if (entry->type + 1 != type) {
                        pr_err("Waiting for IPC type %d, got %d. Ignore.\n",
                               entry->type + 1, type);
                }
                entry->response = kvzalloc(sz, GFP_KERNEL);
                if (!entry->response) {
                        ret = -ENOMEM;
                        break;
                }
                memcpy(entry->response, payload, sz);
                entry->msg_sz = sz;
                wake_up_interruptible(&entry->wait);
                ret = 0;
                break;
        }
        up_read(&ipc_msg_table_lock);
        return ret;
}
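/*
 * Apply the limits, flags and strings carried in the daemon's startup
 * request to server_conf, resolve the min/max protocol names and, when an
 * IPC timeout is configured, arm the heartbeat timer.
 */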
static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
{
        int ret;
        ksmbd_set_fd_limit(req->file_max);
        server_conf.flags = req->flags;
        server_conf.signing = req->signing;
        server_conf.tcp_port = req->tcp_port;
        server_conf.ipc_timeout = req->ipc_timeout * HZ;
        server_conf.deadtime = req->deadtime * SMB_ECHO_INTERVAL;
        server_conf.share_fake_fscaps = req->share_fake_fscaps;
        ksmbd_init_domain(req->sub_auth);
        if (req->smb2_max_read)
                init_smb2_max_read_size(req->smb2_max_read);
        if (req->smb2_max_write)
                init_smb2_max_write_size(req->smb2_max_write);
        if (req->smb2_max_trans)
                init_smb2_max_trans_size(req->smb2_max_trans);
        if (req->smb2_max_credits)
                init_smb2_max_credits(req->smb2_max_credits);
        if (req->smbd_max_io_size)
                init_smbd_max_io_size(req->smbd_max_io_size);
        if (req->max_connections)
                server_conf.max_connections = req->max_connections;
        ret = ksmbd_set_netbios_name(req->netbios_name);
        ret |= ksmbd_set_server_string(req->server_string);
        ret |= ksmbd_set_work_group(req->work_group);
        ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req),
                                        req->ifc_list_sz);
        if (ret) {
                pr_err("Server configuration error: %s %s %s\n",
                       req->netbios_name, req->server_string,
                       req->work_group);
                return ret;
        }
        if (req->min_prot[0]) {
                ret = ksmbd_lookup_protocol_idx(req->min_prot);
                if (ret >= 0)
                        server_conf.min_protocol = ret;
        }
        if (req->max_prot[0]) {
                ret = ksmbd_lookup_protocol_idx(req->max_prot);
                if (ret >= 0)
                        server_conf.max_protocol = ret;
        }
        if (server_conf.ipc_timeout)
                schedule_delayed_work(&ipc_timer_work, server_conf.ipc_timeout);
        return 0;
}
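/*
 * KSMBD_EVENT_STARTING_UP handler: after the capability, version and
 * attribute checks, either reconnect a restarted daemon (rejected if the
 * old one still answers heartbeats) or apply the startup configuration and
 * queue server initialization, then record the daemon's netlink port id.
 */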
static int handle_startup_event(struct sk_buff *skb, struct genl_info *info)
{
        int ret = 0;
#ifdef CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN
        if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
#endif
        if (!ksmbd_ipc_validate_version(info))
                return -EINVAL;
        if (!info->attrs[KSMBD_EVENT_STARTING_UP])
                return -EINVAL;
        mutex_lock(&startup_lock);
        if (!ksmbd_server_configurable()) {
                mutex_unlock(&startup_lock);
                pr_err("Server reset is in progress, can't start daemon\n");
                return -EINVAL;
        }
        if (ksmbd_tools_pid) {
                if (ksmbd_ipc_heartbeat_request() == 0) {
                        ret = -EINVAL;
                        goto out;
                }
                pr_err("Reconnect to a new user space daemon\n");
        } else {
                struct ksmbd_startup_request *req;
                req = nla_data(info->attrs[info->genlhdr->cmd]);
                ret = ipc_server_config_on_startup(req);
                if (ret)
                        goto out;
                server_queue_ctrl_init_work();
        }
        ksmbd_tools_pid = info->snd_portid;
        ipc_update_last_active();
out:
        mutex_unlock(&startup_lock);
        return ret;
}
static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info)
{
pr_err("Unknown IPC event: %d, ignore.\n", info->genlhdr->cmd);
return -EINVAL;
}
static int handle_generic_event(struct sk_buff *skb, struct genl_info *info)
{
        void *payload;
        int sz;
        int type = info->genlhdr->cmd;
#ifdef CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN
        if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
#endif
        if (type > KSMBD_EVENT_MAX) {
                WARN_ON(1);
                return -EINVAL;
        }
        if (!ksmbd_ipc_validate_version(info))
                return -EINVAL;
        if (!info->attrs[type])
                return -EINVAL;
        payload = nla_data(info->attrs[info->genlhdr->cmd]);
        sz = nla_len(info->attrs[info->genlhdr->cmd]);
        return handle_response(type, payload, sz);
}
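/*
 * Build a generic netlink message holding msg->payload as a single
 * attribute of type msg->type and unicast it to the daemon's port id.
 */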
static int ipc_msg_send(struct ksmbd_ipc_msg *msg)
{
        struct genlmsghdr *nlh;
        struct sk_buff *skb;
        int ret = -EINVAL;
        if (!ksmbd_tools_pid)
                return ret;
        skb = genlmsg_new(msg->sz, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        nlh = genlmsg_put(skb, 0, 0, &ksmbd_genl_family, 0, msg->type);
        if (!nlh)
                goto out;
        ret = nla_put(skb, msg->type, msg->sz, msg->payload);
        if (ret) {
                genlmsg_cancel(skb, nlh);
                goto out;
        }
        genlmsg_end(skb, nlh);
        ret = genlmsg_unicast(&init_net, skb, ksmbd_tools_pid);
        if (!ret)
                ipc_update_last_active();
        return ret;
out:
        nlmsg_free(skb);
        return ret;
}
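/*
 * For responses with variable-length payloads, recompute the expected
 * size from the response header fields and reject the message if it does
 * not match what was actually received.
 */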
static int ipc_validate_msg(struct ipc_msg_table_entry *entry)
{
        unsigned int msg_sz = entry->msg_sz;
        if (entry->type == KSMBD_EVENT_RPC_REQUEST) {
                struct ksmbd_rpc_command *resp = entry->response;
                msg_sz = sizeof(struct ksmbd_rpc_command) + resp->payload_sz;
        } else if (entry->type == KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST) {
                struct ksmbd_spnego_authen_response *resp = entry->response;
                msg_sz = sizeof(struct ksmbd_spnego_authen_response) +
                         resp->session_key_len + resp->spnego_blob_len;
        } else if (entry->type == KSMBD_EVENT_SHARE_CONFIG_REQUEST) {
                struct ksmbd_share_config_response *resp = entry->response;
                if (resp->payload_sz) {
                        if (resp->payload_sz < resp->veto_list_sz)
                                return -EINVAL;
                        msg_sz = sizeof(struct ksmbd_share_config_response) +
                                 resp->payload_sz;
                }
        }
        return entry->msg_sz != msg_sz ? -EINVAL : 0;
}
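/*
 * Register an on-stack waiter for @handle, send the request and sleep
 * until handle_response() delivers an answer or IPC_WAIT_TIMEOUT expires.
 * Returns the response buffer allocated by handle_response(), which the
 * caller is responsible for releasing, or NULL on failure.
 */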
static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle)
{
        struct ipc_msg_table_entry entry;
        int ret;
        if ((int)handle < 0)
                return NULL;
        entry.type = msg->type;
        entry.response = NULL;
        init_waitqueue_head(&entry.wait);
        down_write(&ipc_msg_table_lock);
        entry.handle = handle;
        hash_add(ipc_msg_table, &entry.ipc_table_hlist, entry.handle);
        up_write(&ipc_msg_table_lock);
        ret = ipc_msg_send(msg);
        if (ret)
                goto out;
        ret = wait_event_interruptible_timeout(entry.wait,
                                               entry.response != NULL,
                                               IPC_WAIT_TIMEOUT);
        if (entry.response) {
                ret = ipc_validate_msg(&entry);
                if (ret) {
                        kvfree(entry.response);
                        entry.response = NULL;
                }
        }
out:
        down_write(&ipc_msg_table_lock);
        hash_del(&entry.ipc_table_hlist);
        up_write(&ipc_msg_table_lock);
        return entry.response;
}
static int ksmbd_ipc_heartbeat_request(void)
{
struct ksmbd_ipc_msg *msg;
int ret;
msg = ipc_msg_alloc(sizeof(struct ksmbd_heartbeat));
if (!msg)
return -EINVAL;
msg->type = KSMBD_EVENT_HEARTBEAT_REQUEST;
ret = ipc_msg_send(msg);
ipc_msg_free(msg);
return ret;
}
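/*
 * The request wrappers below all follow the same pattern, roughly:
 *
 *      msg = ipc_msg_alloc(sizeof(<request struct>) + <optional payload>);
 *      msg->type = KSMBD_EVENT_<...>_REQUEST;
 *      req->handle = ksmbd_acquire_id(&ipc_ida);  (when a response is expected)
 *      resp = ipc_msg_send_request(msg, req->handle);
 *      ipc_msg_handle_free(req->handle);
 *      ipc_msg_free(msg);
 */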
struct ksmbd_login_response *ksmbd_ipc_login_request(const char *account)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_login_request *req;
struct ksmbd_login_response *resp;
if (strlen(account) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
return NULL;
msg = ipc_msg_alloc(sizeof(struct ksmbd_login_request));
if (!msg)
return NULL;
msg->type = KSMBD_EVENT_LOGIN_REQUEST;
req = (struct ksmbd_login_request *)msg->payload;
req->handle = ksmbd_acquire_id(&ipc_ida);
strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_handle_free(req->handle);
ipc_msg_free(msg);
return resp;
}
struct ksmbd_spnego_authen_response *
ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_spnego_authen_request *req;
struct ksmbd_spnego_authen_response *resp;
msg = ipc_msg_alloc(sizeof(struct ksmbd_spnego_authen_request) +
blob_len + 1);
if (!msg)
return NULL;
msg->type = KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST;
req = (struct ksmbd_spnego_authen_request *)msg->payload;
req->handle = ksmbd_acquire_id(&ipc_ida);
req->spnego_blob_len = blob_len;
memcpy(req->spnego_blob, spnego_blob, blob_len);
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_handle_free(req->handle);
ipc_msg_free(msg);
return resp;
}
struct ksmbd_tree_connect_response *
ksmbd_ipc_tree_connect_request(struct ksmbd_session *sess,
struct ksmbd_share_config *share,
struct ksmbd_tree_connect *tree_conn,
struct sockaddr *peer_addr)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_tree_connect_request *req;
struct ksmbd_tree_connect_response *resp;
if (strlen(user_name(sess->user)) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
return NULL;
if (strlen(share->name) >= KSMBD_REQ_MAX_SHARE_NAME)
return NULL;
msg = ipc_msg_alloc(sizeof(struct ksmbd_tree_connect_request));
if (!msg)
return NULL;
msg->type = KSMBD_EVENT_TREE_CONNECT_REQUEST;
req = (struct ksmbd_tree_connect_request *)msg->payload;
req->handle = ksmbd_acquire_id(&ipc_ida);
req->account_flags = sess->user->flags;
req->session_id = sess->id;
req->connect_id = tree_conn->id;
strscpy(req->account, user_name(sess->user), KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
strscpy(req->share, share->name, KSMBD_REQ_MAX_SHARE_NAME);
snprintf(req->peer_addr, sizeof(req->peer_addr), "%pIS", peer_addr);
if (peer_addr->sa_family == AF_INET6)
req->flags |= KSMBD_TREE_CONN_FLAG_REQUEST_IPV6;
if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2))
req->flags |= KSMBD_TREE_CONN_FLAG_REQUEST_SMB2;
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_handle_free(req->handle);
ipc_msg_free(msg);
return resp;
}
int ksmbd_ipc_tree_disconnect_request(unsigned long long session_id,
unsigned long long connect_id)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_tree_disconnect_request *req;
int ret;
msg = ipc_msg_alloc(sizeof(struct ksmbd_tree_disconnect_request));
if (!msg)
return -ENOMEM;
msg->type = KSMBD_EVENT_TREE_DISCONNECT_REQUEST;
req = (struct ksmbd_tree_disconnect_request *)msg->payload;
req->session_id = session_id;
req->connect_id = connect_id;
ret = ipc_msg_send(msg);
ipc_msg_free(msg);
return ret;
}
int ksmbd_ipc_logout_request(const char *account, int flags)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_logout_request *req;
int ret;
if (strlen(account) >= KSMBD_REQ_MAX_ACCOUNT_NAME_SZ)
return -EINVAL;
msg = ipc_msg_alloc(sizeof(struct ksmbd_logout_request));
if (!msg)
return -ENOMEM;
msg->type = KSMBD_EVENT_LOGOUT_REQUEST;
req = (struct ksmbd_logout_request *)msg->payload;
req->account_flags = flags;
strscpy(req->account, account, KSMBD_REQ_MAX_ACCOUNT_NAME_SZ);
ret = ipc_msg_send(msg);
ipc_msg_free(msg);
return ret;
}
struct ksmbd_share_config_response *
ksmbd_ipc_share_config_request(const char *name)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_share_config_request *req;
struct ksmbd_share_config_response *resp;
if (strlen(name) >= KSMBD_REQ_MAX_SHARE_NAME)
return NULL;
msg = ipc_msg_alloc(sizeof(struct ksmbd_share_config_request));
if (!msg)
return NULL;
msg->type = KSMBD_EVENT_SHARE_CONFIG_REQUEST;
req = (struct ksmbd_share_config_request *)msg->payload;
req->handle = ksmbd_acquire_id(&ipc_ida);
strscpy(req->share_name, name, KSMBD_REQ_MAX_SHARE_NAME);
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_handle_free(req->handle);
ipc_msg_free(msg);
return resp;
}
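/*
 * RPC helpers: each wraps a ksmbd_rpc_command with one method bit
 * (open/close/read/write/ioctl/rap) on top of the per-handle method
 * reported by ksmbd_session_rpc_method() and the context flags (guest
 * sessions are marked restricted), and returns the daemon's
 * ksmbd_rpc_command response.
 */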
struct ksmbd_rpc_command *ksmbd_rpc_open(struct ksmbd_session *sess, int handle)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_rpc_command *req;
struct ksmbd_rpc_command *resp;
msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
if (!msg)
return NULL;
msg->type = KSMBD_EVENT_RPC_REQUEST;
req = (struct ksmbd_rpc_command *)msg->payload;
req->handle = handle;
req->flags = ksmbd_session_rpc_method(sess, handle);
req->flags |= KSMBD_RPC_OPEN_METHOD;
req->payload_sz = 0;
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_free(msg);
return resp;
}
struct ksmbd_rpc_command *ksmbd_rpc_close(struct ksmbd_session *sess, int handle)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_rpc_command *req;
struct ksmbd_rpc_command *resp;
msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
if (!msg)
return NULL;
msg->type = KSMBD_EVENT_RPC_REQUEST;
req = (struct ksmbd_rpc_command *)msg->payload;
req->handle = handle;
req->flags = ksmbd_session_rpc_method(sess, handle);
req->flags |= KSMBD_RPC_CLOSE_METHOD;
req->payload_sz = 0;
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_free(msg);
return resp;
}
struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle,
void *payload, size_t payload_sz)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_rpc_command *req;
struct ksmbd_rpc_command *resp;
msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
if (!msg)
return NULL;
msg->type = KSMBD_EVENT_RPC_REQUEST;
req = (struct ksmbd_rpc_command *)msg->payload;
req->handle = handle;
req->flags = ksmbd_session_rpc_method(sess, handle);
req->flags |= rpc_context_flags(sess);
req->flags |= KSMBD_RPC_WRITE_METHOD;
req->payload_sz = payload_sz;
memcpy(req->payload, payload, payload_sz);
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_free(msg);
return resp;
}
struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_rpc_command *req;
struct ksmbd_rpc_command *resp;
msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command));
if (!msg)
return NULL;
msg->type = KSMBD_EVENT_RPC_REQUEST;
req = (struct ksmbd_rpc_command *)msg->payload;
req->handle = handle;
req->flags = ksmbd_session_rpc_method(sess, handle);
req->flags |= rpc_context_flags(sess);
req->flags |= KSMBD_RPC_READ_METHOD;
req->payload_sz = 0;
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_free(msg);
return resp;
}
struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle,
void *payload, size_t payload_sz)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_rpc_command *req;
struct ksmbd_rpc_command *resp;
msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
if (!msg)
return NULL;
msg->type = KSMBD_EVENT_RPC_REQUEST;
req = (struct ksmbd_rpc_command *)msg->payload;
req->handle = handle;
req->flags = ksmbd_session_rpc_method(sess, handle);
req->flags |= rpc_context_flags(sess);
req->flags |= KSMBD_RPC_IOCTL_METHOD;
req->payload_sz = payload_sz;
memcpy(req->payload, payload, payload_sz);
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_free(msg);
return resp;
}
struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload,
size_t payload_sz)
{
struct ksmbd_ipc_msg *msg;
struct ksmbd_rpc_command *req;
struct ksmbd_rpc_command *resp;
msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1);
if (!msg)
return NULL;
msg->type = KSMBD_EVENT_RPC_REQUEST;
req = (struct ksmbd_rpc_command *)msg->payload;
req->handle = ksmbd_acquire_id(&ipc_ida);
req->flags = rpc_context_flags(sess);
req->flags |= KSMBD_RPC_RAP_METHOD;
req->payload_sz = payload_sz;
memcpy(req->payload, payload, payload_sz);
resp = ipc_msg_send_request(msg, req->handle);
ipc_msg_handle_free(req->handle);
ipc_msg_free(msg);
return resp;
}
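/*
 * Periodic heartbeat: if the daemon has been silent longer than the
 * configured IPC timeout and does not answer a heartbeat request, mark the
 * server as resetting and forget the daemon's pid; the timer callback then
 * queues a full reset.
 */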
static int __ipc_heartbeat(void)
{
        unsigned long delta;
        if (!ksmbd_server_running())
                return 0;
        if (time_after(jiffies, server_conf.ipc_last_active)) {
                delta = (jiffies - server_conf.ipc_last_active);
        } else {
                ipc_update_last_active();
                schedule_delayed_work(&ipc_timer_work,
                                      server_conf.ipc_timeout);
                return 0;
        }
        if (delta < server_conf.ipc_timeout) {
                schedule_delayed_work(&ipc_timer_work,
                                      server_conf.ipc_timeout - delta);
                return 0;
        }
        if (ksmbd_ipc_heartbeat_request() == 0) {
                schedule_delayed_work(&ipc_timer_work,
                                      server_conf.ipc_timeout);
                return 0;
        }
        mutex_lock(&startup_lock);
        WRITE_ONCE(server_conf.state, SERVER_STATE_RESETTING);
        server_conf.ipc_last_active = 0;
        ksmbd_tools_pid = 0;
        pr_err("No IPC daemon response for %lus\n", delta / HZ);
        mutex_unlock(&startup_lock);
        return -EINVAL;
}
static void ipc_timer_heartbeat(struct work_struct *w)
{
if (__ipc_heartbeat())
server_queue_ctrl_reset_work();
}
int ksmbd_ipc_id_alloc(void)
{
return ksmbd_acquire_id(&ipc_ida);
}
void ksmbd_rpc_id_free(int handle)
{
ksmbd_release_id(&ipc_ida, handle);
}
void ksmbd_ipc_release(void)
{
cancel_delayed_work_sync(&ipc_timer_work);
genl_unregister_family(&ksmbd_genl_family);
}
void ksmbd_ipc_soft_reset(void)
{
mutex_lock(&startup_lock);
ksmbd_tools_pid = 0;
cancel_delayed_work_sync(&ipc_timer_work);
mutex_unlock(&startup_lock);
}
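/*
 * Register the ksmbd generic netlink family and prepare the heartbeat
 * work item; on registration failure the delayed work is cancelled so the
 * function leaves nothing pending behind.
 */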
int ksmbd_ipc_init(void)
{
        int ret = 0;
        ksmbd_nl_init_fixup();
        INIT_DELAYED_WORK(&ipc_timer_work, ipc_timer_heartbeat);
        ret = genl_register_family(&ksmbd_genl_family);
        if (ret) {
                pr_err("Failed to register KSMBD netlink interface %d\n", ret);
                cancel_delayed_work_sync(&ipc_timer_work);
        }
        return ret;
}