linux/fs/dlm/requestqueue.c
Alexander Aring f217d7ccb9 fs: dlm: avoid false-positive checker warning
This patch avoids a false-positive checker warning about writing 112
bytes into the 88-byte field "e->request", see:

[   54.891560] dlm: csmb1: dlm_recover_directory 23 out 2 messages
[   54.990542] ------------[ cut here ]------------
[   54.991012] memcpy: detected field-spanning write (size 112) of single field "&e->request" at fs/dlm/requestqueue.c:47 (size 88)
[   54.992150] WARNING: CPU: 0 PID: 297 at fs/dlm/requestqueue.c:47 dlm_add_requestqueue+0x177/0x180
[   54.993002] CPU: 0 PID: 297 Comm: kworker/u4:3 Not tainted 6.1.0-rc5-00008-ge01d50cbd6ee #248
[   54.993878] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.0-1.fc36 04/01/2014
[   54.994718] Workqueue: dlm_recv process_recv_sockets
[   54.995230] RIP: 0010:dlm_add_requestqueue+0x177/0x180
[   54.995731] Code: e7 01 0f 85 3b ff ff ff b9 58 00 00 00 48 c7 c2 c0 41 74 82 4c 89 ee 48 c7 c7 20 42 74 82 c6 05 8b 8d 30 02 01 e8 51 07 be 00 <0f> 0b e9 12 ff ff ff 66 90 0f 1f 44 00 00 41 57 48 8d 87 10 08 00
[   54.997483] RSP: 0018:ffffc90000b1fbe8 EFLAGS: 00010282
[   54.997990] RAX: 0000000000000000 RBX: ffff888024fc3d00 RCX: 0000000000000000
[   54.998667] RDX: 0000000000000001 RSI: ffffffff81155014 RDI: fffff52000163f73
[   54.999342] RBP: ffff88800dbac000 R08: 0000000000000001 R09: ffffc90000b1fa5f
[   54.999997] R10: fffff52000163f4b R11: 203a7970636d656d R12: ffff88800cfb0018
[   55.000673] R13: 0000000000000070 R14: ffff888024fc3d18 R15: 0000000000000000
[   55.001344] FS:  0000000000000000(0000) GS:ffff88806d600000(0000) knlGS:0000000000000000
[   55.002078] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   55.002603] CR2: 00007f35d4f0b9a0 CR3: 0000000025495002 CR4: 0000000000770ef0
[   55.003258] PKRU: 55555554
[   55.003514] Call Trace:
[   55.003756]  <TASK>
[   55.003953]  dlm_receive_buffer+0x1c0/0x200
[   55.004348]  dlm_process_incoming_buffer+0x46d/0x780
[   55.004786]  ? kernel_recvmsg+0x8b/0xc0
[   55.005150]  receive_from_sock.isra.0+0x168/0x420
[   55.005582]  ? process_listen_recv_socket+0x10/0x10
[   55.006018]  ? finish_task_switch.isra.0+0xe0/0x400
[   55.006469]  ? __switch_to+0x2fe/0x6a0
[   55.006808]  ? read_word_at_a_time+0xe/0x20
[   55.007197]  ? strscpy+0x146/0x190
[   55.007505]  process_one_work+0x3d0/0x6b0
[   55.007863]  worker_thread+0x8d/0x620
[   55.008209]  ? __kthread_parkme+0xd8/0xf0
[   55.008565]  ? process_one_work+0x6b0/0x6b0
[   55.008937]  kthread+0x171/0x1a0
[   55.009251]  ? kthread_exit+0x60/0x60
[   55.009582]  ret_from_fork+0x1f/0x30
[   55.009903]  </TASK>
[   55.010120] ---[ end trace 0000000000000000 ]---
[   55.025783] dlm: csmb1: dlm_recover 5 generation 3 done: 201 ms
[   55.026466] gfs2: fsid=smbcluster:csmb1.0: recover generation 3 done

It seems the checker cannot see the additional length bytes that were
allocated for the flexible array at the end of struct dlm_message. To
avoid the warning, we split the memcpy() into one copy for the 88-byte
fixed-size struct and another memcpy() for the flexible array m_extra
field.
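
In practice the change looks roughly like this (a sketch, not the
literal diff; it assumes the pre-patch code copied the full h_length in
a single call, which matches the size-112 field-spanning write reported
above):

	/* before: one copy spanning the fixed struct and the payload */
	memcpy(&e->request, ms, le16_to_cpu(ms->m_header.h_length));

	/* after: bounded copy of the fixed part, then the m_extra payload */
	memcpy(&e->request, ms, sizeof(*ms));
	memcpy(&e->request.m_extra, ms->m_extra, length);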

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
2022-11-21 09:45:49 -06:00

// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
**
**
*******************************************************************************
******************************************************************************/
#include "dlm_internal.h"
#include "member.h"
#include "lock.h"
#include "dir.h"
#include "config.h"
#include "requestqueue.h"
#include "util.h"
struct rq_entry {
	struct list_head list;
	uint32_t recover_seq;
	int nodeid;
	struct dlm_message request;
};

/*
 * Requests received while the lockspace is in recovery get added to the
 * request queue and processed when recovery is complete.  This happens when
 * the lockspace is suspended on some nodes before it is on others, or the
 * lockspace is enabled on some while still suspended on others.
 */

void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
{
	struct rq_entry *e;
	int length = le16_to_cpu(ms->m_header.h_length) -
		sizeof(struct dlm_message);
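
	/* the entry is allocated with extra room at the end for the
	 * variable-length payload carried in the flexible m_extra array
	 * at the end of struct dlm_message
	 */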
	e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
	if (!e) {
		log_print("dlm_add_requestqueue: out of memory len %d", length);
		return;
	}

	e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;
	e->nodeid = nodeid;
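	/* copy the fixed-size struct and the flexible m_extra payload
	 * separately so the fortified memcpy() checker does not see a
	 * field-spanning write into "e->request"
	 */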
	memcpy(&e->request, ms, sizeof(*ms));
	memcpy(&e->request.m_extra, ms->m_extra, length);

	atomic_inc(&ls->ls_requestqueue_cnt);
	mutex_lock(&ls->ls_requestqueue_mutex);
	list_add_tail(&e->list, &ls->ls_requestqueue);
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

/*
 * Called by dlm_recoverd to process normal messages saved while recovery was
 * happening.  Normal locking has been enabled before this is called.  dlm_recv
 * upon receiving a message, will wait for all saved messages to be drained
 * here before processing the message it got.  If a new dlm_ls_stop() arrives
 * while we're processing these saved messages, it may block trying to suspend
 * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue.  In that
 * case, we don't abort since locking_stopped is still 0.  If dlm_recv is not
 * waiting for us, then this processing may be aborted due to locking_stopped.
 */

int dlm_process_requestqueue(struct dlm_ls *ls)
{
	struct rq_entry *e;
	struct dlm_message *ms;
	int error = 0;

	mutex_lock(&ls->ls_requestqueue_mutex);

	for (;;) {
		if (list_empty(&ls->ls_requestqueue)) {
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = 0;
			break;
		}
		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
		mutex_unlock(&ls->ls_requestqueue_mutex);

		ms = &e->request;

		log_limit(ls, "dlm_process_requestqueue msg %d from %d "
			  "lkid %x remid %x result %d seq %u",
			  le32_to_cpu(ms->m_type),
			  le32_to_cpu(ms->m_header.h_nodeid),
			  le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
			  from_dlm_errno(le32_to_cpu(ms->m_result)),
			  e->recover_seq);

		dlm_receive_message_saved(ls, &e->request, e->recover_seq);

		mutex_lock(&ls->ls_requestqueue_mutex);
		list_del(&e->list);
		if (atomic_dec_and_test(&ls->ls_requestqueue_cnt))
			wake_up(&ls->ls_requestqueue_wait);
		kfree(e);

		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "process_requestqueue abort running");
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = -EINTR;
			break;
		}
		schedule();
	}

	return error;
}

/*
 * After recovery is done, locking is resumed and dlm_recoverd takes all the
 * saved requests and processes them as they would have been by dlm_recv.  At
 * the same time, dlm_recv will start receiving new requests from remote nodes.
 * We want to delay dlm_recv processing new requests until dlm_recoverd has
 * finished processing the old saved requests.  We don't check for locking
 * stopped here because dlm_ls_stop won't stop locking until it's suspended us
 * (dlm_recv).
 */

void dlm_wait_requestqueue(struct dlm_ls *ls)
{
	wait_event(ls->ls_requestqueue_wait,
		   atomic_read(&ls->ls_requestqueue_cnt) == 0);
}

static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
	__le32 type = ms->m_type;

	/* the ls is being cleaned up and freed by release_lockspace */
	if (!atomic_read(&ls->ls_count))
		return 1;

	if (dlm_is_removed(ls, nodeid))
		return 1;

	/* directory operations are always purged because the directory is
	   always rebuilt during recovery and the lookups resent */

	if (type == cpu_to_le32(DLM_MSG_REMOVE) ||
	    type == cpu_to_le32(DLM_MSG_LOOKUP) ||
	    type == cpu_to_le32(DLM_MSG_LOOKUP_REPLY))
		return 1;

	if (!dlm_no_directory(ls))
		return 0;

	return 1;
}

void dlm_purge_requestqueue(struct dlm_ls *ls)
{
	struct dlm_message *ms;
	struct rq_entry *e, *safe;

	mutex_lock(&ls->ls_requestqueue_mutex);
	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
		ms = &e->request;

		if (purge_request(ls, ms, e->nodeid)) {
			list_del(&e->list);
			if (atomic_dec_and_test(&ls->ls_requestqueue_cnt))
				wake_up(&ls->ls_requestqueue_wait);
			kfree(e);
		}
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}