mirror of https://mirrors.bfsu.edu.cn/git/linux.git
commit b4b52b881c
Hi Linus,

This is my very first pull-request. I've been working full-time as a kernel developer for more than two years now. During this time I've been fixing bugs reported by Coverity all over the tree and, as part of my work, I'm also contributing to the KSPP. My work in the kernel community has been supervised by Greg KH and Kees Cook.

OK. So, after the quick introduction above, please, pull the following patches that mark switch cases where we are expecting to fall through. These patches are part of the ongoing efforts to enable -Wimplicit-fallthrough. They have been ignored for a long time (most of them more than 3 months, even after pinging multiple times), which is the reason why I've created this tree.

Most of them have been baking in linux-next for a whole development cycle. And with Stephen Rothwell's help, we've had linux-next nag-emails going out for newly introduced code that triggers -Wimplicit-fallthrough to avoid gaining more of these cases while we work to remove the ones that are already present.

I'm happy to let you know that we are getting close to completing this work. Currently, there are only 32 of 2311 of these cases left to be addressed in linux-next. I'm auditing every case; I take a look into the code and analyze it in order to determine if I'm dealing with an actual bug or a false positive, as explained here:

https://lore.kernel.org/lkml/c2fad584-1705-a5f2-d63c-824e9b96cf50@embeddedor.com/

While working on this, I've found and fixed the following missing break/return bugs, some of them introduced more than 5 years ago:

84242b82d8
7850b51b6c
5e420fe635
09186e5034
b5be853181
7264235ee7
cc5034a5d2
479826cc86
5340f23df8
df997abeeb
2f10d82373
307b00c5e6
5d25ff7a54
a7ed5b3e7d
c24bfa8f21
ad0eaee619
9ba8376ce1
dc586a60a1
a8e9b186f1
4e57562b48
60747828ea
c5b974bee9
cc44ba9116
2c930e3d0a
Once this work is finished, we'll be able to universally enable "-Wimplicit-fallthrough" to avoid any of these kinds of bugs from entering the kernel again.

Thanks

Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEkmRahXBSurMIg1YvRwW0y0cG2zEFAlzQR2IACgkQRwW0y0cG
2zEJbQ//X930OcBtT/9DRW4XL1Jeq0Mjssz/GLX2Vpup5CwwcTROG65no80Zezf/
yQRWnUjGX0OBv/fmUK32/nTxI/7k7NkmIXJHe0HiEF069GEENB7FT6tfDzIPjU8M
qQkB8NsSUWJs3IH6BVynb/9MGE1VpGBDbYk7CBZRtRJT1RMM+3kQPucgiZMgUBPo
Yd9zKwn4i/8tcOCli++EUdQ29ukMoY2R3qpK4LftdX9sXLKZBWNwQbiCwSkjnvJK
I6FDiA7RaWH2wWGlL7BpN5RrvAXp3z8QN/JZnivIGt4ijtAyxFUL/9KOEgQpBQN2
6TBRhfTQFM73NCyzLgGLNzvd8awem1rKGSBBUvevaPbgesgM+Of65wmmTQRhFNCt
A7+e286X1GiK3aNcjUKrByKWm7x590EWmDzmpmICxNPdt5DHQ6EkmvBdNjnxCMrO
aGA24l78tBN09qN45LR7wtHYuuyR0Jt9bCmeQZmz7+x3ICDHi/+Gw7XPN/eM9+T6
lZbbINiYUyZVxOqwzkYDCsdv9+kUvu3e4rPs20NERWRpV8FEvBIyMjXAg6NAMTue
K+ikkyMBxCvyw+NMimHJwtD7ho4FkLPcoeXb2ZGJTRHixiZAEtF1RaQ7dA05Q/SL
gbSc0DgLZeHlLBT+BSWC2Z8SDnoIhQFXW49OmuACwCUC68NHKps=
=k30z
-----END PGP SIGNATURE-----

Merge tag 'Wimplicit-fallthrough-5.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gustavoars/linux

Pull Wimplicit-fallthrough updates from Gustavo A. R. Silva:

"Mark switch cases where we are expecting to fall through. This is part of the ongoing efforts to enable -Wimplicit-fallthrough.

Most of them have been baking in linux-next for a whole development cycle. And with Stephen Rothwell's help, we've had linux-next nag-emails going out for newly introduced code that triggers -Wimplicit-fallthrough to avoid gaining more of these cases while we work to remove the ones that are already present.

We are getting close to completing this work. Currently, there are only 32 of 2311 of these cases left to be addressed in linux-next. I'm auditing every case; I take a look into the code and analyze it in order to determine if I'm dealing with an actual bug or a false positive, as explained here:

https://lore.kernel.org/lkml/c2fad584-1705-a5f2-d63c-824e9b96cf50@embeddedor.com/

While working on this, I've found and fixed several missing break/return bugs, some of them introduced more than 5 years ago.

Once this work is finished, we'll be able to universally enable "-Wimplicit-fallthrough" to avoid any of these kinds of bugs from entering the kernel again"

* tag 'Wimplicit-fallthrough-5.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gustavoars/linux: (27 commits)
  memstick: mark expected switch fall-throughs
  drm/nouveau/nvkm: mark expected switch fall-throughs
  NFC: st21nfca: Fix fall-through warnings
  NFC: pn533: mark expected switch fall-throughs
  block: Mark expected switch fall-throughs
  ASN.1: mark expected switch fall-through
  lib/cmdline.c: mark expected switch fall-throughs
  lib: zstd: Mark expected switch fall-throughs
  scsi: sym53c8xx_2: sym_nvram: Mark expected switch fall-through
  scsi: sym53c8xx_2: sym_hipd: mark expected switch fall-throughs
  scsi: ppa: mark expected switch fall-through
  scsi: osst: mark expected switch fall-throughs
  scsi: lpfc: lpfc_scsi: Mark expected switch fall-throughs
  scsi: lpfc: lpfc_nvme: Mark expected switch fall-through
  scsi: lpfc: lpfc_nportdisc: Mark expected switch fall-through
  scsi: lpfc: lpfc_hbadisc: Mark expected switch fall-throughs
  scsi: lpfc: lpfc_els: Mark expected switch fall-throughs
  scsi: lpfc: lpfc_ct: Mark expected switch fall-throughs
  scsi: imm: mark expected switch fall-throughs
  scsi: csiostor: csio_wr: mark expected switch fall-through
  ...
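To illustrate the pattern these patches apply, here is a minimal, self-contained sketch (plain C, not kernel code) of marking an intentional fall-through with a comment that GCC's -Wimplicit-fallthrough recognises at its default level. Kernels after this series moved from such comments to a fallthrough pseudo-keyword built on __attribute__((__fallthrough__)); the file name and helper below are hypothetical, for demonstration only.

/* fallthrough_demo.c - build with: gcc -Wimplicit-fallthrough -o demo fallthrough_demo.c */
#include <stdio.h>

static void run_op(int op)
{
	switch (op) {
	case 0:
		printf("prepare\n");
		/* fall through - op 0 wants both steps */
	case 1:
		printf("execute\n");
		break;	/* an unannotated fall-through into case 1 would trigger the warning */
	default:
		printf("unknown op\n");
	}
}

int main(void)
{
	run_op(0);	/* prints "prepare" then "execute" */
	run_op(1);	/* prints "execute" only */
	return 0;
}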
741 lines
18 KiB
C
/* AFS Cache Manager Service
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/ip.h>
#include "internal.h"
#include "afs_cm.h"
#include "protocol_yfs.h"

static int afs_deliver_cb_init_call_back_state(struct afs_call *);
static int afs_deliver_cb_init_call_back_state3(struct afs_call *);
static int afs_deliver_cb_probe(struct afs_call *);
static int afs_deliver_cb_callback(struct afs_call *);
static int afs_deliver_cb_probe_uuid(struct afs_call *);
static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *);
static void afs_cm_destructor(struct afs_call *);
static void SRXAFSCB_CallBack(struct work_struct *);
static void SRXAFSCB_InitCallBackState(struct work_struct *);
static void SRXAFSCB_Probe(struct work_struct *);
static void SRXAFSCB_ProbeUuid(struct work_struct *);
static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);

static int afs_deliver_yfs_cb_callback(struct afs_call *);

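/* Construct a string name for an incoming cache manager operation.  The
 * __tracepoint_string annotation makes the string available to the trace
 * subsystem so tracepoints can emit it by address.
 */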
#define CM_NAME(name) \
	char afs_SRXCB##name##_name[] __tracepoint_string = \
		"CB." #name

/*
 * CB.CallBack operation type
 */
static CM_NAME(CallBack);
static const struct afs_call_type afs_SRXCBCallBack = {
	.name = afs_SRXCBCallBack_name,
	.deliver = afs_deliver_cb_callback,
	.destructor = afs_cm_destructor,
	.work = SRXAFSCB_CallBack,
};

/*
 * CB.InitCallBackState operation type
 */
static CM_NAME(InitCallBackState);
static const struct afs_call_type afs_SRXCBInitCallBackState = {
	.name = afs_SRXCBInitCallBackState_name,
	.deliver = afs_deliver_cb_init_call_back_state,
	.destructor = afs_cm_destructor,
	.work = SRXAFSCB_InitCallBackState,
};

/*
 * CB.InitCallBackState3 operation type
 */
static CM_NAME(InitCallBackState3);
static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
	.name = afs_SRXCBInitCallBackState3_name,
	.deliver = afs_deliver_cb_init_call_back_state3,
	.destructor = afs_cm_destructor,
	.work = SRXAFSCB_InitCallBackState,
};

/*
 * CB.Probe operation type
 */
static CM_NAME(Probe);
static const struct afs_call_type afs_SRXCBProbe = {
	.name = afs_SRXCBProbe_name,
	.deliver = afs_deliver_cb_probe,
	.destructor = afs_cm_destructor,
	.work = SRXAFSCB_Probe,
};

/*
 * CB.ProbeUuid operation type
 */
static CM_NAME(ProbeUuid);
static const struct afs_call_type afs_SRXCBProbeUuid = {
	.name = afs_SRXCBProbeUuid_name,
	.deliver = afs_deliver_cb_probe_uuid,
	.destructor = afs_cm_destructor,
	.work = SRXAFSCB_ProbeUuid,
};

/*
 * CB.TellMeAboutYourself operation type
 */
static CM_NAME(TellMeAboutYourself);
static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
	.name = afs_SRXCBTellMeAboutYourself_name,
	.deliver = afs_deliver_cb_tell_me_about_yourself,
	.destructor = afs_cm_destructor,
	.work = SRXAFSCB_TellMeAboutYourself,
};

/*
 * YFS CB.CallBack operation type
 */
static CM_NAME(YFS_CallBack);
static const struct afs_call_type afs_SRXYFSCB_CallBack = {
	.name = afs_SRXCBYFS_CallBack_name,
	.deliver = afs_deliver_yfs_cb_callback,
	.destructor = afs_cm_destructor,
	.work = SRXAFSCB_CallBack,
};

/*
 * route an incoming cache manager call
 * - return T if supported, F if not
 */
bool afs_cm_incoming_call(struct afs_call *call)
{
	_enter("{%u, CB.OP %u}", call->service_id, call->operation_ID);

	call->epoch = rxrpc_kernel_get_epoch(call->net->socket, call->rxcall);

	switch (call->operation_ID) {
	case CBCallBack:
		call->type = &afs_SRXCBCallBack;
		return true;
	case CBInitCallBackState:
		call->type = &afs_SRXCBInitCallBackState;
		return true;
	case CBInitCallBackState3:
		call->type = &afs_SRXCBInitCallBackState3;
		return true;
	case CBProbe:
		call->type = &afs_SRXCBProbe;
		return true;
	case CBProbeUuid:
		call->type = &afs_SRXCBProbeUuid;
		return true;
	case CBTellMeAboutYourself:
		call->type = &afs_SRXCBTellMeAboutYourself;
		return true;
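	/* The YFS variant of CB.CallBack is only honoured if the server
	 * addressed the YFS cache manager service ID.
	 */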
	case YFSCBCallBack:
		if (call->service_id != YFS_CM_SERVICE)
			return false;
		call->type = &afs_SRXYFSCB_CallBack;
		return true;
	default:
		return false;
	}
}

/*
 * Record a probe to the cache manager from a server.
 */
static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server)
{
	_enter("");

	if (test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags) &&
	    !test_bit(AFS_SERVER_FL_PROBING, &server->flags)) {
		if (server->cm_epoch == call->epoch)
			return 0;

		if (!server->probe.said_rebooted) {
			pr_notice("kAFS: FS rebooted %pU\n", &server->uuid);
			server->probe.said_rebooted = true;
		}
	}

	spin_lock(&server->probe_lock);

	if (!test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) {
		server->cm_epoch = call->epoch;
		server->probe.cm_epoch = call->epoch;
		goto out;
	}

	if (server->probe.cm_probed &&
	    call->epoch != server->probe.cm_epoch &&
	    !server->probe.said_inconsistent) {
		pr_notice("kAFS: FS endpoints inconsistent %pU\n",
			  &server->uuid);
		server->probe.said_inconsistent = true;
	}

	if (!server->probe.cm_probed || call->epoch == server->cm_epoch)
		server->probe.cm_epoch = server->cm_epoch;

out:
	server->probe.cm_probed = true;
	spin_unlock(&server->probe_lock);
	return 0;
}

/*
 * Find the server record by peer address and record a probe to the cache
 * manager from a server.
 */
static int afs_find_cm_server_by_peer(struct afs_call *call)
{
	struct sockaddr_rxrpc srx;
	struct afs_server *server;

	rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);

	server = afs_find_server(call->net, &srx);
	if (!server) {
		trace_afs_cm_no_server(call, &srx);
		return 0;
	}

	call->cm_server = server;
	return afs_record_cm_probe(call, server);
}

/*
 * Find the server record by server UUID and record a probe to the cache
 * manager from a server.
 */
static int afs_find_cm_server_by_uuid(struct afs_call *call,
				      struct afs_uuid *uuid)
{
	struct afs_server *server;

	rcu_read_lock();
	server = afs_find_server_by_uuid(call->net, call->request);
	rcu_read_unlock();
	if (!server) {
		trace_afs_cm_no_server_u(call, call->request);
		return 0;
	}

	call->cm_server = server;
	return afs_record_cm_probe(call, server);
}

/*
 * Clean up a cache manager call.
 */
static void afs_cm_destructor(struct afs_call *call)
{
	kfree(call->buffer);
	call->buffer = NULL;
}

/*
 * The server supplied a list of callbacks that it wanted to break.
 */
static void SRXAFSCB_CallBack(struct work_struct *work)
{
	struct afs_call *call = container_of(work, struct afs_call, work);

	_enter("");

	/* We need to break the callbacks before sending the reply as the
	 * server holds up change visibility till it receives our reply so as
	 * to maintain cache coherency.
	 */
	if (call->cm_server)
		afs_break_callbacks(call->cm_server, call->count, call->request);

	afs_send_empty_reply(call);
	afs_put_call(call);
	_leave("");
}

/*
 * deliver request data to a CB.CallBack call
 */
static int afs_deliver_cb_callback(struct afs_call *call)
{
	struct afs_callback_break *cb;
	__be32 *bp;
	int ret, loop;

	_enter("{%u}", call->unmarshall);

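	/* The request data arrives in chunks as rx packets are decoded, so
	 * this function may be called several times.  call->unmarshall
	 * records the phase reached so far, and each case falls through
	 * into the next once its data is complete.
	 */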
	switch (call->unmarshall) {
	case 0:
		afs_extract_to_tmp(call);
		call->unmarshall++;

		/* extract the FID array and its count in two steps */
		/* fall through */
	case 1:
		_debug("extract FID count");
		ret = afs_extract_data(call, true);
		if (ret < 0)
			return ret;

		call->count = ntohl(call->tmp);
		_debug("FID count: %u", call->count);
		if (call->count > AFSCBMAX)
			return afs_protocol_error(call, -EBADMSG,
						  afs_eproto_cb_fid_count);

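		/* Each FID is three 32-bit words on the wire: vid, vnode
		 * and unique.
		 */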
		call->buffer = kmalloc(array3_size(call->count, 3, 4),
				       GFP_KERNEL);
		if (!call->buffer)
			return -ENOMEM;
		afs_extract_to_buf(call, call->count * 3 * 4);
		call->unmarshall++;

		/* Fall through */
	case 2:
		_debug("extract FID array");
		ret = afs_extract_data(call, true);
		if (ret < 0)
			return ret;

		_debug("unmarshall FID array");
		call->request = kcalloc(call->count,
					sizeof(struct afs_callback_break),
					GFP_KERNEL);
		if (!call->request)
			return -ENOMEM;

		cb = call->request;
		bp = call->buffer;
		for (loop = call->count; loop > 0; loop--, cb++) {
			cb->fid.vid = ntohl(*bp++);
			cb->fid.vnode = ntohl(*bp++);
			cb->fid.unique = ntohl(*bp++);
		}

		afs_extract_to_tmp(call);
		call->unmarshall++;

		/* extract the callback array and its count in two steps */
		/* fall through */
	case 3:
		_debug("extract CB count");
		ret = afs_extract_data(call, true);
		if (ret < 0)
			return ret;

		call->count2 = ntohl(call->tmp);
		_debug("CB count: %u", call->count2);
		if (call->count2 != call->count && call->count2 != 0)
			return afs_protocol_error(call, -EBADMSG,
						  afs_eproto_cb_count);
		call->_iter = &call->iter;
		iov_iter_discard(&call->iter, READ, call->count2 * 3 * 4);
		call->unmarshall++;

		/* Fall through */
	case 4:
		_debug("extract discard %zu/%u",
		       iov_iter_count(&call->iter), call->count2 * 3 * 4);

		ret = afs_extract_data(call, false);
		if (ret < 0)
			return ret;

		call->unmarshall++;
	case 5:
		break;
	}

	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
		return afs_io_error(call, afs_io_error_cm_reply);

	/* we'll need the file server record as that tells us which set of
	 * vnodes to operate upon */
	return afs_find_cm_server_by_peer(call);
}

/*
 * allow the fileserver to request callback state (re-)initialisation
 */
static void SRXAFSCB_InitCallBackState(struct work_struct *work)
{
	struct afs_call *call = container_of(work, struct afs_call, work);

	_enter("{%p}", call->cm_server);

	if (call->cm_server)
		afs_init_callback_state(call->cm_server);
	afs_send_empty_reply(call);
	afs_put_call(call);
	_leave("");
}

/*
 * deliver request data to a CB.InitCallBackState call
 */
static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
{
	int ret;

	_enter("");

	afs_extract_discard(call, 0);
	ret = afs_extract_data(call, false);
	if (ret < 0)
		return ret;

	/* we'll need the file server record as that tells us which set of
	 * vnodes to operate upon */
	return afs_find_cm_server_by_peer(call);
}

/*
 * deliver request data to a CB.InitCallBackState3 call
 */
static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
{
	struct afs_uuid *r;
	unsigned loop;
	__be32 *b;
	int ret;

	_enter("");

	_enter("{%u}", call->unmarshall);

	switch (call->unmarshall) {
	case 0:
		call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL);
		if (!call->buffer)
			return -ENOMEM;
		afs_extract_to_buf(call, 11 * sizeof(__be32));
		call->unmarshall++;

		/* Fall through */
	case 1:
		_debug("extract UUID");
		ret = afs_extract_data(call, false);
		switch (ret) {
		case 0: break;
		case -EAGAIN: return 0;
		default: return ret;
		}

		_debug("unmarshall UUID");
		call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
		if (!call->request)
			return -ENOMEM;

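		/* The UUID arrives as 11 x 32-bit XDR words, each of the
		 * narrower structure fields having been widened to a full
		 * word.
		 */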
		b = call->buffer;
		r = call->request;
		r->time_low = b[0];
		r->time_mid = htons(ntohl(b[1]));
		r->time_hi_and_version = htons(ntohl(b[2]));
		r->clock_seq_hi_and_reserved = ntohl(b[3]);
		r->clock_seq_low = ntohl(b[4]);

		for (loop = 0; loop < 6; loop++)
			r->node[loop] = ntohl(b[loop + 5]);

		call->unmarshall++;

	case 2:
		break;
	}

	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
		return afs_io_error(call, afs_io_error_cm_reply);

	/* we'll need the file server record as that tells us which set of
	 * vnodes to operate upon */
	return afs_find_cm_server_by_uuid(call, call->request);
}

/*
 * allow the fileserver to see if the cache manager is still alive
 */
static void SRXAFSCB_Probe(struct work_struct *work)
{
	struct afs_call *call = container_of(work, struct afs_call, work);

	_enter("");
	afs_send_empty_reply(call);
	afs_put_call(call);
	_leave("");
}

/*
 * deliver request data to a CB.Probe call
 */
static int afs_deliver_cb_probe(struct afs_call *call)
{
	int ret;

	_enter("");

	afs_extract_discard(call, 0);
	ret = afs_extract_data(call, false);
	if (ret < 0)
		return ret;

	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
		return afs_io_error(call, afs_io_error_cm_reply);
	return afs_find_cm_server_by_peer(call);
}

/*
 * allow the fileserver to quickly find out if the cache manager has been
 * rebooted
 */
static void SRXAFSCB_ProbeUuid(struct work_struct *work)
{
	struct afs_call *call = container_of(work, struct afs_call, work);
	struct afs_uuid *r = call->request;

	struct {
		__be32 match;
	} reply;

	_enter("");

	if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
		reply.match = htonl(0);
	else
		reply.match = htonl(1);

	afs_send_simple_reply(call, &reply, sizeof(reply));
	afs_put_call(call);
	_leave("");
}

/*
 * deliver request data to a CB.ProbeUuid call
 */
static int afs_deliver_cb_probe_uuid(struct afs_call *call)
{
	struct afs_uuid *r;
	unsigned loop;
	__be32 *b;
	int ret;

	_enter("{%u}", call->unmarshall);

	switch (call->unmarshall) {
	case 0:
		call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL);
		if (!call->buffer)
			return -ENOMEM;
		afs_extract_to_buf(call, 11 * sizeof(__be32));
		call->unmarshall++;

		/* Fall through */
	case 1:
		_debug("extract UUID");
		ret = afs_extract_data(call, false);
		switch (ret) {
		case 0: break;
		case -EAGAIN: return 0;
		default: return ret;
		}

		_debug("unmarshall UUID");
		call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
		if (!call->request)
			return -ENOMEM;

		b = call->buffer;
		r = call->request;
		r->time_low = b[0];
		r->time_mid = htons(ntohl(b[1]));
		r->time_hi_and_version = htons(ntohl(b[2]));
		r->clock_seq_hi_and_reserved = ntohl(b[3]);
		r->clock_seq_low = ntohl(b[4]);

		for (loop = 0; loop < 6; loop++)
			r->node[loop] = ntohl(b[loop + 5]);

		call->unmarshall++;

	case 2:
		break;
	}

	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
		return afs_io_error(call, afs_io_error_cm_reply);
	return afs_find_cm_server_by_uuid(call, call->request);
}

/*
 * allow the fileserver to ask about the cache manager's capabilities
 */
static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
{
	struct afs_interface *ifs;
	struct afs_call *call = container_of(work, struct afs_call, work);
	int loop, nifs;

	struct {
		struct /* InterfaceAddr */ {
			__be32 nifs;
			__be32 uuid[11];
			__be32 ifaddr[32];
			__be32 netmask[32];
			__be32 mtu[32];
		} ia;
		struct /* Capabilities */ {
			__be32 capcount;
			__be32 caps[1];
		} cap;
	} reply;

	_enter("");

	nifs = 0;
	ifs = kcalloc(32, sizeof(*ifs), GFP_KERNEL);
	if (ifs) {
		nifs = afs_get_ipv4_interfaces(call->net, ifs, 32, false);
		if (nifs < 0) {
			kfree(ifs);
			ifs = NULL;
			nifs = 0;
		}
	}

	memset(&reply, 0, sizeof(reply));
	reply.ia.nifs = htonl(nifs);

	reply.ia.uuid[0] = call->net->uuid.time_low;
	reply.ia.uuid[1] = htonl(ntohs(call->net->uuid.time_mid));
	reply.ia.uuid[2] = htonl(ntohs(call->net->uuid.time_hi_and_version));
	reply.ia.uuid[3] = htonl((s8) call->net->uuid.clock_seq_hi_and_reserved);
	reply.ia.uuid[4] = htonl((s8) call->net->uuid.clock_seq_low);
	for (loop = 0; loop < 6; loop++)
		reply.ia.uuid[loop + 5] = htonl((s8) call->net->uuid.node[loop]);

	if (ifs) {
		for (loop = 0; loop < nifs; loop++) {
			reply.ia.ifaddr[loop] = ifs[loop].address.s_addr;
			reply.ia.netmask[loop] = ifs[loop].netmask.s_addr;
			reply.ia.mtu[loop] = htonl(ifs[loop].mtu);
		}
		kfree(ifs);
	}

	reply.cap.capcount = htonl(1);
	reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION);
	afs_send_simple_reply(call, &reply, sizeof(reply));
	afs_put_call(call);
	_leave("");
}

/*
 * deliver request data to a CB.TellMeAboutYourself call
 */
static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
{
	int ret;

	_enter("");

	afs_extract_discard(call, 0);
	ret = afs_extract_data(call, false);
	if (ret < 0)
		return ret;

	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
		return afs_io_error(call, afs_io_error_cm_reply);
	return afs_find_cm_server_by_peer(call);
}

/*
 * deliver request data to a YFS CB.CallBack call
 */
static int afs_deliver_yfs_cb_callback(struct afs_call *call)
{
	struct afs_callback_break *cb;
	struct yfs_xdr_YFSFid *bp;
	size_t size;
	int ret, loop;

	_enter("{%u}", call->unmarshall);

	switch (call->unmarshall) {
	case 0:
		afs_extract_to_tmp(call);
		call->unmarshall++;

		/* extract the FID array and its count in two steps */
		/* Fall through */
	case 1:
		_debug("extract FID count");
		ret = afs_extract_data(call, true);
		if (ret < 0)
			return ret;

		call->count = ntohl(call->tmp);
		_debug("FID count: %u", call->count);
		if (call->count > YFSCBMAX)
			return afs_protocol_error(call, -EBADMSG,
						  afs_eproto_cb_fid_count);

		size = array_size(call->count, sizeof(struct yfs_xdr_YFSFid));
		call->buffer = kmalloc(size, GFP_KERNEL);
		if (!call->buffer)
			return -ENOMEM;
		afs_extract_to_buf(call, size);
		call->unmarshall++;

		/* Fall through */
	case 2:
		_debug("extract FID array");
		ret = afs_extract_data(call, false);
		if (ret < 0)
			return ret;

		_debug("unmarshall FID array");
		call->request = kcalloc(call->count,
					sizeof(struct afs_callback_break),
					GFP_KERNEL);
		if (!call->request)
			return -ENOMEM;

		cb = call->request;
		bp = call->buffer;
		for (loop = call->count; loop > 0; loop--, cb++) {
			cb->fid.vid = xdr_to_u64(bp->volume);
			cb->fid.vnode = xdr_to_u64(bp->vnode.lo);
			cb->fid.vnode_hi = ntohl(bp->vnode.hi);
			cb->fid.unique = ntohl(bp->vnode.unique);
			bp++;
		}

		afs_extract_to_tmp(call);
		call->unmarshall++;

	case 3:
		break;
	}

	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
		return afs_io_error(call, afs_io_error_cm_reply);

	/* We'll need the file server record as that tells us which set of
	 * vnodes to operate upon.
	 */
	return afs_find_cm_server_by_peer(call);
}