NFSD 6.10 Release Notes

Merge tag 'nfsd-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux

Pull nfsd updates from Chuck Lever:
 "This is a light release containing mostly optimizations, code clean-
  ups, and minor bug fixes. This development cycle has focused on non-
  upstream kernel work:

   1. Continuing to build upstream CI for NFSD, based on kdevops

   2. Backporting NFSD filecache-related fixes to selected LTS kernels

  One notable new feature in v6.10 NFSD is the addition of a new netlink
  protocol dedicated to configuring NFSD. A new user space tool,
  nfsdctl, is to be added to nfs-utils. Lots more to come here.

  As always I am very grateful to NFSD contributors, reviewers, testers,
  and bug reporters who participated during this cycle"
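
The new netlink protocol mentioned above can be driven directly from user space until nfsdctl lands in nfs-utils. The following minimal sketch is illustrative only and not part of this merge: it queries the running thread count with libnl-genl-3, assuming the family is registered under the name "nfsd" and reusing the NFSD_CMD_THREADS_GET and NFSD_A_SERVER_THREADS values from the uapi header included further down in this diff.

/*
 * Illustrative sketch only -- not part of this merge.  Query the NFSD
 * thread count over the new generic netlink family using libnl-genl-3.
 * Assumes the family name "nfsd"; the command and attribute values
 * mirror the uapi nfsd_netlink.h header added in this series.
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

#define NFSD_CMD_THREADS_GET	3
#define NFSD_A_SERVER_THREADS	1

static int threads_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[NFSD_A_SERVER_THREADS + 1];

	/* The reply may also carry gracetime, leasetime and scope, and one
	 * threads attribute per pool; only the last threads value is kept. */
	if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs,
			  NFSD_A_SERVER_THREADS, NULL) < 0)
		return NL_SKIP;
	if (attrs[NFSD_A_SERVER_THREADS])
		printf("threads: %u\n",
		       nla_get_u32(attrs[NFSD_A_SERVER_THREADS]));
	return NL_OK;
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int family;

	if (!sk || genl_connect(sk) < 0)
		return 1;
	family = genl_ctrl_resolve(sk, "nfsd");
	if (family < 0)
		return 1;
	nl_socket_disable_auto_ack(sk);
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, threads_cb, NULL);
	genl_send_simple(sk, family, NFSD_CMD_THREADS_GET, 1, 0);
	nl_recvmsgs_default(sk);	/* process the single reply */
	nl_socket_free(sk);
	return 0;
}

The *-get commands are unprivileged; the *-set commands carry the admin-perm flag in the spec and therefore require CAP_NET_ADMIN.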

* tag 'nfsd-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux: (29 commits)
  NFSD: Force all NFSv4.2 COPY requests to be synchronous
  SUNRPC: Fix gss_free_in_token_pages()
  NFS/knfsd: Remove the invalid NFS error 'NFSERR_OPNOTSUPP'
  knfsd: LOOKUP can return an illegal error value
  nfsd: set security label during create operations
  NFSD: Add COPY status code to OFFLOAD_STATUS response
  NFSD: Record status of async copy operation in struct nfsd4_copy
  SUNRPC: Remove comment for sp_lock
  NFSD: add listener-{set,get} netlink command
  SUNRPC: add a new svc_find_listener helper
  SUNRPC: introduce svc_xprt_create_from_sa utility routine
  NFSD: add write_version to netlink command
  NFSD: convert write_threads to netlink command
  NFSD: allow callers to pass in scope string to nfsd_svc
  NFSD: move nfsd_mutex handling into nfsd_svc callers
  lockd: host: Remove unnecessary statements 'host = NULL;'
  nfsd: don't create nfsv4recoverydir in nfsdfs when not used.
  nfsd: optimise recalculate_deny_mode() for a common case
  nfsd: add tracepoint in mark_client_expired_locked
  nfsd: new tracepoint for check_slot_seqid
  ...
Linus Torvalds 2024-05-18 14:04:20 -07:00
commit 61ea647ed1
29 changed files with 1286 additions and 271 deletions


@ -62,6 +62,59 @@ attribute-sets:
name: compound-ops
type: u32
multi-attr: true
-
name: server
attributes:
-
name: threads
type: u32
multi-attr: true
-
name: gracetime
type: u32
-
name: leasetime
type: u32
-
name: scope
type: string
-
name: version
attributes:
-
name: major
type: u32
-
name: minor
type: u32
-
name: enabled
type: flag
-
name: server-proto
attributes:
-
name: version
type: nest
nested-attributes: version
multi-attr: true
-
name: sock
attributes:
-
name: addr
type: binary
-
name: transport-name
type: string
-
name: server-sock
attributes:
-
name: addr
type: nest
nested-attributes: sock
multi-attr: true
operations:
list:
@ -87,3 +140,60 @@ operations:
- sport
- dport
- compound-ops
-
name: threads-set
doc: set the number of running threads
attribute-set: server
flags: [ admin-perm ]
do:
request:
attributes:
- threads
- gracetime
- leasetime
- scope
-
name: threads-get
doc: get the number of running threads
attribute-set: server
do:
reply:
attributes:
- threads
- gracetime
- leasetime
- scope
-
name: version-set
doc: set nfs enabled versions
attribute-set: server-proto
flags: [ admin-perm ]
do:
request:
attributes:
- version
-
name: version-get
doc: get nfs enabled versions
attribute-set: server-proto
do:
reply:
attributes:
- version
-
name: listener-set
doc: set nfs running sockets
attribute-set: server-sock
flags: [ admin-perm ]
do:
request:
attributes:
- addr
-
name: listener-get
doc: get nfs running listeners
attribute-set: server-sock
do:
reply:
attributes:
- addr
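
The server-proto and server-sock operations above take nested attributes. As an illustration (not from this merge), a libnl caller would build a version-set request by adding one nested version object per protocol version it wants enabled; the numeric command and attribute values below are assumptions taken from the uapi header generated from this spec.

/*
 * Illustration only: construct a NFSD_CMD_VERSION_SET request that
 * enables NFSv4.2.  Note the handler first clears every version, so a
 * real caller adds one nest per version it wants to remain enabled.
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>

#define NFSD_CMD_VERSION_SET		4
#define NFSD_A_SERVER_PROTO_VERSION	1
#define NFSD_A_VERSION_MAJOR		1
#define NFSD_A_VERSION_MINOR		2
#define NFSD_A_VERSION_ENABLED		3

static struct nl_msg *build_version_set(int family)
{
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *nest;

	if (!msg)
		return NULL;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NFSD_CMD_VERSION_SET, 1);

	nest = nla_nest_start(msg, NFSD_A_SERVER_PROTO_VERSION);
	nla_put_u32(msg, NFSD_A_VERSION_MAJOR, 4);
	nla_put_u32(msg, NFSD_A_VERSION_MINOR, 2);
	nla_put_flag(msg, NFSD_A_VERSION_ENABLED);	/* omit flag to disable */
	nla_nest_end(msg, nest);

	return msg;	/* send with nl_send_auto() on a connected genl socket */
}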


@ -117,7 +117,6 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
if (nsm != NULL)
refcount_inc(&nsm->sm_count);
else {
host = NULL;
nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
ni->hostname, ni->hostname_len);
if (unlikely(nsm == NULL)) {


@ -334,21 +334,25 @@ static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
static int export_stats_init(struct export_stats *stats)
{
stats->start_time = ktime_get_seconds();
return nfsd_percpu_counters_init(stats->counter, EXP_STATS_COUNTERS_NUM);
return percpu_counter_init_many(stats->counter, 0, GFP_KERNEL,
EXP_STATS_COUNTERS_NUM);
}
static void export_stats_reset(struct export_stats *stats)
{
if (stats)
nfsd_percpu_counters_reset(stats->counter,
EXP_STATS_COUNTERS_NUM);
if (stats) {
int i;
for (i = 0; i < EXP_STATS_COUNTERS_NUM; i++)
percpu_counter_set(&stats->counter[i], 0);
}
}
static void export_stats_destroy(struct export_stats *stats)
{
if (stats)
nfsd_percpu_counters_destroy(stats->counter,
EXP_STATS_COUNTERS_NUM);
percpu_counter_destroy_many(stats->counter,
EXP_STATS_COUNTERS_NUM);
}
static void svc_export_put(struct kref *ref)


@ -10,6 +10,36 @@
#include <uapi/linux/nfsd_netlink.h>
/* Common nested types */
const struct nla_policy nfsd_sock_nl_policy[NFSD_A_SOCK_TRANSPORT_NAME + 1] = {
[NFSD_A_SOCK_ADDR] = { .type = NLA_BINARY, },
[NFSD_A_SOCK_TRANSPORT_NAME] = { .type = NLA_NUL_STRING, },
};
const struct nla_policy nfsd_version_nl_policy[NFSD_A_VERSION_ENABLED + 1] = {
[NFSD_A_VERSION_MAJOR] = { .type = NLA_U32, },
[NFSD_A_VERSION_MINOR] = { .type = NLA_U32, },
[NFSD_A_VERSION_ENABLED] = { .type = NLA_FLAG, },
};
/* NFSD_CMD_THREADS_SET - do */
static const struct nla_policy nfsd_threads_set_nl_policy[NFSD_A_SERVER_SCOPE + 1] = {
[NFSD_A_SERVER_THREADS] = { .type = NLA_U32, },
[NFSD_A_SERVER_GRACETIME] = { .type = NLA_U32, },
[NFSD_A_SERVER_LEASETIME] = { .type = NLA_U32, },
[NFSD_A_SERVER_SCOPE] = { .type = NLA_NUL_STRING, },
};
/* NFSD_CMD_VERSION_SET - do */
static const struct nla_policy nfsd_version_set_nl_policy[NFSD_A_SERVER_PROTO_VERSION + 1] = {
[NFSD_A_SERVER_PROTO_VERSION] = NLA_POLICY_NESTED(nfsd_version_nl_policy),
};
/* NFSD_CMD_LISTENER_SET - do */
static const struct nla_policy nfsd_listener_set_nl_policy[NFSD_A_SERVER_SOCK_ADDR + 1] = {
[NFSD_A_SERVER_SOCK_ADDR] = NLA_POLICY_NESTED(nfsd_sock_nl_policy),
};
/* Ops table for nfsd */
static const struct genl_split_ops nfsd_nl_ops[] = {
{
@ -19,6 +49,42 @@ static const struct genl_split_ops nfsd_nl_ops[] = {
.done = nfsd_nl_rpc_status_get_done,
.flags = GENL_CMD_CAP_DUMP,
},
{
.cmd = NFSD_CMD_THREADS_SET,
.doit = nfsd_nl_threads_set_doit,
.policy = nfsd_threads_set_nl_policy,
.maxattr = NFSD_A_SERVER_SCOPE,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
.cmd = NFSD_CMD_THREADS_GET,
.doit = nfsd_nl_threads_get_doit,
.flags = GENL_CMD_CAP_DO,
},
{
.cmd = NFSD_CMD_VERSION_SET,
.doit = nfsd_nl_version_set_doit,
.policy = nfsd_version_set_nl_policy,
.maxattr = NFSD_A_SERVER_PROTO_VERSION,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
.cmd = NFSD_CMD_VERSION_GET,
.doit = nfsd_nl_version_get_doit,
.flags = GENL_CMD_CAP_DO,
},
{
.cmd = NFSD_CMD_LISTENER_SET,
.doit = nfsd_nl_listener_set_doit,
.policy = nfsd_listener_set_nl_policy,
.maxattr = NFSD_A_SERVER_SOCK_ADDR,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
.cmd = NFSD_CMD_LISTENER_GET,
.doit = nfsd_nl_listener_get_doit,
.flags = GENL_CMD_CAP_DO,
},
};
struct genl_family nfsd_nl_family __ro_after_init = {


@ -11,11 +11,21 @@
#include <uapi/linux/nfsd_netlink.h>
/* Common nested types */
extern const struct nla_policy nfsd_sock_nl_policy[NFSD_A_SOCK_TRANSPORT_NAME + 1];
extern const struct nla_policy nfsd_version_nl_policy[NFSD_A_VERSION_ENABLED + 1];
int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb);
int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb);
int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info);
int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info);
int nfsd_nl_version_set_doit(struct sk_buff *skb, struct genl_info *info);
int nfsd_nl_version_get_doit(struct sk_buff *skb, struct genl_info *info);
int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info);
int nfsd_nl_listener_get_doit(struct sk_buff *skb, struct genl_info *info);
extern struct genl_family nfsd_nl_family;


@ -218,6 +218,7 @@ struct nfsd_net {
/* Simple check to find out if a given net was properly initialized */
#define nfsd_netns_ready(nn) ((nn)->sessionid_hashtbl)
extern bool nfsd_support_version(int vers);
extern void nfsd_netns_free_versions(struct nfsd_net *nn);
extern unsigned int nfsd_net_id;


@ -978,12 +978,12 @@ static int max_cb_time(struct net *net)
return max(((u32)nn->nfsd4_lease)/10, 1u) * HZ;
}
static struct workqueue_struct *callback_wq;
static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
{
trace_nfsd_cb_queue(cb->cb_clp, cb);
return queue_work(callback_wq, &cb->cb_work);
struct nfs4_client *clp = cb->cb_clp;
trace_nfsd_cb_queue(clp, cb);
return queue_work(clp->cl_callback_wq, &cb->cb_work);
}
static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
@ -1153,7 +1153,7 @@ void nfsd4_probe_callback(struct nfs4_client *clp)
void nfsd4_probe_callback_sync(struct nfs4_client *clp)
{
nfsd4_probe_callback(clp);
flush_workqueue(callback_wq);
flush_workqueue(clp->cl_callback_wq);
}
void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
@ -1372,19 +1372,6 @@ static const struct rpc_call_ops nfsd4_cb_ops = {
.rpc_release = nfsd4_cb_release,
};
int nfsd4_create_callback_queue(void)
{
callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
if (!callback_wq)
return -ENOMEM;
return 0;
}
void nfsd4_destroy_callback_queue(void)
{
destroy_workqueue(callback_wq);
}
/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
@ -1398,7 +1385,7 @@ void nfsd4_shutdown_callback(struct nfs4_client *clp)
* client, destroy the rpc client, and stop:
*/
nfsd4_run_cb(&clp->cl_cb_null);
flush_workqueue(callback_wq);
flush_workqueue(clp->cl_callback_wq);
nfsd41_cb_inflight_wait_complete(clp);
}
@ -1420,9 +1407,9 @@ static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
/*
* Note there isn't a lot of locking in this code; instead we depend on
* the fact that it is run from the callback_wq, which won't run two
* work items at once. So, for example, callback_wq handles all access
* of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
* the fact that it is run from clp->cl_callback_wq, which won't run two
* work items at once. So, for example, clp->cl_callback_wq handles all
* access of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
*/
static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{


@ -1737,7 +1737,7 @@ static void cleanup_async_copy(struct nfsd4_copy *copy)
nfs4_put_copy(copy);
}
static void nfsd4_send_cb_offload(struct nfsd4_copy *copy, __be32 nfserr)
static void nfsd4_send_cb_offload(struct nfsd4_copy *copy)
{
struct nfsd4_cb_offload *cbo;
@ -1747,12 +1747,12 @@ static void nfsd4_send_cb_offload(struct nfsd4_copy *copy, __be32 nfserr)
memcpy(&cbo->co_res, &copy->cp_res, sizeof(copy->cp_res));
memcpy(&cbo->co_fh, &copy->fh, sizeof(copy->fh));
cbo->co_nfserr = nfserr;
cbo->co_nfserr = copy->nfserr;
nfsd4_init_cb(&cbo->co_cb, copy->cp_clp, &nfsd4_cb_offload_ops,
NFSPROC4_CLNT_CB_OFFLOAD);
trace_nfsd_cb_offload(copy->cp_clp, &cbo->co_res.cb_stateid,
&cbo->co_fh, copy->cp_count, nfserr);
&cbo->co_fh, copy->cp_count, copy->nfserr);
nfsd4_run_cb(&cbo->co_cb);
}
@ -1766,7 +1766,6 @@ static void nfsd4_send_cb_offload(struct nfsd4_copy *copy, __be32 nfserr)
static int nfsd4_do_async_copy(void *data)
{
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
__be32 nfserr;
trace_nfsd_copy_do_async(copy);
if (nfsd4_ssc_is_inter(copy)) {
@ -1777,24 +1776,25 @@ static int nfsd4_do_async_copy(void *data)
if (IS_ERR(filp)) {
switch (PTR_ERR(filp)) {
case -EBADF:
nfserr = nfserr_wrong_type;
copy->nfserr = nfserr_wrong_type;
break;
default:
nfserr = nfserr_offload_denied;
copy->nfserr = nfserr_offload_denied;
}
/* ss_mnt will be unmounted by the laundromat */
goto do_callback;
}
nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
false);
copy->nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
false);
nfsd4_cleanup_inter_ssc(copy->ss_nsui, filp, copy->nf_dst);
} else {
nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
copy->nf_dst->nf_file, false);
copy->nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
copy->nf_dst->nf_file, false);
}
do_callback:
nfsd4_send_cb_offload(copy, nfserr);
set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
nfsd4_send_cb_offload(copy);
cleanup_async_copy(copy);
return 0;
}
@ -1807,6 +1807,13 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd4_copy *async_copy = NULL;
/*
* Currently, async COPY is not reliable. Force all COPY
* requests to be synchronous to avoid client application
* hangs waiting for COPY completion.
*/
nfsd4_copy_set_sync(copy, true);
copy->cp_clp = cstate->clp;
if (nfsd4_ssc_is_inter(copy)) {
trace_nfsd_copy_inter(copy);
@ -2003,11 +2010,16 @@ nfsd4_offload_status(struct svc_rqst *rqstp,
struct nfsd4_copy *copy;
struct nfs4_client *clp = cstate->clp;
os->completed = false;
spin_lock(&clp->async_lock);
copy = find_async_copy_locked(clp, &os->stateid);
if (copy)
if (copy) {
os->count = copy->cp_res.wr_bytes_written;
else
if (test_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags)) {
os->completed = true;
os->status = copy->nfserr;
}
} else
status = nfserr_bad_stateid;
spin_unlock(&clp->async_lock);
@ -2154,6 +2166,29 @@ nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return status == nfserr_same ? nfs_ok : status;
}
static __be32
nfsd4_get_dir_delegation(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
/*
* RFC 8881, section 18.39.3 says:
*
* "The server may refuse to grant the delegation. In that case, the
* server will return NFS4ERR_DIRDELEG_UNAVAIL."
*
* This is sub-optimal, since it means that the server would need to
* abort compound processing just because the delegation wasn't
* available. RFC8881bis should change this to allow the server to
* return NFS4_OK with a non-fatal status of GDD4_UNAVAIL in this
* situation.
*/
gdd->gddrnf_status = GDD4_UNAVAIL;
return nfs_ok;
}
#ifdef CONFIG_NFSD_PNFS
static const struct nfsd4_layout_ops *
nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
@ -3082,6 +3117,18 @@ static u32 nfsd4_copy_notify_rsize(const struct svc_rqst *rqstp,
* sizeof(__be32);
}
static u32 nfsd4_get_dir_delegation_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
{
return (op_encode_hdr_size +
1 /* gddr_status */ +
op_encode_verifier_maxsz +
op_encode_stateid_maxsz +
2 /* gddr_notification */ +
2 /* gddr_child_attributes */ +
2 /* gddr_dir_attributes */);
}
#ifdef CONFIG_NFSD_PNFS
static u32 nfsd4_getdeviceinfo_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
@ -3470,6 +3517,12 @@ static const struct nfsd4_operation nfsd4_ops[] = {
.op_get_currentstateid = nfsd4_get_freestateid,
.op_rsize_bop = nfsd4_only_status_rsize,
},
[OP_GET_DIR_DELEGATION] = {
.op_func = nfsd4_get_dir_delegation,
.op_flags = OP_MODIFIES_SOMETHING,
.op_name = "OP_GET_DIR_DELEGATION",
.op_rsize_bop = nfsd4_get_dir_delegation_rsize,
},
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = {
.op_func = nfsd4_getdeviceinfo,


@ -541,7 +541,7 @@ same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
}
static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
struct nfs4_client *clp)
{
struct nfs4_stateowner *so;
@ -558,18 +558,6 @@ find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
return NULL;
}
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
struct nfs4_client *clp)
{
struct nfs4_openowner *oo;
spin_lock(&clp->cl_lock);
oo = find_openstateowner_str_locked(hashval, open, clp);
spin_unlock(&clp->cl_lock);
return oo;
}
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
@ -1409,11 +1397,16 @@ static void
recalculate_deny_mode(struct nfs4_file *fp)
{
struct nfs4_ol_stateid *stp;
u32 old_deny;
spin_lock(&fp->fi_lock);
old_deny = fp->fi_share_deny;
fp->fi_share_deny = 0;
list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
if (fp->fi_share_deny == old_deny)
break;
}
spin_unlock(&fp->fi_lock);
}
@ -2245,6 +2238,10 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name,
GFP_KERNEL);
if (!clp->cl_ownerstr_hashtbl)
goto err_no_hashtbl;
clp->cl_callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
if (!clp->cl_callback_wq)
goto err_no_callback_wq;
for (i = 0; i < OWNER_HASH_SIZE; i++)
INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
INIT_LIST_HEAD(&clp->cl_sessions);
@ -2267,6 +2264,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name,
spin_lock_init(&clp->cl_lock);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
err_no_callback_wq:
kfree(clp->cl_ownerstr_hashtbl);
err_no_hashtbl:
kfree(clp->cl_name.data);
err_no_name:
@ -2280,6 +2279,7 @@ static void __free_client(struct kref *k)
struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
free_svc_cred(&clp->cl_cred);
destroy_workqueue(clp->cl_callback_wq);
kfree(clp->cl_ownerstr_hashtbl);
kfree(clp->cl_name.data);
kfree(clp->cl_nii_domain.data);
@ -2352,7 +2352,11 @@ unhash_client(struct nfs4_client *clp)
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
if (atomic_read(&clp->cl_rpc_users))
int users = atomic_read(&clp->cl_rpc_users);
trace_nfsd_mark_client_expired(clp, users);
if (users)
return nfserr_jukebox;
unhash_client_locked(clp);
return nfs_ok;
@ -3641,12 +3645,8 @@ out_nolock:
return status;
}
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse)
{
dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
slot_seqid);
/* The slot is in use, and no response has been sent. */
if (slot_inuse) {
if (seqid == slot_seqid)
@ -3823,10 +3823,13 @@ nfsd4_create_session(struct svc_rqst *rqstp,
}
/* RFC 8881 Section 18.36.4 Phase 2: Sequence ID processing. */
if (conf)
if (conf) {
cs_slot = &conf->cl_cs_slot;
else
trace_nfsd_slot_seqid_conf(conf, cr_ses);
} else {
cs_slot = &unconf->cl_cs_slot;
trace_nfsd_slot_seqid_unconf(unconf, cr_ses);
}
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
switch (status) {
case nfs_ok:
@ -4221,6 +4224,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* sr_highest_slotid and the sr_target_slot id to maxslots */
seq->maxslots = session->se_fchannel.maxreqs;
trace_nfsd_slot_seqid_sequence(clp, seq, slot);
status = check_slot_seqid(seq->seqid, slot->sl_seqid,
slot->sl_flags & NFSD4_SLOT_INUSE);
if (status == nfserr_replay_cache) {
@ -4662,21 +4666,32 @@ nfsd4_init_leases_net(struct nfsd_net *nn)
atomic_set(&nn->nfsd_courtesy_clients, 0);
}
enum rp_lock {
RP_UNLOCKED,
RP_LOCKED,
RP_UNHASHED,
};
static void init_nfs4_replay(struct nfs4_replay *rp)
{
rp->rp_status = nfserr_serverfault;
rp->rp_buflen = 0;
rp->rp_buf = rp->rp_ibuf;
mutex_init(&rp->rp_mutex);
atomic_set(&rp->rp_locked, RP_UNLOCKED);
}
static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
struct nfs4_stateowner *so)
static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
struct nfs4_stateowner *so)
{
if (!nfsd4_has_session(cstate)) {
mutex_lock(&so->so_replay.rp_mutex);
wait_var_event(&so->so_replay.rp_locked,
atomic_cmpxchg(&so->so_replay.rp_locked,
RP_UNLOCKED, RP_LOCKED) != RP_LOCKED);
if (atomic_read(&so->so_replay.rp_locked) == RP_UNHASHED)
return -EAGAIN;
cstate->replay_owner = nfs4_get_stateowner(so);
}
return 0;
}
void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
@ -4685,7 +4700,8 @@ void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
if (so != NULL) {
cstate->replay_owner = NULL;
mutex_unlock(&so->so_replay.rp_mutex);
atomic_set(&so->so_replay.rp_locked, RP_UNLOCKED);
wake_up_var(&so->so_replay.rp_locked);
nfs4_put_stateowner(so);
}
}
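
The rp_mutex to rp_locked conversion in this and the following hunks replaces a sleeping mutex with a tri-state atomic driven by atomic_cmpxchg() and wait_var_event()/wake_up_var(), so a waiter can be told the open owner was unhashed and retry with -EAGAIN instead of blocking on a dying owner. Distilled to its essentials, the pattern looks like this (illustration only, not code from this merge):

/* Illustration of the rp_locked pattern used above -- not from this merge. */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait_bit.h>

enum rp_lock { RP_UNLOCKED, RP_LOCKED, RP_UNHASHED };

static int replay_lock(atomic_t *rp_locked)
{
	/* Sleep until UNLOCKED -> LOCKED succeeds or the owner is unhashed. */
	wait_var_event(rp_locked,
		       atomic_cmpxchg(rp_locked, RP_UNLOCKED,
				      RP_LOCKED) != RP_LOCKED);
	return atomic_read(rp_locked) == RP_UNHASHED ? -EAGAIN : 0;
}

static void replay_unlock(atomic_t *rp_locked)
{
	atomic_set(rp_locked, RP_UNLOCKED);
	wake_up_var(rp_locked);		/* let the next waiter in */
}

static void replay_unhash(atomic_t *rp_locked)
{
	atomic_set(rp_locked, RP_UNHASHED);
	wake_up_var(rp_locked);		/* waiters see -EAGAIN and retry */
}
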
@ -4866,34 +4882,46 @@ nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
}
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
struct nfsd4_compound_state *cstate)
find_or_alloc_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
struct nfsd4_compound_state *cstate)
{
struct nfs4_client *clp = cstate->clp;
struct nfs4_openowner *oo, *ret;
struct nfs4_openowner *oo, *new = NULL;
oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
if (!oo)
return NULL;
oo->oo_owner.so_ops = &openowner_ops;
oo->oo_owner.so_is_open_owner = 1;
oo->oo_owner.so_seqid = open->op_seqid;
oo->oo_flags = 0;
if (nfsd4_has_session(cstate))
oo->oo_flags |= NFS4_OO_CONFIRMED;
oo->oo_time = 0;
oo->oo_last_closed_stid = NULL;
INIT_LIST_HEAD(&oo->oo_close_lru);
retry:
spin_lock(&clp->cl_lock);
ret = find_openstateowner_str_locked(strhashval, open, clp);
if (ret == NULL) {
hash_openowner(oo, clp, strhashval);
ret = oo;
} else
nfs4_free_stateowner(&oo->oo_owner);
oo = find_openstateowner_str(strhashval, open, clp);
if (!oo && new) {
hash_openowner(new, clp, strhashval);
spin_unlock(&clp->cl_lock);
return new;
}
spin_unlock(&clp->cl_lock);
return ret;
if (oo && !(oo->oo_flags & NFS4_OO_CONFIRMED)) {
/* Replace unconfirmed owners without checking for replay. */
release_openowner(oo);
oo = NULL;
}
if (oo) {
if (new)
nfs4_free_stateowner(&new->oo_owner);
return oo;
}
new = alloc_stateowner(openowner_slab, &open->op_owner, clp);
if (!new)
return NULL;
new->oo_owner.so_ops = &openowner_ops;
new->oo_owner.so_is_open_owner = 1;
new->oo_owner.so_seqid = open->op_seqid;
new->oo_flags = 0;
if (nfsd4_has_session(cstate))
new->oo_flags |= NFS4_OO_CONFIRMED;
new->oo_time = 0;
new->oo_last_closed_stid = NULL;
INIT_LIST_HEAD(&new->oo_close_lru);
goto retry;
}
static struct nfs4_ol_stateid *
@ -4969,7 +4997,11 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
* Wait for the refcount to drop to 2. Since it has been unhashed,
* there should be no danger of the refcount going back up again at
* this point.
* Some threads with a reference might be waiting for rp_locked,
* so tell them to stop waiting.
*/
atomic_set(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
wake_up_var(&oo->oo_owner.so_replay.rp_locked);
wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
release_all_access(s);
@ -5342,27 +5374,19 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
clp = cstate->clp;
strhashval = ownerstr_hashval(&open->op_owner);
oo = find_openstateowner_str(strhashval, open, clp);
retry:
oo = find_or_alloc_open_stateowner(strhashval, open, cstate);
open->op_openowner = oo;
if (!oo) {
goto new_owner;
}
if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
/* Replace unconfirmed owners without checking for replay. */
release_openowner(oo);
open->op_openowner = NULL;
goto new_owner;
if (!oo)
return nfserr_jukebox;
if (nfsd4_cstate_assign_replay(cstate, &oo->oo_owner) == -EAGAIN) {
nfs4_put_stateowner(&oo->oo_owner);
goto retry;
}
status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
if (status)
return status;
goto alloc_stateid;
new_owner:
oo = alloc_init_open_stateowner(strhashval, open, cstate);
if (oo == NULL)
return nfserr_jukebox;
open->op_openowner = oo;
alloc_stateid:
open->op_stp = nfs4_alloc_open_stateid(clp);
if (!open->op_stp)
return nfserr_jukebox;
@ -6133,12 +6157,8 @@ out:
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
struct nfsd4_open *open)
{
if (open->op_openowner) {
struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
nfsd4_cstate_assign_replay(cstate, so);
nfs4_put_stateowner(so);
}
if (open->op_openowner)
nfs4_put_stateowner(&open->op_openowner->oo_owner);
if (open->op_file)
kmem_cache_free(file_slab, open->op_file);
if (open->op_stp)
@ -7202,12 +7222,16 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
trace_nfsd_preprocess(seqid, stateid);
*stpp = NULL;
retry:
status = nfsd4_lookup_stateid(cstate, stateid,
typemask, statusmask, &s, nn);
if (status)
return status;
stp = openlockstateid(s);
nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
if (nfsd4_cstate_assign_replay(cstate, stp->st_stateowner) == -EAGAIN) {
nfs4_put_stateowner(stp->st_stateowner);
goto retry;
}
status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
if (!status)
@ -7349,7 +7373,7 @@ out:
return status;
}
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
static bool nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
struct nfs4_client *clp = s->st_stid.sc_client;
bool unhashed;
@ -7366,11 +7390,11 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
list_for_each_entry(stp, &reaplist, st_locks)
nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
free_ol_stateid_reaplist(&reaplist);
return false;
} else {
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
if (unhashed)
move_to_close_lru(s, clp->net);
return unhashed;
}
}
@ -7386,6 +7410,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfs4_ol_stateid *stp;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
bool need_move_to_close_list;
dprintk("NFSD: nfsd4_close on file %pd\n",
cstate->current_fh.fh_dentry);
@ -7410,8 +7435,10 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
*/
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
nfsd4_close_open_stateid(stp);
need_move_to_close_list = nfsd4_close_open_stateid(stp);
mutex_unlock(&stp->st_mutex);
if (need_move_to_close_list)
move_to_close_lru(stp, net);
/* v4.1+ suggests that we send a special stateid in here, since the
* clients should just ignore this anyway. Since this is not useful
@ -8625,12 +8652,6 @@ nfs4_state_start(void)
if (ret)
return ret;
ret = nfsd4_create_callback_queue();
if (ret) {
rhltable_destroy(&nfs4_file_rhltable);
return ret;
}
set_max_delegations();
return 0;
}
@ -8671,7 +8692,6 @@ nfs4_state_shutdown_net(struct net *net)
void
nfs4_state_shutdown(void)
{
nfsd4_destroy_callback_queue();
rhltable_destroy(&nfs4_file_rhltable);
}


@ -1732,6 +1732,35 @@ nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
return nfsd4_decode_stateid4(argp, &free_stateid->fr_stateid);
}
static __be32
nfsd4_decode_get_dir_delegation(struct nfsd4_compoundargs *argp,
union nfsd4_op_u *u)
{
struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
__be32 status;
memset(gdd, 0, sizeof(*gdd));
if (xdr_stream_decode_bool(argp->xdr, &gdd->gdda_signal_deleg_avail) < 0)
return nfserr_bad_xdr;
status = nfsd4_decode_bitmap4(argp, gdd->gdda_notification_types,
ARRAY_SIZE(gdd->gdda_notification_types));
if (status)
return status;
status = nfsd4_decode_nfstime4(argp, &gdd->gdda_child_attr_delay);
if (status)
return status;
status = nfsd4_decode_nfstime4(argp, &gdd->gdda_dir_attr_delay);
if (status)
return status;
status = nfsd4_decode_bitmap4(argp, gdd->gdda_child_attributes,
ARRAY_SIZE(gdd->gdda_child_attributes));
if (status)
return status;
return nfsd4_decode_bitmap4(argp, gdd->gdda_dir_attributes,
ARRAY_SIZE(gdd->gdda_dir_attributes));
}
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
@ -2370,7 +2399,7 @@ static const nfsd4_dec nfsd4_dec_ops[] = {
[OP_CREATE_SESSION] = nfsd4_decode_create_session,
[OP_DESTROY_SESSION] = nfsd4_decode_destroy_session,
[OP_FREE_STATEID] = nfsd4_decode_free_stateid,
[OP_GET_DIR_DELEGATION] = nfsd4_decode_notsupp,
[OP_GET_DIR_DELEGATION] = nfsd4_decode_get_dir_delegation,
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = nfsd4_decode_getdeviceinfo,
[OP_GETDEVICELIST] = nfsd4_decode_notsupp,
@ -4963,6 +4992,49 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfs_ok;
}
static __be32
nfsd4_encode_get_dir_delegation(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
struct xdr_stream *xdr = resp->xdr;
__be32 status = nfserr_resource;
switch(gdd->gddrnf_status) {
case GDD4_OK:
if (xdr_stream_encode_u32(xdr, GDD4_OK) != XDR_UNIT)
break;
status = nfsd4_encode_verifier4(xdr, &gdd->gddr_cookieverf);
if (status)
break;
status = nfsd4_encode_stateid4(xdr, &gdd->gddr_stateid);
if (status)
break;
status = nfsd4_encode_bitmap4(xdr, gdd->gddr_notification[0], 0, 0);
if (status)
break;
status = nfsd4_encode_bitmap4(xdr, gdd->gddr_child_attributes[0],
gdd->gddr_child_attributes[1],
gdd->gddr_child_attributes[2]);
if (status)
break;
status = nfsd4_encode_bitmap4(xdr, gdd->gddr_dir_attributes[0],
gdd->gddr_dir_attributes[1],
gdd->gddr_dir_attributes[2]);
break;
default:
pr_warn("nfsd: bad gddrnf_status (%u)\n", gdd->gddrnf_status);
gdd->gddrnf_will_signal_deleg_avail = 0;
fallthrough;
case GDD4_UNAVAIL:
if (xdr_stream_encode_u32(xdr, GDD4_UNAVAIL) != XDR_UNIT)
break;
status = nfsd4_encode_bool(xdr, gdd->gddrnf_will_signal_deleg_avail);
break;
}
return status;
}
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_encode_device_addr4(struct xdr_stream *xdr,
@ -5199,7 +5271,12 @@ nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
if (nfserr != nfs_ok)
return nfserr;
/* osr_complete<1> */
if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
if (os->completed) {
if (xdr_stream_encode_u32(xdr, 1) != XDR_UNIT)
return nfserr_resource;
if (xdr_stream_encode_be32(xdr, os->status) != XDR_UNIT)
return nfserr_resource;
} else if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
return nfserr_resource;
return nfs_ok;
}
@ -5579,7 +5656,7 @@ static const nfsd4_enc nfsd4_enc_ops[] = {
[OP_CREATE_SESSION] = nfsd4_encode_create_session,
[OP_DESTROY_SESSION] = nfsd4_encode_noop,
[OP_FREE_STATEID] = nfsd4_encode_noop,
[OP_GET_DIR_DELEGATION] = nfsd4_encode_noop,
[OP_GET_DIR_DELEGATION] = nfsd4_encode_get_dir_delegation,
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = nfsd4_encode_getdeviceinfo,
[OP_GETDEVICELIST] = nfsd4_encode_noop,


@ -15,6 +15,7 @@
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/svc.h>
#include <linux/module.h>
#include <linux/fsnotify.h>
@ -48,12 +49,10 @@ enum {
NFSD_MaxBlkSize,
NFSD_MaxConnections,
NFSD_Filecache,
#ifdef CONFIG_NFSD_V4
NFSD_Leasetime,
NFSD_Gracetime,
NFSD_RecoveryDir,
NFSD_V4EndGrace,
#endif
NFSD_MaxReserved
};
@ -406,7 +405,9 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
if (newthreads < 0)
return -EINVAL;
trace_nfsd_ctl_threads(net, newthreads);
rv = nfsd_svc(newthreads, net, file->f_cred);
mutex_lock(&nfsd_mutex);
rv = nfsd_svc(newthreads, net, file->f_cred, NULL);
mutex_unlock(&nfsd_mutex);
if (rv < 0)
return rv;
} else
@ -1360,7 +1361,9 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
#endif
[NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO},
#endif
/* last one */ {""}
@ -1651,6 +1654,518 @@ int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb)
return 0;
}
/**
* nfsd_nl_threads_set_doit - set the number of running threads
* @skb: reply buffer
* @info: netlink metadata and command arguments
*
* Return 0 on success or a negative errno.
*/
int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
{
int nthreads = 0, count = 0, nrpools, ret = -EOPNOTSUPP, rem;
struct net *net = genl_info_net(info);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
const struct nlattr *attr;
const char *scope = NULL;
if (GENL_REQ_ATTR_CHECK(info, NFSD_A_SERVER_THREADS))
return -EINVAL;
/* count number of SERVER_THREADS values */
nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
if (nla_type(attr) == NFSD_A_SERVER_THREADS)
count++;
}
mutex_lock(&nfsd_mutex);
nrpools = nfsd_nrpools(net);
if (nrpools && count > nrpools)
count = nrpools;
/* XXX: make this handle non-global pool-modes */
if (count > 1)
goto out_unlock;
nthreads = nla_get_u32(info->attrs[NFSD_A_SERVER_THREADS]);
if (info->attrs[NFSD_A_SERVER_GRACETIME] ||
info->attrs[NFSD_A_SERVER_LEASETIME] ||
info->attrs[NFSD_A_SERVER_SCOPE]) {
ret = -EBUSY;
if (nn->nfsd_serv && nn->nfsd_serv->sv_nrthreads)
goto out_unlock;
ret = -EINVAL;
attr = info->attrs[NFSD_A_SERVER_GRACETIME];
if (attr) {
u32 gracetime = nla_get_u32(attr);
if (gracetime < 10 || gracetime > 3600)
goto out_unlock;
nn->nfsd4_grace = gracetime;
}
attr = info->attrs[NFSD_A_SERVER_LEASETIME];
if (attr) {
u32 leasetime = nla_get_u32(attr);
if (leasetime < 10 || leasetime > 3600)
goto out_unlock;
nn->nfsd4_lease = leasetime;
}
attr = info->attrs[NFSD_A_SERVER_SCOPE];
if (attr)
scope = nla_data(attr);
}
ret = nfsd_svc(nthreads, net, get_current_cred(), scope);
out_unlock:
mutex_unlock(&nfsd_mutex);
return ret == nthreads ? 0 : ret;
}
/**
* nfsd_nl_threads_get_doit - get the number of running threads
* @skb: reply buffer
* @info: netlink metadata and command arguments
*
* Return 0 on success or a negative errno.
*/
int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
void *hdr;
int err;
skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = genlmsg_iput(skb, info);
if (!hdr) {
err = -EMSGSIZE;
goto err_free_msg;
}
mutex_lock(&nfsd_mutex);
err = nla_put_u32(skb, NFSD_A_SERVER_GRACETIME,
nn->nfsd4_grace) ||
nla_put_u32(skb, NFSD_A_SERVER_LEASETIME,
nn->nfsd4_lease) ||
nla_put_string(skb, NFSD_A_SERVER_SCOPE,
nn->nfsd_name);
if (err)
goto err_unlock;
if (nn->nfsd_serv) {
int i;
for (i = 0; i < nfsd_nrpools(net); ++i) {
struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i];
err = nla_put_u32(skb, NFSD_A_SERVER_THREADS,
atomic_read(&sp->sp_nrthreads));
if (err)
goto err_unlock;
}
} else {
err = nla_put_u32(skb, NFSD_A_SERVER_THREADS, 0);
if (err)
goto err_unlock;
}
mutex_unlock(&nfsd_mutex);
genlmsg_end(skb, hdr);
return genlmsg_reply(skb, info);
err_unlock:
mutex_unlock(&nfsd_mutex);
err_free_msg:
nlmsg_free(skb);
return err;
}
/**
* nfsd_nl_version_set_doit - set the nfs enabled versions
* @skb: reply buffer
* @info: netlink metadata and command arguments
*
* Return 0 on success or a negative errno.
*/
int nfsd_nl_version_set_doit(struct sk_buff *skb, struct genl_info *info)
{
const struct nlattr *attr;
struct nfsd_net *nn;
int i, rem;
if (GENL_REQ_ATTR_CHECK(info, NFSD_A_SERVER_PROTO_VERSION))
return -EINVAL;
mutex_lock(&nfsd_mutex);
nn = net_generic(genl_info_net(info), nfsd_net_id);
if (nn->nfsd_serv) {
mutex_unlock(&nfsd_mutex);
return -EBUSY;
}
/* clear current supported versions. */
nfsd_vers(nn, 2, NFSD_CLEAR);
nfsd_vers(nn, 3, NFSD_CLEAR);
for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
nfsd_minorversion(nn, i, NFSD_CLEAR);
nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
struct nlattr *tb[NFSD_A_VERSION_MAX + 1];
u32 major, minor = 0;
bool enabled;
if (nla_type(attr) != NFSD_A_SERVER_PROTO_VERSION)
continue;
if (nla_parse_nested(tb, NFSD_A_VERSION_MAX, attr,
nfsd_version_nl_policy, info->extack) < 0)
continue;
if (!tb[NFSD_A_VERSION_MAJOR])
continue;
major = nla_get_u32(tb[NFSD_A_VERSION_MAJOR]);
if (tb[NFSD_A_VERSION_MINOR])
minor = nla_get_u32(tb[NFSD_A_VERSION_MINOR]);
enabled = nla_get_flag(tb[NFSD_A_VERSION_ENABLED]);
switch (major) {
case 4:
nfsd_minorversion(nn, minor, enabled ? NFSD_SET : NFSD_CLEAR);
break;
case 3:
case 2:
if (!minor)
nfsd_vers(nn, major, enabled ? NFSD_SET : NFSD_CLEAR);
break;
default:
break;
}
}
mutex_unlock(&nfsd_mutex);
return 0;
}
/**
* nfsd_nl_version_get_doit - get the enabled status for all supported nfs versions
* @skb: reply buffer
* @info: netlink metadata and command arguments
*
* Return 0 on success or a negative errno.
*/
int nfsd_nl_version_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nfsd_net *nn;
int i, err;
void *hdr;
skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = genlmsg_iput(skb, info);
if (!hdr) {
err = -EMSGSIZE;
goto err_free_msg;
}
mutex_lock(&nfsd_mutex);
nn = net_generic(genl_info_net(info), nfsd_net_id);
for (i = 2; i <= 4; i++) {
int j;
for (j = 0; j <= NFSD_SUPPORTED_MINOR_VERSION; j++) {
struct nlattr *attr;
/* Don't record any versions the kernel doesn't have
* compiled in
*/
if (!nfsd_support_version(i))
continue;
/* NFSv{2,3} does not support minor numbers */
if (i < 4 && j)
continue;
attr = nla_nest_start(skb,
NFSD_A_SERVER_PROTO_VERSION);
if (!attr) {
err = -EINVAL;
goto err_nfsd_unlock;
}
if (nla_put_u32(skb, NFSD_A_VERSION_MAJOR, i) ||
nla_put_u32(skb, NFSD_A_VERSION_MINOR, j)) {
err = -EINVAL;
goto err_nfsd_unlock;
}
/* Set the enabled flag if the version is enabled */
if (nfsd_vers(nn, i, NFSD_TEST) &&
(i < 4 || nfsd_minorversion(nn, j, NFSD_TEST)) &&
nla_put_flag(skb, NFSD_A_VERSION_ENABLED)) {
err = -EINVAL;
goto err_nfsd_unlock;
}
nla_nest_end(skb, attr);
}
}
mutex_unlock(&nfsd_mutex);
genlmsg_end(skb, hdr);
return genlmsg_reply(skb, info);
err_nfsd_unlock:
mutex_unlock(&nfsd_mutex);
err_free_msg:
nlmsg_free(skb);
return err;
}
/**
* nfsd_nl_listener_set_doit - set the nfs running sockets
* @skb: reply buffer
* @info: netlink metadata and command arguments
*
* Return 0 on success or a negative errno.
*/
int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
struct svc_xprt *xprt, *tmp;
const struct nlattr *attr;
struct svc_serv *serv;
LIST_HEAD(permsocks);
struct nfsd_net *nn;
int err, rem;
mutex_lock(&nfsd_mutex);
err = nfsd_create_serv(net);
if (err) {
mutex_unlock(&nfsd_mutex);
return err;
}
nn = net_generic(net, nfsd_net_id);
serv = nn->nfsd_serv;
spin_lock_bh(&serv->sv_lock);
/* Move all of the old listener sockets to a temp list */
list_splice_init(&serv->sv_permsocks, &permsocks);
/*
* Walk the list of server_socks from userland and move any that match
* back to sv_permsocks
*/
nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
struct nlattr *tb[NFSD_A_SOCK_MAX + 1];
const char *xcl_name;
struct sockaddr *sa;
if (nla_type(attr) != NFSD_A_SERVER_SOCK_ADDR)
continue;
if (nla_parse_nested(tb, NFSD_A_SOCK_MAX, attr,
nfsd_sock_nl_policy, info->extack) < 0)
continue;
if (!tb[NFSD_A_SOCK_ADDR] || !tb[NFSD_A_SOCK_TRANSPORT_NAME])
continue;
if (nla_len(tb[NFSD_A_SOCK_ADDR]) < sizeof(*sa))
continue;
xcl_name = nla_data(tb[NFSD_A_SOCK_TRANSPORT_NAME]);
sa = nla_data(tb[NFSD_A_SOCK_ADDR]);
/* Put back any matching sockets */
list_for_each_entry_safe(xprt, tmp, &permsocks, xpt_list) {
/* This shouldn't be possible */
if (WARN_ON_ONCE(xprt->xpt_net != net)) {
list_move(&xprt->xpt_list, &serv->sv_permsocks);
continue;
}
/* If everything matches, put it back */
if (!strcmp(xprt->xpt_class->xcl_name, xcl_name) &&
rpc_cmp_addr_port(sa, (struct sockaddr *)&xprt->xpt_local)) {
list_move(&xprt->xpt_list, &serv->sv_permsocks);
break;
}
}
}
/* For now, no removing old sockets while server is running */
if (serv->sv_nrthreads && !list_empty(&permsocks)) {
list_splice_init(&permsocks, &serv->sv_permsocks);
spin_unlock_bh(&serv->sv_lock);
err = -EBUSY;
goto out_unlock_mtx;
}
/* Close the remaining sockets on the permsocks list */
while (!list_empty(&permsocks)) {
xprt = list_first_entry(&permsocks, struct svc_xprt, xpt_list);
list_move(&xprt->xpt_list, &serv->sv_permsocks);
/*
* Newly-created sockets are born with the BUSY bit set. Clear
* it if there are no threads, since nothing can pick it up
* in that case.
*/
if (!serv->sv_nrthreads)
clear_bit(XPT_BUSY, &xprt->xpt_flags);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
spin_unlock_bh(&serv->sv_lock);
svc_xprt_close(xprt);
spin_lock_bh(&serv->sv_lock);
}
spin_unlock_bh(&serv->sv_lock);
/* walk list of addrs again, open any that still don't exist */
nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
struct nlattr *tb[NFSD_A_SOCK_MAX + 1];
const char *xcl_name;
struct sockaddr *sa;
int ret;
if (nla_type(attr) != NFSD_A_SERVER_SOCK_ADDR)
continue;
if (nla_parse_nested(tb, NFSD_A_SOCK_MAX, attr,
nfsd_sock_nl_policy, info->extack) < 0)
continue;
if (!tb[NFSD_A_SOCK_ADDR] || !tb[NFSD_A_SOCK_TRANSPORT_NAME])
continue;
if (nla_len(tb[NFSD_A_SOCK_ADDR]) < sizeof(*sa))
continue;
xcl_name = nla_data(tb[NFSD_A_SOCK_TRANSPORT_NAME]);
sa = nla_data(tb[NFSD_A_SOCK_ADDR]);
xprt = svc_find_listener(serv, xcl_name, net, sa);
if (xprt) {
svc_xprt_put(xprt);
continue;
}
ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa,
SVC_SOCK_ANONYMOUS,
get_current_cred());
/* always save the latest error */
if (ret < 0)
err = ret;
}
if (!serv->sv_nrthreads && list_empty(&nn->nfsd_serv->sv_permsocks))
nfsd_destroy_serv(net);
out_unlock_mtx:
mutex_unlock(&nfsd_mutex);
return err;
}
/**
* nfsd_nl_listener_get_doit - get the nfs running listeners
* @skb: reply buffer
* @info: netlink metadata and command arguments
*
* Return 0 on success or a negative errno.
*/
int nfsd_nl_listener_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct svc_xprt *xprt;
struct svc_serv *serv;
struct nfsd_net *nn;
void *hdr;
int err;
skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = genlmsg_iput(skb, info);
if (!hdr) {
err = -EMSGSIZE;
goto err_free_msg;
}
mutex_lock(&nfsd_mutex);
nn = net_generic(genl_info_net(info), nfsd_net_id);
/* no nfs server? Just send empty socket list */
if (!nn->nfsd_serv)
goto out_unlock_mtx;
serv = nn->nfsd_serv;
spin_lock_bh(&serv->sv_lock);
list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
struct nlattr *attr;
attr = nla_nest_start(skb, NFSD_A_SERVER_SOCK_ADDR);
if (!attr) {
err = -EINVAL;
goto err_serv_unlock;
}
if (nla_put_string(skb, NFSD_A_SOCK_TRANSPORT_NAME,
xprt->xpt_class->xcl_name) ||
nla_put(skb, NFSD_A_SOCK_ADDR,
sizeof(struct sockaddr_storage),
&xprt->xpt_local)) {
err = -EINVAL;
goto err_serv_unlock;
}
nla_nest_end(skb, attr);
}
spin_unlock_bh(&serv->sv_lock);
out_unlock_mtx:
mutex_unlock(&nfsd_mutex);
genlmsg_end(skb, hdr);
return genlmsg_reply(skb, info);
err_serv_unlock:
spin_unlock_bh(&serv->sv_lock);
mutex_unlock(&nfsd_mutex);
err_free_msg:
nlmsg_free(skb);
return err;
}
/**
* nfsd_net_init - Prepare the nfsd_net portion of a new net namespace
* @net: a freshly-created network namespace
@ -1672,7 +2187,8 @@ static __net_init int nfsd_net_init(struct net *net)
retval = nfsd_idmap_init(net);
if (retval)
goto out_idmap_error;
retval = nfsd_stat_counters_init(nn);
retval = percpu_counter_init_many(nn->counter, 0, GFP_KERNEL,
NFSD_STATS_COUNTERS_NUM);
if (retval)
goto out_repcache_error;
memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
@ -1704,7 +2220,7 @@ static __net_exit void nfsd_net_exit(struct net *net)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nfsd_proc_stat_shutdown(net);
nfsd_stat_counters_destroy(nn);
percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
nfsd_idmap_shutdown(net);
nfsd_export_shutdown(net);
nfsd_netns_free_versions(nn);


@ -103,7 +103,7 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp,
/*
* Function prototypes.
*/
int nfsd_svc(int nrservs, struct net *net, const struct cred *cred);
int nfsd_svc(int nrservs, struct net *net, const struct cred *cred, const char *scope);
int nfsd_dispatch(struct svc_rqst *rqstp);
int nfsd_nrthreads(struct net *);
@ -230,7 +230,6 @@ void nfsd_lockd_shutdown(void);
#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC)
#define nfserr_rofs cpu_to_be32(NFSERR_ROFS)
#define nfserr_mlink cpu_to_be32(NFSERR_MLINK)
#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP)
#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG)
#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY)
#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT)


@ -573,7 +573,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
_fh_update(fhp, exp, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) {
fh_put(fhp);
return nfserr_opnotsupp;
return nfserr_stale;
}
return 0;
@ -599,7 +599,7 @@ fh_update(struct svc_fh *fhp)
_fh_update(fhp, fhp->fh_export, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
return nfserr_opnotsupp;
return nfserr_stale;
return 0;
out_bad:
printk(KERN_ERR "fh_update: fh not verified!\n");


@ -133,8 +133,7 @@ struct svc_program nfsd_program = {
.pg_rpcbind_set = nfsd_rpcbind_set,
};
static bool
nfsd_support_version(int vers)
bool nfsd_support_version(int vers)
{
if (vers >= NFSD_MINVERS && vers < NFSD_NRVERS)
return nfsd_version[vers] != NULL;
@ -769,13 +768,14 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
* this is the first time nrservs is nonzero.
*/
int
nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
nfsd_svc(int nrservs, struct net *net, const struct cred *cred, const char *scope)
{
int error;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv;
mutex_lock(&nfsd_mutex);
lockdep_assert_held(&nfsd_mutex);
dprintk("nfsd: creating service\n");
nrservs = max(nrservs, 0);
@ -785,7 +785,7 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
if (nrservs == 0 && nn->nfsd_serv == NULL)
goto out;
strscpy(nn->nfsd_name, utsname()->nodename,
strscpy(nn->nfsd_name, scope ? scope : utsname()->nodename,
sizeof(nn->nfsd_name));
error = nfsd_create_serv(net);
@ -804,7 +804,6 @@ out_put:
if (serv->sv_nrthreads == 0)
nfsd_destroy_serv(net);
out:
mutex_unlock(&nfsd_mutex);
return error;
}


@ -408,6 +408,8 @@ struct nfs4_client {
1 << NFSD4_CLIENT_CB_KILL)
#define NFSD4_CLIENT_CB_RECALL_ANY (6)
unsigned long cl_flags;
struct workqueue_struct *cl_callback_wq;
const struct cred *cl_cb_cred;
struct rpc_clnt *cl_cb_client;
u32 cl_cb_ident;
@ -486,7 +488,7 @@ struct nfs4_replay {
unsigned int rp_buflen;
char *rp_buf;
struct knfsd_fh rp_openfh;
struct mutex rp_mutex;
atomic_t rp_locked;
char rp_ibuf[NFSD4_REPLAY_ISIZE];
};
@ -735,8 +737,6 @@ extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *
extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
extern int nfsd4_create_callback_queue(void);
extern void nfsd4_destroy_callback_queue(void);
extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfsd4_shutdown_copy(struct nfs4_client *clp);
extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(struct xdr_netobj name,


@ -73,48 +73,6 @@ static int nfsd_show(struct seq_file *seq, void *v)
DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
int nfsd_percpu_counters_init(struct percpu_counter *counters, int num)
{
int i, err = 0;
for (i = 0; !err && i < num; i++)
err = percpu_counter_init(&counters[i], 0, GFP_KERNEL);
if (!err)
return 0;
for (; i > 0; i--)
percpu_counter_destroy(&counters[i-1]);
return err;
}
void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num)
{
int i;
for (i = 0; i < num; i++)
percpu_counter_set(&counters[i], 0);
}
void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num)
{
int i;
for (i = 0; i < num; i++)
percpu_counter_destroy(&counters[i]);
}
int nfsd_stat_counters_init(struct nfsd_net *nn)
{
return nfsd_percpu_counters_init(nn->counter, NFSD_STATS_COUNTERS_NUM);
}
void nfsd_stat_counters_destroy(struct nfsd_net *nn)
{
nfsd_percpu_counters_destroy(nn->counter, NFSD_STATS_COUNTERS_NUM);
}
void nfsd_proc_stat_init(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);


@ -10,11 +10,6 @@
#include <uapi/linux/nfsd/stats.h>
#include <linux/percpu_counter.h>
int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
int nfsd_stat_counters_init(struct nfsd_net *nn);
void nfsd_stat_counters_destroy(struct nfsd_net *nn);
void nfsd_proc_stat_init(struct net *net);
void nfsd_proc_stat_shutdown(struct net *net);


@ -749,6 +749,76 @@ TRACE_EVENT_CONDITION(nfsd_seq4_status,
)
);
DECLARE_EVENT_CLASS(nfsd_cs_slot_class,
TP_PROTO(
const struct nfs4_client *clp,
const struct nfsd4_create_session *cs
),
TP_ARGS(clp, cs),
TP_STRUCT__entry(
__field(u32, seqid)
__field(u32, slot_seqid)
__field(u32, cl_boot)
__field(u32, cl_id)
__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
),
TP_fast_assign(
const struct nfsd4_clid_slot *slot = &clp->cl_cs_slot;
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
clp->cl_cb_conn.cb_addrlen);
__entry->seqid = cs->seqid;
__entry->slot_seqid = slot->sl_seqid;
),
TP_printk("addr=%pISpc client %08x:%08x seqid=%u slot_seqid=%u",
__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
__entry->seqid, __entry->slot_seqid
)
);
#define DEFINE_CS_SLOT_EVENT(name) \
DEFINE_EVENT(nfsd_cs_slot_class, nfsd_##name, \
TP_PROTO( \
const struct nfs4_client *clp, \
const struct nfsd4_create_session *cs \
), \
TP_ARGS(clp, cs))
DEFINE_CS_SLOT_EVENT(slot_seqid_conf);
DEFINE_CS_SLOT_EVENT(slot_seqid_unconf);
TRACE_EVENT(nfsd_slot_seqid_sequence,
TP_PROTO(
const struct nfs4_client *clp,
const struct nfsd4_sequence *seq,
const struct nfsd4_slot *slot
),
TP_ARGS(clp, seq, slot),
TP_STRUCT__entry(
__field(u32, seqid)
__field(u32, slot_seqid)
__field(u32, cl_boot)
__field(u32, cl_id)
__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
__field(bool, in_use)
),
TP_fast_assign(
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
clp->cl_cb_conn.cb_addrlen);
__entry->seqid = seq->seqid;
__entry->slot_seqid = slot->sl_seqid;
),
TP_printk("addr=%pISpc client %08x:%08x seqid=%u slot_seqid=%u (%sin use)",
__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
__entry->seqid, __entry->slot_seqid,
__entry->in_use ? "" : "not "
)
);
DECLARE_EVENT_CLASS(nfsd_clientid_class,
TP_PROTO(const clientid_t *clid),
TP_ARGS(clid),
@ -778,6 +848,30 @@ DEFINE_CLIENTID_EVENT(purged);
DEFINE_CLIENTID_EVENT(renew);
DEFINE_CLIENTID_EVENT(stale);
TRACE_EVENT(nfsd_mark_client_expired,
TP_PROTO(
const struct nfs4_client *clp,
int cl_rpc_users
),
TP_ARGS(clp, cl_rpc_users),
TP_STRUCT__entry(
__field(int, cl_rpc_users)
__field(u32, cl_boot)
__field(u32, cl_id)
__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
),
TP_fast_assign(
__entry->cl_rpc_users = cl_rpc_users;
__entry->cl_boot = clp->cl_clientid.cl_boot;
__entry->cl_id = clp->cl_clientid.cl_id;
__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
clp->cl_cb_conn.cb_addrlen)
),
TP_printk("addr=%pISpc client %08x:%08x cl_rpc_users=%d",
__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
__entry->cl_rpc_users)
);
DECLARE_EVENT_CLASS(nfsd_net_class,
TP_PROTO(const struct nfsd_net *nn),
TP_ARGS(nn),
@ -1534,7 +1628,7 @@ TRACE_EVENT(nfsd_cb_seq_status,
__entry->seq_status = cb->cb_seq_status;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
" sessionid=%08x:%08x:%08x:%08x tk_status=%d seq_status=%d\n",
" sessionid=%08x:%08x:%08x:%08x tk_status=%d seq_status=%d",
__entry->task_id, __entry->client_id,
__entry->cl_boot, __entry->cl_id,
__entry->seqno, __entry->reserved,
@ -1573,7 +1667,7 @@ TRACE_EVENT(nfsd_cb_free_slot,
__entry->slot_seqno = session->se_cb_seq_nr;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
" sessionid=%08x:%08x:%08x:%08x new slot seqno=%u\n",
" sessionid=%08x:%08x:%08x:%08x new slot seqno=%u",
__entry->task_id, __entry->client_id,
__entry->cl_boot, __entry->cl_id,
__entry->seqno, __entry->reserved,
@ -1978,7 +2072,7 @@ TRACE_EVENT(nfsd_ctl_time,
__entry->time = time;
__assign_str(name, name);
),
TP_printk("file=%s time=%d\n",
TP_printk("file=%s time=%d",
__get_str(name), __entry->time
)
);


@ -1422,7 +1422,7 @@ nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
* Callers expect new file metadata to be committed even
* if the attributes have not changed.
*/
if (iap->ia_valid)
if (nfsd_attrs_valid(attrs))
status = nfsd_setattr(rqstp, resfhp, attrs, NULL);
else
status = nfserrno(commit_metadata(resfhp));


@ -60,6 +60,14 @@ static inline void nfsd_attrs_free(struct nfsd_attrs *attrs)
posix_acl_release(attrs->na_dpacl);
}
static inline bool nfsd_attrs_valid(struct nfsd_attrs *attrs)
{
struct iattr *iap = attrs->na_iattr;
return (iap->ia_valid || (attrs->na_seclabel &&
attrs->na_seclabel->len));
}
__be32 nfserrno (int errno);
int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
struct svc_export **expp);


@ -518,6 +518,24 @@ struct nfsd4_free_stateid {
stateid_t fr_stateid; /* request */
};
struct nfsd4_get_dir_delegation {
/* request */
u32 gdda_signal_deleg_avail;
u32 gdda_notification_types[1];
struct timespec64 gdda_child_attr_delay;
struct timespec64 gdda_dir_attr_delay;
u32 gdda_child_attributes[3];
u32 gdda_dir_attributes[3];
/* response */
u32 gddrnf_status;
nfs4_verifier gddr_cookieverf;
stateid_t gddr_stateid;
u32 gddr_notification[1];
u32 gddr_child_attributes[3];
u32 gddr_dir_attributes[3];
bool gddrnf_will_signal_deleg_avail;
};
/* also used for NVERIFY */
struct nfsd4_verify {
u32 ve_bmval[3]; /* request */
@ -674,8 +692,10 @@ struct nfsd4_copy {
#define NFSD4_COPY_F_INTRA (1)
#define NFSD4_COPY_F_SYNCHRONOUS (2)
#define NFSD4_COPY_F_COMMITTED (3)
#define NFSD4_COPY_F_COMPLETED (4)
/* response */
__be32 nfserr;
struct nfsd42_write_res cp_res;
struct knfsd_fh fh;
@ -735,7 +755,8 @@ struct nfsd4_offload_status {
/* response */
u64 count;
u32 status;
__be32 status;
bool completed;
};
struct nfsd4_copy_notify {
@ -797,6 +818,7 @@ struct nfsd4_op {
struct nfsd4_reclaim_complete reclaim_complete;
struct nfsd4_test_stateid test_stateid;
struct nfsd4_free_stateid free_stateid;
struct nfsd4_get_dir_delegation get_dir_delegation;
struct nfsd4_getdeviceinfo getdeviceinfo;
struct nfsd4_layoutget layoutget;
struct nfsd4_layoutcommit layoutcommit;


@ -701,6 +701,12 @@ enum state_protect_how4 {
SP4_SSV = 2
};
/* GET_DIR_DELEGATION non-fatal status codes */
enum gddrnf4_status {
GDD4_OK = 0,
GDD4_UNAVAIL = 1
};
enum pnfs_layouttype {
LAYOUT_NFSV4_1_FILES = 1,
LAYOUT_OSD2_OBJECTS = 2,


@ -135,6 +135,9 @@ int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
struct svc_serv *);
int svc_xprt_create_from_sa(struct svc_serv *serv, const char *xprt_name,
struct net *net, struct sockaddr *sap,
int flags, const struct cred *cred);
int svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
struct net *net, const int family,
const unsigned short port, int flags,
@ -147,6 +150,8 @@ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
void svc_xprt_close(struct svc_xprt *xprt);
int svc_port_is_privileged(struct sockaddr *sin);
int svc_print_xprts(char *buf, int maxlen);
struct svc_xprt *svc_find_listener(struct svc_serv *serv, const char *xcl_name,
struct net *net, const struct sockaddr *sa);
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
struct net *net, const sa_family_t af,
const unsigned short port);


@ -28,7 +28,6 @@ TRACE_DEFINE_ENUM(NFSERR_FBIG);
TRACE_DEFINE_ENUM(NFSERR_NOSPC);
TRACE_DEFINE_ENUM(NFSERR_ROFS);
TRACE_DEFINE_ENUM(NFSERR_MLINK);
TRACE_DEFINE_ENUM(NFSERR_OPNOTSUPP);
TRACE_DEFINE_ENUM(NFSERR_NAMETOOLONG);
TRACE_DEFINE_ENUM(NFSERR_NOTEMPTY);
TRACE_DEFINE_ENUM(NFSERR_DQUOT);
@ -64,7 +63,6 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_NOSPC, "NOSPC" }, \
{ NFSERR_ROFS, "ROFS" }, \
{ NFSERR_MLINK, "MLINK" }, \
{ NFSERR_OPNOTSUPP, "OPNOTSUPP" }, \
{ NFSERR_NAMETOOLONG, "NAMETOOLONG" }, \
{ NFSERR_NOTEMPTY, "NOTEMPTY" }, \
{ NFSERR_DQUOT, "DQUOT" }, \


@ -61,7 +61,6 @@
NFSERR_NOSPC = 28, /* v2 v3 v4 */
NFSERR_ROFS = 30, /* v2 v3 v4 */
NFSERR_MLINK = 31, /* v3 v4 */
NFSERR_OPNOTSUPP = 45, /* v2 v3 */
NFSERR_NAMETOOLONG = 63, /* v2 v3 v4 */
NFSERR_NOTEMPTY = 66, /* v2 v3 v4 */
NFSERR_DQUOT = 69, /* v2 v3 v4 */


@ -29,8 +29,55 @@ enum {
NFSD_A_RPC_STATUS_MAX = (__NFSD_A_RPC_STATUS_MAX - 1)
};
enum {
NFSD_A_SERVER_THREADS = 1,
NFSD_A_SERVER_GRACETIME,
NFSD_A_SERVER_LEASETIME,
NFSD_A_SERVER_SCOPE,
__NFSD_A_SERVER_MAX,
NFSD_A_SERVER_MAX = (__NFSD_A_SERVER_MAX - 1)
};
enum {
NFSD_A_VERSION_MAJOR = 1,
NFSD_A_VERSION_MINOR,
NFSD_A_VERSION_ENABLED,
__NFSD_A_VERSION_MAX,
NFSD_A_VERSION_MAX = (__NFSD_A_VERSION_MAX - 1)
};
enum {
NFSD_A_SERVER_PROTO_VERSION = 1,
__NFSD_A_SERVER_PROTO_MAX,
NFSD_A_SERVER_PROTO_MAX = (__NFSD_A_SERVER_PROTO_MAX - 1)
};
enum {
NFSD_A_SOCK_ADDR = 1,
NFSD_A_SOCK_TRANSPORT_NAME,
__NFSD_A_SOCK_MAX,
NFSD_A_SOCK_MAX = (__NFSD_A_SOCK_MAX - 1)
};
enum {
NFSD_A_SERVER_SOCK_ADDR = 1,
__NFSD_A_SERVER_SOCK_MAX,
NFSD_A_SERVER_SOCK_MAX = (__NFSD_A_SERVER_SOCK_MAX - 1)
};
enum {
NFSD_CMD_RPC_STATUS_GET = 1,
NFSD_CMD_THREADS_SET,
NFSD_CMD_THREADS_GET,
NFSD_CMD_VERSION_SET,
NFSD_CMD_VERSION_GET,
NFSD_CMD_LISTENER_SET,
NFSD_CMD_LISTENER_GET,
__NFSD_CMD_MAX,
NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1)
};
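
Note: the uapi enums above define the nfsd generic-netlink attribute sets and commands that nfsdctl is meant to drive. My reading of the layout, which is an assumption rather than something shown in this diff, is that a listener-set request carries one NFSD_A_SERVER_SOCK_ADDR nest per listener, each holding a raw sockaddr in NFSD_A_SOCK_ADDR plus a transport class name in NFSD_A_SOCK_TRANSPORT_NAME. The sketch below is not a netlink client; it only prints that assumed nesting using the constants defined above.

/* Illustration only: prints the assumed attribute nesting for an
 * NFSD_CMD_LISTENER_SET request; no netlink traffic is involved. */
#include <stdio.h>

enum { NFSD_A_SOCK_ADDR = 1, NFSD_A_SOCK_TRANSPORT_NAME };
enum { NFSD_A_SERVER_SOCK_ADDR = 1 };
enum { NFSD_CMD_LISTENER_SET = 6 };

int main(void)
{
	printf("cmd %d (NFSD_CMD_LISTENER_SET)\n", NFSD_CMD_LISTENER_SET);
	printf("  nest attr %d (NFSD_A_SERVER_SOCK_ADDR), one per listener\n",
	       NFSD_A_SERVER_SOCK_ADDR);
	printf("    attr %d (NFSD_A_SOCK_ADDR): struct sockaddr bytes\n",
	       NFSD_A_SOCK_ADDR);
	printf("    attr %d (NFSD_A_SOCK_TRANSPORT_NAME): e.g. \"tcp\"\n",
	       NFSD_A_SOCK_TRANSPORT_NAME);
	return 0;
}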


@ -1033,17 +1033,11 @@ null_verifier:
static void gss_free_in_token_pages(struct gssp_in_token *in_token)
{
u32 inlen;
int i;
i = 0;
inlen = in_token->page_len;
while (inlen) {
if (in_token->pages[i])
put_page(in_token->pages[i]);
inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
}
while (in_token->pages[i])
put_page(in_token->pages[i++]);
kfree(in_token->pages);
in_token->pages = NULL;
}


@ -1265,8 +1265,6 @@ svc_generic_init_request(struct svc_rqst *rqstp,
if (rqstp->rq_proc >= versp->vs_nproc)
goto err_bad_proc;
rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
if (!procp)
goto err_bad_proc;
/* Initialize storage for argp and resp */
memset(rqstp->rq_argp, 0, procp->pc_argzero);


@ -46,7 +46,6 @@ static LIST_HEAD(svc_xprt_class_list);
/* SMP locking strategy:
*
* svc_pool->sp_lock protects most of the fields of that pool.
* svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
* when both need to be taken (rare), svc_serv->sv_lock is first.
* The "service mutex" protects svc_serv->sv_nrthread.
@ -211,51 +210,6 @@ void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
}
EXPORT_SYMBOL_GPL(svc_xprt_init);
static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
struct svc_serv *serv,
struct net *net,
const int family,
const unsigned short port,
int flags)
{
struct sockaddr_in sin = {
.sin_family = AF_INET,
.sin_addr.s_addr = htonl(INADDR_ANY),
.sin_port = htons(port),
};
#if IS_ENABLED(CONFIG_IPV6)
struct sockaddr_in6 sin6 = {
.sin6_family = AF_INET6,
.sin6_addr = IN6ADDR_ANY_INIT,
.sin6_port = htons(port),
};
#endif
struct svc_xprt *xprt;
struct sockaddr *sap;
size_t len;
switch (family) {
case PF_INET:
sap = (struct sockaddr *)&sin;
len = sizeof(sin);
break;
#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
sap = (struct sockaddr *)&sin6;
len = sizeof(sin6);
break;
#endif
default:
return ERR_PTR(-EAFNOSUPPORT);
}
xprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
if (IS_ERR(xprt))
trace_svc_xprt_create_err(serv->sv_program->pg_name,
xcl->xcl_name, sap, len, xprt);
return xprt;
}
/**
* svc_xprt_received - start next receiver thread
* @xprt: controlling transport
@ -294,9 +248,8 @@ void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
}
static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
struct net *net, const int family,
const unsigned short port, int flags,
const struct cred *cred)
struct net *net, struct sockaddr *sap,
size_t len, int flags, const struct cred *cred)
{
struct svc_xprt_class *xcl;
@ -312,8 +265,11 @@ static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
goto err;
spin_unlock(&svc_xprt_class_lock);
newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
newxprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
if (IS_ERR(newxprt)) {
trace_svc_xprt_create_err(serv->sv_program->pg_name,
xcl->xcl_name, sap, len,
newxprt);
module_put(xcl->xcl_owner);
return PTR_ERR(newxprt);
}
@ -329,6 +285,48 @@ static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
return -EPROTONOSUPPORT;
}
/**
* svc_xprt_create_from_sa - Add a new listener to @serv from socket address
* @serv: target RPC service
* @xprt_name: transport class name
* @net: network namespace
* @sap: socket address pointer
* @flags: SVC_SOCK flags
* @cred: credential to bind to this transport
*
* Return local xprt port on success or %-EPROTONOSUPPORT on failure
*/
int svc_xprt_create_from_sa(struct svc_serv *serv, const char *xprt_name,
struct net *net, struct sockaddr *sap,
int flags, const struct cred *cred)
{
size_t len;
int err;
switch (sap->sa_family) {
case AF_INET:
len = sizeof(struct sockaddr_in);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
len = sizeof(struct sockaddr_in6);
break;
#endif
default:
return -EAFNOSUPPORT;
}
err = _svc_xprt_create(serv, xprt_name, net, sap, len, flags, cred);
if (err == -EPROTONOSUPPORT) {
request_module("svc%s", xprt_name);
err = _svc_xprt_create(serv, xprt_name, net, sap, len, flags,
cred);
}
return err;
}
EXPORT_SYMBOL_GPL(svc_xprt_create_from_sa);
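
Note: a hedged usage sketch for the new helper, not code from the tree. A caller that already has a socket address, such as a netlink listener-set handler, can pass it straight through instead of going by address family and port; per the comment above, a positive return is the bound local port.

/* Hedged sketch: add a TCP listener on the IPv6 wildcard address.
 * Kernel context is assumed; only exported SUNRPC interfaces are used. */
#include <linux/in6.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>

static int sketch_add_tcp6_listener(struct svc_serv *serv, struct net *net,
				    unsigned short port,
				    const struct cred *cred)
{
	struct sockaddr_in6 sin6 = {
		.sin6_family = AF_INET6,
		.sin6_addr = IN6ADDR_ANY_INIT,
		.sin6_port = htons(port),
	};
	int ret;

	/* Positive return is the bound local port; negative is an errno. */
	ret = svc_xprt_create_from_sa(serv, "tcp", net,
				      (struct sockaddr *)&sin6,
				      SVC_SOCK_DEFAULTS, cred);
	return ret < 0 ? ret : 0;
}
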
/**
* svc_xprt_create - Add a new listener to @serv
* @serv: target RPC service
@ -339,23 +337,41 @@ static int _svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
* @flags: SVC_SOCK flags
* @cred: credential to bind to this transport
*
* Return values:
* %0: New listener added successfully
* %-EPROTONOSUPPORT: Requested transport type not supported
* Return local xprt port on success or %-EPROTONOSUPPORT on failure
*/
int svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
struct net *net, const int family,
const unsigned short port, int flags,
const struct cred *cred)
{
int err;
struct sockaddr_in sin = {
.sin_family = AF_INET,
.sin_addr.s_addr = htonl(INADDR_ANY),
.sin_port = htons(port),
};
#if IS_ENABLED(CONFIG_IPV6)
struct sockaddr_in6 sin6 = {
.sin6_family = AF_INET6,
.sin6_addr = IN6ADDR_ANY_INIT,
.sin6_port = htons(port),
};
#endif
struct sockaddr *sap;
err = _svc_xprt_create(serv, xprt_name, net, family, port, flags, cred);
if (err == -EPROTONOSUPPORT) {
request_module("svc%s", xprt_name);
err = _svc_xprt_create(serv, xprt_name, net, family, port, flags, cred);
switch (family) {
case PF_INET:
sap = (struct sockaddr *)&sin;
break;
#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
sap = (struct sockaddr *)&sin6;
break;
#endif
default:
return -EAFNOSUPPORT;
}
return err;
return svc_xprt_create_from_sa(serv, xprt_name, net, sap, flags, cred);
}
EXPORT_SYMBOL_GPL(svc_xprt_create);
@ -1259,6 +1275,40 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
return dr;
}
/**
* svc_find_listener - find an RPC transport instance
* @serv: pointer to svc_serv to search
* @xcl_name: C string containing transport's class name
* @net: owner net pointer
* @sa: sockaddr containing address
*
* Return the transport instance pointer for the endpoint accepting
* connections/peer traffic from the specified transport class,
* and matching sockaddr.
*/
struct svc_xprt *svc_find_listener(struct svc_serv *serv, const char *xcl_name,
struct net *net, const struct sockaddr *sa)
{
struct svc_xprt *xprt;
struct svc_xprt *found = NULL;
spin_lock_bh(&serv->sv_lock);
list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
if (xprt->xpt_net != net)
continue;
if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
continue;
if (!rpc_cmp_addr_port(sa, (struct sockaddr *)&xprt->xpt_local))
continue;
found = xprt;
svc_xprt_get(xprt);
break;
}
spin_unlock_bh(&serv->sv_lock);
return found;
}
EXPORT_SYMBOL_GPL(svc_find_listener);
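
Note: a matching hedged sketch for svc_find_listener(), again not taken from the tree. A listener-update path might use it to skip addresses that already have a permanent transport; the helper takes a reference on the xprt it returns, so the caller must drop it with svc_xprt_put().

/* Hedged sketch: create a listener only if none exists for this
 * class/address pair.  Kernel context assumed. */
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>

static int sketch_ensure_listener(struct svc_serv *serv, struct net *net,
				  struct sockaddr *sap,
				  const struct cred *cred)
{
	struct svc_xprt *xprt;
	int ret;

	xprt = svc_find_listener(serv, "tcp", net, sap);
	if (xprt) {
		/* Already listening there; drop the reference we were given. */
		svc_xprt_put(xprt);
		return 0;
	}

	/* Positive return is the bound local port; treat it as success. */
	ret = svc_xprt_create_from_sa(serv, "tcp", net, sap,
				      SVC_SOCK_DEFAULTS, cred);
	return ret < 0 ? ret : 0;
}
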
/**
* svc_find_xprt - find an RPC transport instance
* @serv: pointer to svc_serv to search