/*
* linux/fs/lockd/svc.c
*
* This is the central lockd service.
*
* FIXME: Separate the lockd NFS server functionality from the lockd NFS
* client functionality. Oh why didn't Sun create two separate
* services in the first place?
*
* Authors: Olaf Kirch (okir@monad.swb.de)
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/uio.h>
#include <linux/smp.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <net/ip.h>
#include <linux/lockd/lockd.h>
#include <linux/nfs.h>
#include "netns.h"
#define NLMDBG_FACILITY NLMDBG_SVC
#define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE)
#define ALLOWED_SIGS (sigmask(SIGKILL))
static struct svc_program nlmsvc_program;
struct nlmsvc_binding * nlmsvc_ops;
EXPORT_SYMBOL_GPL(nlmsvc_ops);
static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
static struct task_struct *nlmsvc_task;
static struct svc_rqst *nlmsvc_rqst;
unsigned long nlmsvc_timeout;
int lockd_net_id;
/*
* These can be set at insmod time (useful for NFS as root filesystem),
* and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003
*/
static unsigned long nlm_grace_period;
static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
static int nlm_udpport, nlm_tcpport;
/* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */
static unsigned int nlm_max_connections = 1024;
/*
* Constants needed for the sysctl interface.
*/
static const unsigned long nlm_grace_period_min = 0;
static const unsigned long nlm_grace_period_max = 240;
static const unsigned long nlm_timeout_min = 3;
static const unsigned long nlm_timeout_max = 20;
static const int nlm_port_min = 0, nlm_port_max = 65535;
#ifdef CONFIG_SYSCTL
static struct ctl_table_header * nlm_sysctl_table;
#endif
static unsigned long get_lockd_grace_period(void)
{
/* Note: nlm_timeout should always be nonzero */
if (nlm_grace_period)
return roundup(nlm_grace_period, nlm_timeout) * HZ;
else
return nlm_timeout * 5 * HZ;
}
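/*
 * Worked example (illustrative): with the default nlm_timeout of
 * LOCKD_DFLT_TIMEO (10 seconds) and nlm_grace_period set to 25 via
 * sysctl, roundup(25, 10) yields 30, so the grace period runs for
 * 30 * HZ jiffies; with nlm_grace_period left at zero, the fallback
 * is 5 * nlm_timeout, i.e. 50 seconds.
 */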
static void grace_ender(struct work_struct *grace)
{
struct delayed_work *dwork = container_of(grace, struct delayed_work,
work);
struct lockd_net *ln = container_of(dwork, struct lockd_net,
grace_period_end);
locks_end_grace(&ln->lockd_manager);
}
static void set_grace_period(struct net *net)
{
unsigned long grace_period = get_lockd_grace_period();
struct lockd_net *ln = net_generic(net, lockd_net_id);
locks_start_grace(net, &ln->lockd_manager);
cancel_delayed_work_sync(&ln->grace_period_end);
schedule_delayed_work(&ln->grace_period_end, grace_period);
}
static void restart_grace(void)
{
if (nlmsvc_ops) {
struct net *net = &init_net;
struct lockd_net *ln = net_generic(net, lockd_net_id);
cancel_delayed_work_sync(&ln->grace_period_end);
locks_end_grace(&ln->lockd_manager);
nlmsvc_invalidate_all();
set_grace_period(net);
}
}
/*
* This is the lockd kernel thread
*/
static int
lockd(void *vrqstp)
{
int err = 0;
struct svc_rqst *rqstp = vrqstp;
/* try_to_freeze() is called from svc_recv() */
set_freezable();
/* Allow SIGKILL to tell lockd to drop all of its locks */
allow_signal(SIGKILL);
dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
if (!nlm_timeout)
nlm_timeout = LOCKD_DFLT_TIMEO;
nlmsvc_timeout = nlm_timeout * HZ;
/*
* The main request loop. We don't terminate until the last
* NFS mount or NFS daemon has gone away.
*/
while (!kthread_should_stop()) {
long timeout = MAX_SCHEDULE_TIMEOUT;
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
/* update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nlm_max_connections;
if (signalled()) {
flush_signals(current);
restart_grace();
continue;
}
timeout = nlmsvc_retry_blocked();
/*
* Find a socket with data available and call its
* recvfrom routine.
*/
err = svc_recv(rqstp, timeout);
if (err == -EAGAIN || err == -EINTR)
continue;
dprintk("lockd: request from %s\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
svc_process(rqstp);
}
flush_signals(current);
if (nlmsvc_ops)
nlmsvc_invalidate_all();
nlm_shutdown_hosts();
return 0;
}
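/*
 * Note that the thread above is created by lockd_start_svc() and only
 * exits via the kthread_stop() call in lockd_down(); SIGKILL does not
 * terminate it, it merely makes lockd drop all locks and restart the
 * grace period.
 */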
static int create_lockd_listener(struct svc_serv *serv, const char *name,
struct net *net, const int family,
const unsigned short port)
{
struct svc_xprt *xprt;
xprt = svc_find_xprt(serv, name, net, family, 0);
if (xprt == NULL)
return svc_create_xprt(serv, name, net, family, port,
SVC_SOCK_DEFAULTS);
svc_xprt_put(xprt);
return 0;
}
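/*
 * svc_find_xprt() returns an existing transport with a reference held,
 * so the put-and-return-zero path above makes repeated lockd_up()
 * calls idempotent with respect to listener creation.
 */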
static int create_lockd_family(struct svc_serv *serv, struct net *net,
const int family)
{
int err;
err = create_lockd_listener(serv, "udp", net, family, nlm_udpport);
if (err < 0)
return err;
return create_lockd_listener(serv, "tcp", net, family, nlm_tcpport);
}
/*
* Ensure there are active UDP and TCP listeners for lockd.
*
* Even if we have only TCP NFS mounts and/or TCP NFSDs, some
* local services (such as rpc.statd) still require UDP, and
* some NFS servers do not yet support NLM over TCP.
*
* Returns zero if all listeners are available; otherwise a
* negative errno value is returned.
*/
static int make_socks(struct svc_serv *serv, struct net *net)
{
static int warned;
int err;
err = create_lockd_family(serv, net, PF_INET);
if (err < 0)
goto out_err;
err = create_lockd_family(serv, net, PF_INET6);
if (err < 0 && err != -EAFNOSUPPORT)
goto out_err;
warned = 0;
return 0;
out_err:
if (warned++ == 0)
printk(KERN_WARNING
"lockd_up: makesock failed, error=%d\n", err);
svc_shutdown_net(serv, net);
return err;
}
static int lockd_up_net(struct svc_serv *serv, struct net *net)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
int error;
if (ln->nlmsvc_users++)
return 0;
error = svc_bind(serv, net);
if (error)
goto err_bind;
error = make_socks(serv, net);
if (error < 0)
goto err_socks;
set_grace_period(net);
dprintk("lockd_up_net: per-net data created; net=%p\n", net);
return 0;
err_socks:
svc_rpcb_cleanup(serv, net);
err_bind:
ln->nlmsvc_users--;
return error;
}
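/*
 * Only the first lockd_up_net() call for a namespace does real work:
 * binding to rpcbind, creating the listeners and starting the grace
 * period. Subsequent calls just bump ln->nlmsvc_users, and
 * lockd_down_net() undoes all of it when the count drops back to zero.
 */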
static void lockd_down_net(struct svc_serv *serv, struct net *net)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
if (ln->nlmsvc_users) {
if (--ln->nlmsvc_users == 0) {
nlm_shutdown_hosts_net(net);
cancel_delayed_work_sync(&ln->grace_period_end);
locks_end_grace(&ln->lockd_manager);
svc_shutdown_net(serv, net);
dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
}
} else {
printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
nlmsvc_task, net);
BUG();
}
}
static int lockd_start_svc(struct svc_serv *serv)
{
int error;
if (nlmsvc_rqst)
return 0;
/*
* Create the kernel thread and wait for it to start.
*/
nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
if (IS_ERR(nlmsvc_rqst)) {
error = PTR_ERR(nlmsvc_rqst);
printk(KERN_WARNING
"lockd_up: svc_rqst allocation failed, error=%d\n",
error);
goto out_rqst;
}
svc_sock_update_bufs(serv);
serv->sv_maxconn = nlm_max_connections;
nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
if (IS_ERR(nlmsvc_task)) {
error = PTR_ERR(nlmsvc_task);
printk(KERN_WARNING
"lockd_up: kthread_run failed, error=%d\n", error);
goto out_task;
}
dprintk("lockd_up: service started\n");
return 0;
out_task:
svc_exit_thread(nlmsvc_rqst);
nlmsvc_task = NULL;
out_rqst:
nlmsvc_rqst = NULL;
return error;
}
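/*
 * There is a single lockd thread for the whole system, shared by all
 * network namespaces; nlmsvc_rqst doubles as the "already running"
 * flag, so only the first lockd_start_svc() call creates the thread.
 */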
static struct svc_serv *lockd_create_svc(void)
{
struct svc_serv *serv;
/*
* Check whether we're already up and running.
*/
if (nlmsvc_rqst) {
/*
 * Note: take an extra reference on the service here, because
 * lockd_up() drops one via svc_destroy() on both its success
 * and error paths.
 */
svc_get(nlmsvc_rqst->rq_server);
return nlmsvc_rqst->rq_server;
}
/*
* Sanity check: if there's no pid,
* we should be the first user ...
*/
if (nlmsvc_users)
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
return ERR_PTR(-ENOMEM);
}
dprintk("lockd_up: service created\n");
return serv;
}
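/*
 * Either way, the svc_serv returned here carries a reference owned by
 * the caller: a fresh serv from svc_create() starts with a use count
 * of 1, and the reuse path takes svc_get(). lockd_up() relies on this
 * when it calls svc_destroy() on both its success and error paths.
 */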
/*
* Bring up the lockd process if it's not already up.
*/
int lockd_up(struct net *net)
{
struct svc_serv *serv;
int error;
mutex_lock(&nlmsvc_mutex);
serv = lockd_create_svc();
if (IS_ERR(serv)) {
error = PTR_ERR(serv);
goto err_create;
}
error = lockd_up_net(serv, net);
if (error < 0)
goto err_net;
error = lockd_start_svc(serv);
if (error < 0)
goto err_start;
nlmsvc_users++;
/*
* Note: svc_serv structures have an initial use count of 1,
* so we exit through here on both success and failure.
*/
err_net:
svc_destroy(serv);
err_create:
mutex_unlock(&nlmsvc_mutex);
return error;
err_start:
lockd_down_net(serv, net);
goto err_net;
}
EXPORT_SYMBOL_GPL(lockd_up);
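/*
 * Illustrative sketch only (not a real caller): the NFS client and
 * nfsd use this pair roughly as follows, with one lockd_down() per
 * successful lockd_up() on the same namespace:
 *
 *	error = lockd_up(net);
 *	if (error < 0)
 *		return error;
 *	...
 *	lockd_down(net);
 */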
/*
* Decrement the user count and bring down lockd if we're the last.
*/
void
lockd_down(struct net *net)
{
mutex_lock(&nlmsvc_mutex);
lockd_down_net(nlmsvc_rqst->rq_server, net);
if (nlmsvc_users) {
if (--nlmsvc_users)
goto out;
} else {
printk(KERN_ERR "lockd_down: no users! task=%p\n",
nlmsvc_task);
BUG();
}
if (!nlmsvc_task) {
printk(KERN_ERR "lockd_down: no lockd running.\n");
BUG();
}
kthread_stop(nlmsvc_task);
dprintk("lockd_down: service stopped\n");
svc_exit_thread(nlmsvc_rqst);
dprintk("lockd_down: service destroyed\n");
nlmsvc_task = NULL;
nlmsvc_rqst = NULL;
out:
mutex_unlock(&nlmsvc_mutex);
}
EXPORT_SYMBOL_GPL(lockd_down);
#ifdef CONFIG_SYSCTL
/*
* Sysctl parameters (same as module parameters, different interface).
*/
static ctl_table nlm_sysctls[] = {
{
.procname = "nlm_grace_period",
.data = &nlm_grace_period,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = (unsigned long *) &nlm_grace_period_min,
.extra2 = (unsigned long *) &nlm_grace_period_max,
},
{
.procname = "nlm_timeout",
.data = &nlm_timeout,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = (unsigned long *) &nlm_timeout_min,
.extra2 = (unsigned long *) &nlm_timeout_max,
},
{
.procname = "nlm_udpport",
.data = &nlm_udpport,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (int *) &nlm_port_min,
.extra2 = (int *) &nlm_port_max,
},
{
.procname = "nlm_tcpport",
.data = &nlm_tcpport,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (int *) &nlm_port_min,
.extra2 = (int *) &nlm_port_max,
},
{
.procname = "nsm_use_hostnames",
.data = &nsm_use_hostnames,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nsm_local_state",
.data = &nsm_local_state,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static ctl_table nlm_sysctl_dir[] = {
{
.procname = "nfs",
.mode = 0555,
.child = nlm_sysctls,
},
{ }
};
static ctl_table nlm_sysctl_root[] = {
{
.procname = "fs",
.mode = 0555,
.child = nlm_sysctl_dir,
},
{ }
};
#endif /* CONFIG_SYSCTL */
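/*
 * The tables above nest as fs -> nfs -> nlm_*, so these knobs appear
 * under /proc/sys/fs/nfs/ (for example /proc/sys/fs/nfs/nlm_timeout).
 */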
/*
* Module (and sysfs) parameters.
*/
#define param_set_min_max(name, type, which_strtol, min, max) \
static int param_set_##name(const char *val, struct kernel_param *kp) \
{ \
char *endp; \
__typeof__(type) num = which_strtol(val, &endp, 0); \
if (endp == val || *endp || num < (min) || num > (max)) \
return -EINVAL; \
*((type *) kp->arg) = num; \
return 0; \
}
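/*
 * For example, the param_set_min_max(port, int, simple_strtol, 0, 65535)
 * instantiation below expands to a param_set_port() helper that rejects
 * values outside 0..65535; module_param_call() then wires it up as the
 * setter for the nlm_udpport and nlm_tcpport module parameters.
 */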
static inline int is_callback(u32 proc)
{
return proc == NLMPROC_GRANTED
|| proc == NLMPROC_GRANTED_MSG
|| proc == NLMPROC_TEST_RES
|| proc == NLMPROC_LOCK_RES
|| proc == NLMPROC_CANCEL_RES
|| proc == NLMPROC_UNLOCK_RES
|| proc == NLMPROC_NSM_NOTIFY;
}
static int lockd_authenticate(struct svc_rqst *rqstp)
{
rqstp->rq_client = NULL;
switch (rqstp->rq_authop->flavour) {
case RPC_AUTH_NULL:
case RPC_AUTH_UNIX:
if (rqstp->rq_proc == 0)
return SVC_OK;
if (is_callback(rqstp->rq_proc)) {
/* Leave it to individual procedures to
* call nlmsvc_lookup_host(rqstp)
*/
return SVC_OK;
}
return svc_set_client(rqstp);
}
return SVC_DENIED;
}
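/*
 * Procedure 0 is the NULL ping and is always allowed. The *_MSG and
 * *_RES callback procedures are checked later against the set of known
 * NLM hosts (hence the early SVC_OK above); everything else goes
 * through the normal export access check in svc_set_client().
 */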
param_set_min_max(port, int, simple_strtol, 0, 65535)
param_set_min_max(grace_period, unsigned long, simple_strtoul,
nlm_grace_period_min, nlm_grace_period_max)
param_set_min_max(timeout, unsigned long, simple_strtoul,
nlm_timeout_min, nlm_timeout_max)
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
MODULE_DESCRIPTION("NFS file locking service version " LOCKD_VERSION ".");
MODULE_LICENSE("GPL");
module_param_call(nlm_grace_period, param_set_grace_period, param_get_ulong,
&nlm_grace_period, 0644);
module_param_call(nlm_timeout, param_set_timeout, param_get_ulong,
&nlm_timeout, 0644);
module_param_call(nlm_udpport, param_set_port, param_get_int,
&nlm_udpport, 0644);
module_param_call(nlm_tcpport, param_set_port, param_get_int,
&nlm_tcpport, 0644);
module_param(nsm_use_hostnames, bool, 0644);
module_param(nlm_max_connections, uint, 0644);
static int lockd_init_net(struct net *net)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
INIT_LIST_HEAD(&ln->grace_list);
spin_lock_init(&ln->nsm_clnt_lock);
return 0;
}
static void lockd_exit_net(struct net *net)
{
}
static struct pernet_operations lockd_net_ops = {
.init = lockd_init_net,
.exit = lockd_exit_net,
.id = &lockd_net_id,
.size = sizeof(struct lockd_net),
};
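/*
 * register_pernet_subsys() allocates sizeof(struct lockd_net) of
 * private storage for each network namespace and records the handle in
 * lockd_net_id; the net_generic(net, lockd_net_id) calls throughout
 * this file retrieve that storage again.
 */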
/*
* Initialising and terminating the module.
*/
static int __init init_nlm(void)
{
int err;
#ifdef CONFIG_SYSCTL
err = -ENOMEM;
nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root);
if (nlm_sysctl_table == NULL)
goto err_sysctl;
#endif
err = register_pernet_subsys(&lockd_net_ops);
if (err)
goto err_pernet;
return 0;
err_pernet:
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nlm_sysctl_table);
#endif
err_sysctl:
return err;
}
static void __exit exit_nlm(void)
{
/* FIXME: delete all NLM clients */
nlm_shutdown_hosts();
unregister_pernet_subsys(&lockd_net_ops);
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nlm_sysctl_table);
#endif
}
module_init(init_nlm);
module_exit(exit_nlm);
/*
* Define NLM program and procedures
*/
static struct svc_version nlmsvc_version1 = {
.vs_vers = 1,
.vs_nproc = 17,
.vs_proc = nlmsvc_procedures,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
static struct svc_version nlmsvc_version3 = {
.vs_vers = 3,
.vs_nproc = 24,
.vs_proc = nlmsvc_procedures,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#ifdef CONFIG_LOCKD_V4
static struct svc_version nlmsvc_version4 = {
.vs_vers = 4,
.vs_nproc = 24,
.vs_proc = nlmsvc_procedures4,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#endif
static struct svc_version * nlmsvc_version[] = {
[1] = &nlmsvc_version1,
[3] = &nlmsvc_version3,
#ifdef CONFIG_LOCKD_V4
[4] = &nlmsvc_version4,
#endif
};
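/*
 * The holes in this table ([0] and [2]) are deliberate: only NLM
 * versions 1 and 3 (and 4 with CONFIG_LOCKD_V4) are implemented, and
 * the NULL slots make the sunrpc layer reject the missing versions
 * with a version-mismatch error.
 */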
static struct svc_stat nlmsvc_stats;
#define NLM_NRVERS ARRAY_SIZE(nlmsvc_version)
static struct svc_program nlmsvc_program = {
.pg_prog = NLM_PROGRAM, /* program number */
.pg_nvers = NLM_NRVERS, /* number of entries in nlmsvc_version */
.pg_vers = nlmsvc_version, /* version table */
.pg_name = "lockd", /* service name */
.pg_class = "nfsd", /* share authentication with nfsd */
.pg_stats = &nlmsvc_stats, /* stats table */
.pg_authenticate = &lockd_authenticate /* export authentication */
};