2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2004 Topspin Communications. All rights reserved.
|
2006-09-23 06:22:46 +08:00
|
|
|
* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
|
2005-07-28 02:45:42 +08:00
|
|
|
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
* Copyright (c) 2008 Cisco. All rights reserved.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* This software is available to you under a choice of one of two
|
|
|
|
* licenses. You may choose to be licensed under the terms of the GNU
|
|
|
|
* General Public License (GPL) Version 2, available from the file
|
|
|
|
* COPYING in the main directory of this source tree, or the
|
|
|
|
* OpenIB.org BSD license below:
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or
|
|
|
|
* without modification, are permitted provided that the following
|
|
|
|
* conditions are met:
|
|
|
|
*
|
|
|
|
* - Redistributions of source code must retain the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer.
|
|
|
|
*
|
|
|
|
* - Redistributions in binary form must reproduce the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer in the documentation and/or other materials
|
|
|
|
* provided with the distribution.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
* SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
2014-08-09 07:00:52 +08:00
|
|
|
#define pr_fmt(fmt) "user_mad: " fmt
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/cdev.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include <linux/poll.h>
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
#include <linux/mutex.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/kref.h>
|
2007-10-10 10:59:15 +08:00
|
|
|
#include <linux/compat.h>
|
2009-10-04 20:11:37 +08:00
|
|
|
#include <linux/sched.h>
|
2008-04-19 10:21:05 +08:00
|
|
|
#include <linux/semaphore.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include <asm/uaccess.h>
|
|
|
|
|
2005-08-26 04:40:04 +08:00
|
|
|
#include <rdma/ib_mad.h>
|
|
|
|
#include <rdma/ib_user_mad.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Kernel module metadata for the ib_umad driver. */
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
|
|
|
|
|
|
|
|
enum {
	IB_UMAD_MAX_PORTS  = 64,	/* max device ports supported (size of dev_map) */
	IB_UMAD_MAX_AGENTS = 32,	/* max MAD agents per open file */

	IB_UMAD_MAJOR	   = 231,	/* fixed char-device major number */
	IB_UMAD_MINOR_BASE = 0		/* first minor in the reserved range */
};
|
|
|
|
|
2005-10-29 06:37:23 +08:00
|
|
|
/*
|
2010-02-03 03:08:30 +08:00
|
|
|
 * Our lifetime rules for these structs are the following: each time a
 * device special file is opened, we take a reference on the
|
|
|
|
* ib_umad_port's struct ib_umad_device. We drop these
|
2005-10-29 06:37:23 +08:00
|
|
|
* references in the corresponding close().
|
|
|
|
*
|
|
|
|
* In addition to references coming from open character devices, there
|
|
|
|
* is one more reference to each ib_umad_device representing the
|
|
|
|
* module's reference taken when allocating the ib_umad_device in
|
|
|
|
* ib_umad_add_one().
|
|
|
|
*
|
2010-02-03 03:08:30 +08:00
|
|
|
* When destroying an ib_umad_device, we drop the module's reference.
|
2005-10-29 06:37:23 +08:00
|
|
|
*/
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Per-port state: one instance per (device, port), backing the two
 * character devices (umadN for MAD access, issmN for the SM capability).
 */
struct ib_umad_port {
	struct cdev           cdev;	/* /dev/infiniband/umadN */
	struct device	      *dev;

	struct cdev           sm_cdev;	/* /dev/infiniband/issmN */
	struct device	      *sm_dev;
	struct semaphore       sm_sem;	/* allows only one issm open at a time */

	struct mutex	       file_mutex;	/* protects file_list and hot-unplug */
	struct list_head       file_list;	/* all open ib_umad_file on this port */

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;	/* owning device; holds our refcount */
	int                    dev_num;		/* index into dev_map / minor offset */
	u8                     port_num;	/* 1-based IB port number */
};
|
|
|
|
|
|
|
|
/*
 * Per-IB-device state; refcounted via kobj (see lifetime comment above).
 * The trailing port[] array holds one ib_umad_port per physical port,
 * indexed from 0 for ports start_port..end_port.
 */
struct ib_umad_device {
	int                  start_port, end_port;
	struct kobject       kobj;	/* refcount; release frees the whole struct */
	struct ib_umad_port  port[0];	/* flexible array (pre-C99 [0] idiom) */
};
|
|
|
|
|
|
|
|
/*
 * Per-open-file state for a umad character device.  All fields except
 * send_list/send_lock are protected by mutex; the send list has its own
 * spinlock because it is touched from the MAD completion handler.
 */
struct ib_umad_file {
	struct mutex		mutex;		/* protects everything below but send_list */
	struct ib_umad_port    *port;
	struct list_head	recv_list;	/* completed MADs waiting for read() */
	struct list_head	send_list;	/* MADs posted but not yet completed */
	struct list_head	port_list;	/* link on port->file_list */
	spinlock_t		send_lock;	/* protects send_list (IRQ context) */
	wait_queue_head_t	recv_wait;	/* readers block here until recv_list fills */
	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
	int			agents_dead;	/* set once agents are torn down */
	u8			use_pkey_index;	/* new ABI: header carries pkey_index */
	u8			already_used;	/* forbids ABI switch after first use */
};
|
|
|
|
|
|
|
|
/*
 * One MAD in flight (send) or queued for userspace (receive).
 * The struct ib_user_mad must stay last: its data[] tail is copied
 * directly to/from userspace.
 */
struct ib_umad_packet {
	struct ib_mad_send_buf *msg;		/* send side only */
	struct ib_mad_recv_wc  *recv_wc;	/* receive side only */
	struct list_head   list;		/* recv_list or send_list linkage */
	int		   length;		/* MAD payload length in bytes */
	struct ib_user_mad mad;			/* user-visible header + data */
};
|
|
|
|
|
2005-10-29 06:37:23 +08:00
|
|
|
/* Device class under which umadN/issmN devices are created. */
static struct class *umad_class;

/* First dev_t of the reserved umad character-device range. */
static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

/* port_lock protects dev_map, the bitmap of minor numbers in use. */
static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
|
|
|
|
|
2014-06-07 00:25:04 +08:00
|
|
|
/*
 * kobject release callback: called when the last reference to an
 * ib_umad_device is dropped; frees the device (and its embedded ports).
 */
static void ib_umad_release_dev(struct kobject *kobj)
{
	struct ib_umad_device *dev =
		container_of(kobj, struct ib_umad_device, kobj);

	kfree(dev);
}
|
|
|
|
|
2014-06-07 00:25:04 +08:00
|
|
|
/* kobj_type wiring the refcount release to ib_umad_release_dev(). */
static struct kobj_type ib_umad_dev_ktype = {
	.release = ib_umad_release_dev,
};
|
|
|
|
|
2007-10-10 10:59:15 +08:00
|
|
|
static int hdr_size(struct ib_umad_file *file)
|
|
|
|
{
|
|
|
|
return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
|
|
|
|
sizeof (struct ib_user_mad_hdr_old);
|
|
|
|
}
|
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	/* Once agents_dead is set, all agent slots must be treated as gone. */
	return file->agents_dead ? NULL : file->agent[id];
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Hand a received/timed-out MAD to userspace: find the agent's slot id,
 * stamp it into the packet header, append to recv_list and wake readers.
 * Returns 0 on success, 1 if the agent is no longer registered (caller
 * keeps ownership of the packet and must free it).
 */
static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	mutex_lock(&file->mutex);

	/* Linear scan for the slot holding this agent; records the id in
	 * the header so userspace knows which agent the MAD belongs to. */
	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	mutex_unlock(&file->mutex);

	return ret;
}
|
|
|
|
|
2006-07-20 16:25:50 +08:00
|
|
|
/*
 * Remove a completed send from the file's send_list.  Uses the spinlock
 * (not file->mutex) because it runs from the MAD completion path.
 */
static void dequeue_send(struct ib_umad_file *file,
			 struct ib_umad_packet *packet)
{
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
}
|
2006-07-20 16:25:50 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * MAD-layer send completion callback.  Releases the send resources; on
 * a response timeout the packet is recycled onto the receive queue so
 * userspace sees an ETIMEDOUT status, otherwise it is freed.
 */
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	dequeue_send(file, packet);
	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		/* Report the timeout to userspace as a header-only MAD. */
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;	/* ownership passed to recv_list */
	}
	kfree(packet);
}
|
|
|
|
|
|
|
|
/*
 * MAD-layer receive callback.  Wraps the received MAD in an
 * ib_umad_packet, fills in the user-visible header from the work
 * completion (and GRH, if present), and queues it for read().  On any
 * failure the receive buffer is returned to the MAD layer.
 */
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	/* The recv_wc stays owned by the packet until copy_recv_mad. */
	packet->length = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status	   = 0;
	packet->mad.hdr.length	   = hdr_size(file) + mad_recv_wc->mad_len;
	packet->mad.hdr.qpn	   = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid	   = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl	   = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits  = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		struct ib_ah_attr ah_attr;

		/* Derive the GRH routing fields the sender would need to
		 * address us back. */
		ib_init_ah_from_wc(agent->device, agent->port_num,
				   mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
				   &ah_attr);

		packet->mad.hdr.gid_index = ah_attr.grh.sgid_index;
		packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit;
		packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
		memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
		packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}
|
|
|
|
|
2007-10-10 10:59:15 +08:00
|
|
|
/*
 * Copy a received MAD to a userspace buffer.  Single-segment MADs are
 * copied whole; multi-segment (RMPP) messages are walked segment by
 * segment, each segment after the first skipping its RMPP header.
 * Returns bytes copied, -EINVAL if the buffer cannot hold even the
 * first segment, -ENOSPC if it holds the first but not the rest, or
 * -EFAULT on a failed user copy.
 */
static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;

	/* We need enough room to copy the first (or only) MAD segment. */
	recv_buf = &packet->recv_wc->recv_buf;
	if ((packet->length <= sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + sizeof (*recv_buf->mad)))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);
	seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message. Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < hdr_size(file) + packet->length) {
			/*
			 * The buffer is too small, return the first RMPP segment,
			 * which includes the RMPP message length.
			 */
			return -ENOSPC;
		}
		/* Payload of later segments starts after the class header. */
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = sizeof (struct ib_mad) - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
					 seg_payload))
				return -EFAULT;
		}
	}
	return hdr_size(file) + packet->length;
}
|
|
|
|
|
2007-10-10 10:59:15 +08:00
|
|
|
/*
 * Copy a locally generated (timed-out send) MAD back to userspace:
 * user-visible header first, then the payload.  Returns the total
 * number of bytes copied, -EINVAL if the buffer is too small, or
 * -EFAULT on a failed user copy.
 */
static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	ssize_t hlen  = hdr_size(file);
	ssize_t total = hlen + packet->length;

	if (count < total)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hlen))
		return -EFAULT;

	if (copy_to_user(buf + hlen, packet->mad.data, packet->length))
		return -EFAULT;

	return total;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * read() on a umad fd: hand the oldest packet on the file's receive
 * list to user space.  A packet with recv_wc set is a MAD received
 * from the fabric; otherwise it is a locally queued send copied back
 * via copy_send_mad().
 *
 * Returns the number of bytes copied or a negative errno.  Blocks
 * until a packet is queued unless the file is O_NONBLOCK.
 */
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	/* The buffer must at least hold the user_mad header. */
	if (count < hdr_size(file))
		return -EINVAL;

	mutex_lock(&file->mutex);

	while (list_empty(&file->recv_list)) {
		/* Never sleep (or return) holding the file mutex. */
		mutex_unlock(&file->mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		/*
		 * Retake the mutex and recheck the list: another reader
		 * may have consumed the packet that woke us.
		 */
		mutex_lock(&file->mutex);
	}

	/* Dequeue the oldest packet under the mutex. */
	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	mutex_unlock(&file->mutex);

	if (packet->recv_wc)
		ret = copy_recv_mad(file, buf, packet, count);
	else
		ret = copy_send_mad(file, buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		mutex_lock(&file->mutex);
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
	} else {
		/* Successfully delivered: release the receive resources. */
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}
|
|
|
|
|
2006-03-04 13:54:13 +08:00
|
|
|
/*
 * Copy the user-space body of an RMPP send into an already-created
 * ib_mad_send_buf: first any class-specific header beyond the RMPP
 * header, then each data segment in turn.  Returns 0 on success or
 * -EFAULT if any copy from user space fails.
 */
static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int remaining;
	int seg_num;

	/* Copy the class specific header, if there is one. */
	if (msg->hdr_len > IB_MGMT_RMPP_HDR &&
	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place; walk the data segment by segment. */
	buf += msg->hdr_len;
	remaining = msg->data_len;
	seg_num = 1;
	while (remaining > 0) {
		/* The final segment may be shorter than seg_size. */
		if (copy_from_user(ib_get_rmpp_segment(msg, seg_num), buf,
				   min(remaining, msg->seg_size)))
			return -EFAULT;

		remaining -= msg->seg_size;
		buf       += msg->seg_size;
		seg_num++;
	}

	return 0;
}
|
|
|
|
|
2006-07-20 16:25:50 +08:00
|
|
|
static int same_destination(struct ib_user_mad_hdr *hdr1,
|
|
|
|
struct ib_user_mad_hdr *hdr2)
|
|
|
|
{
|
|
|
|
if (!hdr1->grh_present && !hdr2->grh_present)
|
|
|
|
return (hdr1->lid == hdr2->lid);
|
|
|
|
|
|
|
|
if (hdr1->grh_present && hdr2->grh_present)
|
|
|
|
return !memcmp(hdr1->gid, hdr2->gid, 16);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int is_duplicate(struct ib_umad_file *file,
|
|
|
|
struct ib_umad_packet *packet)
|
|
|
|
{
|
|
|
|
struct ib_umad_packet *sent_packet;
|
|
|
|
struct ib_mad_hdr *sent_hdr, *hdr;
|
|
|
|
|
|
|
|
hdr = (struct ib_mad_hdr *) packet->mad.data;
|
|
|
|
list_for_each_entry(sent_packet, &file->send_list, list) {
|
|
|
|
sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;
|
|
|
|
|
|
|
|
if ((hdr->tid != sent_hdr->tid) ||
|
|
|
|
(hdr->mgmt_class != sent_hdr->mgmt_class))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* No need to be overly clever here. If two new operations have
|
|
|
|
* the same TID, reject the second as a duplicate. This is more
|
|
|
|
* restrictive than required by the spec.
|
|
|
|
*/
|
|
|
|
if (!ib_response_mad((struct ib_mad *) hdr)) {
|
|
|
|
if (!ib_response_mad((struct ib_mad *) sent_hdr))
|
|
|
|
return 1;
|
|
|
|
continue;
|
|
|
|
} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * write() on a umad fd: build and post a MAD send.
 *
 * The user buffer holds a struct ib_user_mad header (hdr_size(file)
 * bytes) followed by the MAD itself.  On success returns @count; on
 * failure returns a negative errno, unwinding whatever was acquired
 * via the goto ladder at the bottom.  The file mutex is held from
 * agent lookup until after the send is posted, so the agent cannot be
 * unregistered underneath us.
 */
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;

	/* Need at least the user_mad header plus an RMPP-sized MAD start. */
	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	/* The packet initially carries only the first IB_MGMT_RMPP_HDR
	 * bytes of the MAD; the full message lives in packet->msg. */
	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
		ret = -EFAULT;
		goto err;
	}

	/* Validate the agent slot index before using it below. */
	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	buf += hdr_size(file);

	/* Pull in enough of the MAD to inspect its class/RMPP header. */
	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	mutex_lock(&file->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	/* Build the address handle from the user-supplied routing info. */
	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.sgid_index	   = packet->mad.hdr.gid_index;
		ah_attr.grh.flow_label	   = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit	   = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class  = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	/* Classify the MAD: plain vs RMPP determines header split below. */
	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	} else {
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	}

	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah		= ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries	= packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

	if (!rmpp_active) {
		/* Single-packet MAD: copy the remainder in one shot. */
		if (copy_from_user(packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		/* RMPP MAD: copy header remainder and each data segment. */
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * Set the high-order part of the transaction ID to make MADs from
	 * different agents unique, and allow routing responses back to the
	 * original requestor.
	 */
	if (!ib_response_mad(packet->msg->mad)) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
		/* Keep the copy in packet->mad.data in sync for matching. */
		rmpp_mad->mad_hdr.tid = *tid;
	}

	/* Reject duplicates and enqueue atomically under the send lock. */
	spin_lock_irq(&file->send_lock);
	ret = is_duplicate(file, packet);
	if (!ret)
		list_add_tail(&packet->list, &file->send_list);
	spin_unlock_irq(&file->send_lock);
	if (ret) {
		ret = -EINVAL;
		goto err_msg;
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_send;

	mutex_unlock(&file->mutex);
	return count;

err_send:
	dequeue_send(file, packet);
err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	ib_destroy_ah(ah);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
	return ret;
}
|
|
|
|
|
|
|
|
static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
|
|
|
|
{
|
|
|
|
struct ib_umad_file *file = filp->private_data;
|
|
|
|
|
|
|
|
/* we will always be able to post a MAD send */
|
|
|
|
unsigned int mask = POLLOUT | POLLWRNORM;
|
|
|
|
|
|
|
|
poll_wait(filp, &file->recv_wait, wait);
|
|
|
|
|
|
|
|
if (!list_empty(&file->recv_list))
|
|
|
|
mask |= POLLIN | POLLRDNORM;
|
|
|
|
|
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
2007-10-10 10:59:15 +08:00
|
|
|
/*
 * Register a MAD agent for this open file and write the chosen slot id
 * back into the caller's ib_user_mad_reg_req.
 *
 * @compat_method_mask: non-zero when the method mask was supplied as
 * pairs of u32s that must be re-packed into u64s — presumably the
 * 32-bit compat ioctl path; confirm against the ioctl callers.
 *
 * Locking: takes port->file_mutex then file->mutex, and drops
 * file->mutex before any ib_unregister_mad_agent() cleanup call, since
 * unregistering can call back into umad code.
 */
static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	/* The port's device may already be gone (hot unplug). */
	if (!file->port->ib_dev) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent: invalid device\n");
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	/* Only QP0 (SMI) and QP1 (GSI) are valid for MAD traffic. */
	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent: invalid QPN %d specified\n",
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	/* Find the first free agent slot for this file. */
	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(file->port->dev,
		   "ib_umad_reg_agent: Max Agents (%u) reached\n",
		   IB_UMAD_MAX_AGENTS);
	ret = -ENOMEM;
	goto out;

found:
	/* A zero mgmt_class means register with no class filter (req unused). */
	if (ureq.mgmt_class) {
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.oui, ureq.oui, sizeof req.oui);

		if (compat_method_mask) {
			u32 *umm = (u32 *) ureq.method_mask;
			int i;

			/* Re-pack u32 pairs into the u64 method mask words. */
			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
				req.method_mask[i] =
					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		} else
			memcpy(req.method_mask, ureq.method_mask,
			       sizeof req.method_mask);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		/* NULL so the cleanup below doesn't unregister it. */
		agent = NULL;
		goto out;
	}

	/* Report the slot id back into the request structure. */
	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto out;
	}

	/* First registration locks in the ABI (pkey index on or off). */
	if (!file->already_used) {
		file->already_used = 1;
		if (!file->use_pkey_index) {
			dev_warn(file->port->dev,
				"process %s did not enable P_Key index support.\n",
				current->comm);
			dev_warn(file->port->dev,
				" Documentation/infiniband/user_mad.txt has info on the new ABI.\n");
		}
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	/* Undo a successful registration if a later step failed; must be
	 * done without holding file->mutex. */
	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
|
|
|
|
|
2007-10-10 10:59:15 +08:00
|
|
|
/*
 * Unregister the MAD agent in slot *arg for this open file.
 *
 * The slot is validated and cleared under file->mutex, but the actual
 * ib_unregister_mad_agent() call happens only after that mutex is
 * dropped: unregistering can call back into umad code that queues
 * packets and takes the same lock.  port->file_mutex is held across
 * the whole operation to keep the device from being hot-unplugged.
 */
static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, arg))
		return -EFAULT;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	/* Reject out-of-range ids and empty slots alike. */
	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	/* Detach the agent from the file while the mutex is held ... */
	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	mutex_unlock(&file->mutex);

	/* ... and unregister it only after dropping the file mutex. */
	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
|
|
|
|
|
2007-10-10 10:59:15 +08:00
|
|
|
static long ib_umad_enable_pkey(struct ib_umad_file *file)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_lock(&file->mutex);
|
2007-10-10 10:59:15 +08:00
|
|
|
if (file->already_used)
|
|
|
|
ret = -EINVAL;
|
|
|
|
else
|
|
|
|
file->use_pkey_index = 1;
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_unlock(&file->mutex);
|
2007-10-10 10:59:15 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2005-07-28 02:45:42 +08:00
|
|
|
static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
|
|
|
|
unsigned long arg)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
switch (cmd) {
|
|
|
|
case IB_USER_MAD_REGISTER_AGENT:
|
2007-10-10 10:59:15 +08:00
|
|
|
return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
|
2005-04-17 06:20:36 +08:00
|
|
|
case IB_USER_MAD_UNREGISTER_AGENT:
|
2007-10-10 10:59:15 +08:00
|
|
|
return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
|
2007-10-10 10:59:15 +08:00
|
|
|
case IB_USER_MAD_ENABLE_PKEY:
|
|
|
|
return ib_umad_enable_pkey(filp->private_data);
|
2005-04-17 06:20:36 +08:00
|
|
|
default:
|
|
|
|
return -ENOIOCTLCMD;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-10-10 10:59:15 +08:00
|
|
|
#ifdef CONFIG_COMPAT
/*
 * 32-bit-compat ioctl entry point.  Identical dispatch to ib_umad_ioctl()
 * except that the user pointer is recovered with compat_ptr() and the
 * register path is told (last argument = 1) to use the compat layout of
 * struct ib_user_mad_reg_req.
 */
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
|
|
|
|
|
2008-07-12 04:54:40 +08:00
|
|
|
/*
 * ib_umad_open() does not need the BKL:
 *
 *  - the ib_umad_port structures are properly reference counted, and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - the ioctl method does not affect any global state outside of the
 *    file structure being operated on;
 */
|
2005-04-17 06:20:36 +08:00
|
|
|
static int ib_umad_open(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2005-10-29 06:37:23 +08:00
|
|
|
struct ib_umad_port *port;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct ib_umad_file *file;
|
2014-05-20 16:33:41 +08:00
|
|
|
int ret = -ENXIO;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-02-03 03:08:30 +08:00
|
|
|
port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
|
2005-10-29 06:37:23 +08:00
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_lock(&port->file_mutex);
|
2005-11-04 04:01:18 +08:00
|
|
|
|
2014-05-20 16:33:41 +08:00
|
|
|
if (!port->ib_dev)
|
2005-11-04 04:01:18 +08:00
|
|
|
goto out;
|
|
|
|
|
2014-05-20 16:33:41 +08:00
|
|
|
ret = -ENOMEM;
|
2005-10-28 11:48:11 +08:00
|
|
|
file = kzalloc(sizeof *file, GFP_KERNEL);
|
2014-05-20 16:33:41 +08:00
|
|
|
if (!file)
|
2005-11-04 04:01:18 +08:00
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_init(&file->mutex);
|
2006-07-20 16:25:50 +08:00
|
|
|
spin_lock_init(&file->send_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
INIT_LIST_HEAD(&file->recv_list);
|
2006-07-20 16:25:50 +08:00
|
|
|
INIT_LIST_HEAD(&file->send_list);
|
2005-04-17 06:20:36 +08:00
|
|
|
init_waitqueue_head(&file->recv_wait);
|
|
|
|
|
|
|
|
file->port = port;
|
|
|
|
filp->private_data = file;
|
|
|
|
|
2005-11-04 04:01:18 +08:00
|
|
|
list_add_tail(&file->port_list, &port->file_list);
|
|
|
|
|
2010-04-10 08:13:50 +08:00
|
|
|
ret = nonseekable_open(inode, filp);
|
2014-05-20 16:33:41 +08:00
|
|
|
if (ret) {
|
|
|
|
list_del(&file->port_list);
|
|
|
|
kfree(file);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2014-06-07 00:25:04 +08:00
|
|
|
kobject_get(&port->umad_dev->kobj);
|
2010-04-10 08:13:50 +08:00
|
|
|
|
2005-11-04 04:01:18 +08:00
|
|
|
out:
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_unlock(&port->file_mutex);
|
2005-11-04 04:01:18 +08:00
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ib_umad_close(struct inode *inode, struct file *filp)
|
|
|
|
{
|
|
|
|
struct ib_umad_file *file = filp->private_data;
|
2005-10-29 06:37:23 +08:00
|
|
|
struct ib_umad_device *dev = file->port->umad_dev;
|
2005-05-26 03:31:30 +08:00
|
|
|
struct ib_umad_packet *packet, *tmp;
|
2005-11-11 02:18:23 +08:00
|
|
|
int already_dead;
|
2005-04-17 06:20:36 +08:00
|
|
|
int i;
|
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_lock(&file->port->file_mutex);
|
|
|
|
mutex_lock(&file->mutex);
|
2005-11-11 02:18:23 +08:00
|
|
|
|
|
|
|
already_dead = file->agents_dead;
|
|
|
|
file->agents_dead = 1;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-03-04 13:54:13 +08:00
|
|
|
list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
|
|
|
|
if (packet->recv_wc)
|
|
|
|
ib_free_recv_mad(packet->recv_wc);
|
2005-05-26 03:31:30 +08:00
|
|
|
kfree(packet);
|
2006-03-04 13:54:13 +08:00
|
|
|
}
|
2005-05-26 03:31:30 +08:00
|
|
|
|
2005-11-04 04:01:18 +08:00
|
|
|
list_del(&file->port_list);
|
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_unlock(&file->mutex);
|
2005-11-11 02:18:23 +08:00
|
|
|
|
|
|
|
if (!already_dead)
|
|
|
|
for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
|
|
|
|
if (file->agent[i])
|
|
|
|
ib_unregister_mad_agent(file->agent[i]);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_unlock(&file->port->file_mutex);
|
2005-11-11 02:18:23 +08:00
|
|
|
|
|
|
|
kfree(file);
|
2014-06-07 00:25:04 +08:00
|
|
|
kobject_put(&dev->kobj);
|
2005-10-29 06:37:23 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-02-12 16:55:32 +08:00
|
|
|
static const struct file_operations umad_fops = {
|
2010-02-03 03:08:50 +08:00
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.read = ib_umad_read,
|
|
|
|
.write = ib_umad_write,
|
|
|
|
.poll = ib_umad_poll,
|
2005-04-17 06:20:36 +08:00
|
|
|
.unlocked_ioctl = ib_umad_ioctl,
|
2007-10-10 10:59:15 +08:00
|
|
|
#ifdef CONFIG_COMPAT
|
2010-02-03 03:08:50 +08:00
|
|
|
.compat_ioctl = ib_umad_compat_ioctl,
|
2007-10-10 10:59:15 +08:00
|
|
|
#endif
|
2010-02-03 03:08:50 +08:00
|
|
|
.open = ib_umad_open,
|
2010-04-10 08:13:50 +08:00
|
|
|
.release = ib_umad_close,
|
|
|
|
.llseek = no_llseek,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2005-10-29 06:37:23 +08:00
|
|
|
struct ib_umad_port *port;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct ib_port_modify props = {
|
|
|
|
.set_port_cap_mask = IB_PORT_SM
|
|
|
|
};
|
|
|
|
int ret;
|
|
|
|
|
2010-02-03 03:08:30 +08:00
|
|
|
port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
|
2005-10-29 06:37:23 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (filp->f_flags & O_NONBLOCK) {
|
2005-10-29 06:37:23 +08:00
|
|
|
if (down_trylock(&port->sm_sem)) {
|
|
|
|
ret = -EAGAIN;
|
|
|
|
goto fail;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
} else {
|
2005-10-29 06:37:23 +08:00
|
|
|
if (down_interruptible(&port->sm_sem)) {
|
|
|
|
ret = -ERESTARTSYS;
|
|
|
|
goto fail;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
|
2014-05-20 16:33:41 +08:00
|
|
|
if (ret)
|
|
|
|
goto err_up_sem;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
filp->private_data = port;
|
|
|
|
|
2014-05-20 16:33:41 +08:00
|
|
|
ret = nonseekable_open(inode, filp);
|
|
|
|
if (ret)
|
|
|
|
goto err_clr_sm_cap;
|
|
|
|
|
2014-06-07 00:25:04 +08:00
|
|
|
kobject_get(&port->umad_dev->kobj);
|
2014-05-20 16:33:41 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_clr_sm_cap:
|
|
|
|
swap(props.set_port_cap_mask, props.clr_port_cap_mask);
|
|
|
|
ib_modify_port(port->ib_dev, port->port_num, 0, &props);
|
|
|
|
|
|
|
|
err_up_sem:
|
|
|
|
up(&port->sm_sem);
|
2005-10-29 06:37:23 +08:00
|
|
|
|
|
|
|
fail:
|
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ib_umad_sm_close(struct inode *inode, struct file *filp)
|
|
|
|
{
|
|
|
|
struct ib_umad_port *port = filp->private_data;
|
|
|
|
struct ib_port_modify props = {
|
|
|
|
.clr_port_cap_mask = IB_PORT_SM
|
|
|
|
};
|
2005-11-04 04:01:18 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_lock(&port->file_mutex);
|
2005-11-04 04:01:18 +08:00
|
|
|
if (port->ib_dev)
|
|
|
|
ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_unlock(&port->file_mutex);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
up(&port->sm_sem);
|
|
|
|
|
2014-06-07 00:25:04 +08:00
|
|
|
kobject_put(&port->umad_dev->kobj);
|
2005-10-29 06:37:23 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-02-12 16:55:32 +08:00
|
|
|
static const struct file_operations umad_sm_fops = {
|
2010-02-03 03:08:50 +08:00
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.open = ib_umad_sm_open,
|
2010-04-10 08:13:50 +08:00
|
|
|
.release = ib_umad_sm_close,
|
|
|
|
.llseek = no_llseek,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static struct ib_client umad_client = {
|
|
|
|
.name = "umad",
|
|
|
|
.add = ib_umad_add_one,
|
|
|
|
.remove = ib_umad_remove_one
|
|
|
|
};
|
|
|
|
|
2008-02-22 07:13:36 +08:00
|
|
|
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
|
|
|
|
char *buf)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2008-02-22 07:13:36 +08:00
|
|
|
struct ib_umad_port *port = dev_get_drvdata(dev);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-10-29 06:37:23 +08:00
|
|
|
if (!port)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return sprintf(buf, "%s\n", port->ib_dev->name);
|
|
|
|
}
|
2008-02-22 07:13:36 +08:00
|
|
|
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-02-22 07:13:36 +08:00
|
|
|
static ssize_t show_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
char *buf)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2008-02-22 07:13:36 +08:00
|
|
|
struct ib_umad_port *port = dev_get_drvdata(dev);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-10-29 06:37:23 +08:00
|
|
|
if (!port)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return sprintf(buf, "%d\n", port->port_num);
|
|
|
|
}
|
2008-02-22 07:13:36 +08:00
|
|
|
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-01-05 19:48:09 +08:00
|
|
|
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
|
|
|
|
__stringify(IB_USER_MAD_ABI_VERSION));
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-02-03 03:08:45 +08:00
|
|
|
static dev_t overflow_maj;
|
|
|
|
static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
|
2014-08-09 07:00:52 +08:00
|
|
|
static int find_overflow_devnum(struct ib_device *device)
|
2010-02-03 03:08:45 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!overflow_maj) {
|
|
|
|
ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
|
|
|
|
"infiniband_mad");
|
|
|
|
if (ret) {
|
2014-08-09 07:00:52 +08:00
|
|
|
dev_err(&device->dev,
|
|
|
|
"couldn't register dynamic device number\n");
|
2010-02-03 03:08:45 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
|
|
|
|
if (ret >= IB_UMAD_MAX_PORTS)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static int ib_umad_init_port(struct ib_device *device, int port_num,
|
2014-06-07 00:25:04 +08:00
|
|
|
struct ib_umad_device *umad_dev,
|
2005-04-17 06:20:36 +08:00
|
|
|
struct ib_umad_port *port)
|
|
|
|
{
|
2010-02-03 03:08:35 +08:00
|
|
|
int devnum;
|
2010-02-03 03:08:40 +08:00
|
|
|
dev_t base;
|
2010-02-03 03:08:35 +08:00
|
|
|
|
2005-10-29 06:37:23 +08:00
|
|
|
spin_lock(&port_lock);
|
2010-02-03 03:08:35 +08:00
|
|
|
devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
|
|
|
|
if (devnum >= IB_UMAD_MAX_PORTS) {
|
2005-10-29 06:37:23 +08:00
|
|
|
spin_unlock(&port_lock);
|
2014-08-09 07:00:52 +08:00
|
|
|
devnum = find_overflow_devnum(device);
|
2010-02-03 03:08:45 +08:00
|
|
|
if (devnum < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
spin_lock(&port_lock);
|
|
|
|
port->dev_num = devnum + IB_UMAD_MAX_PORTS;
|
|
|
|
base = devnum + overflow_maj;
|
|
|
|
set_bit(devnum, overflow_map);
|
|
|
|
} else {
|
|
|
|
port->dev_num = devnum;
|
|
|
|
base = devnum + base_dev;
|
|
|
|
set_bit(devnum, dev_map);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2005-10-29 06:37:23 +08:00
|
|
|
spin_unlock(&port_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
port->ib_dev = device;
|
|
|
|
port->port_num = port_num;
|
2010-09-07 22:33:31 +08:00
|
|
|
sema_init(&port->sm_sem, 1);
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_init(&port->file_mutex);
|
2005-11-04 04:01:18 +08:00
|
|
|
INIT_LIST_HEAD(&port->file_list);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-02-03 03:08:25 +08:00
|
|
|
cdev_init(&port->cdev, &umad_fops);
|
|
|
|
port->cdev.owner = THIS_MODULE;
|
2014-06-07 00:25:04 +08:00
|
|
|
port->cdev.kobj.parent = &umad_dev->kobj;
|
2010-02-03 03:08:25 +08:00
|
|
|
kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
|
2010-02-03 03:08:40 +08:00
|
|
|
if (cdev_add(&port->cdev, base, 1))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto err_cdev;
|
|
|
|
|
2008-07-22 11:03:34 +08:00
|
|
|
port->dev = device_create(umad_class, device->dma_device,
|
2010-02-03 03:08:25 +08:00
|
|
|
port->cdev.dev, port,
|
2008-07-22 11:03:34 +08:00
|
|
|
"umad%d", port->dev_num);
|
2008-02-22 07:13:36 +08:00
|
|
|
if (IS_ERR(port->dev))
|
2005-10-29 06:37:23 +08:00
|
|
|
goto err_cdev;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-02-22 07:13:36 +08:00
|
|
|
if (device_create_file(port->dev, &dev_attr_ibdev))
|
|
|
|
goto err_dev;
|
|
|
|
if (device_create_file(port->dev, &dev_attr_port))
|
|
|
|
goto err_dev;
|
|
|
|
|
2010-02-03 03:08:40 +08:00
|
|
|
base += IB_UMAD_MAX_PORTS;
|
2010-02-03 03:08:25 +08:00
|
|
|
cdev_init(&port->sm_cdev, &umad_sm_fops);
|
|
|
|
port->sm_cdev.owner = THIS_MODULE;
|
2014-06-07 00:25:04 +08:00
|
|
|
port->sm_cdev.kobj.parent = &umad_dev->kobj;
|
2010-02-03 03:08:25 +08:00
|
|
|
kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
|
2010-02-03 03:08:40 +08:00
|
|
|
if (cdev_add(&port->sm_cdev, base, 1))
|
2005-10-29 06:37:23 +08:00
|
|
|
goto err_sm_cdev;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-07-22 11:03:34 +08:00
|
|
|
port->sm_dev = device_create(umad_class, device->dma_device,
|
2010-02-03 03:08:25 +08:00
|
|
|
port->sm_cdev.dev, port,
|
2008-07-22 11:03:34 +08:00
|
|
|
"issm%d", port->dev_num);
|
2008-02-22 07:13:36 +08:00
|
|
|
if (IS_ERR(port->sm_dev))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto err_sm_cdev;
|
|
|
|
|
2008-02-22 07:13:36 +08:00
|
|
|
if (device_create_file(port->sm_dev, &dev_attr_ibdev))
|
|
|
|
goto err_sm_dev;
|
|
|
|
if (device_create_file(port->sm_dev, &dev_attr_port))
|
|
|
|
goto err_sm_dev;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2008-02-22 07:13:36 +08:00
|
|
|
err_sm_dev:
|
2010-02-03 03:08:25 +08:00
|
|
|
device_destroy(umad_class, port->sm_cdev.dev);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
err_sm_cdev:
|
2010-02-03 03:08:25 +08:00
|
|
|
cdev_del(&port->sm_cdev);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-02-22 07:13:36 +08:00
|
|
|
err_dev:
|
2010-02-03 03:08:25 +08:00
|
|
|
device_destroy(umad_class, port->cdev.dev);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
err_cdev:
|
2010-02-03 03:08:25 +08:00
|
|
|
cdev_del(&port->cdev);
|
2010-02-03 03:08:45 +08:00
|
|
|
if (port->dev_num < IB_UMAD_MAX_PORTS)
|
|
|
|
clear_bit(devnum, dev_map);
|
|
|
|
else
|
|
|
|
clear_bit(devnum, overflow_map);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2005-10-29 06:37:23 +08:00
|
|
|
static void ib_umad_kill_port(struct ib_umad_port *port)
|
|
|
|
{
|
2005-11-04 04:01:18 +08:00
|
|
|
struct ib_umad_file *file;
|
|
|
|
int id;
|
|
|
|
|
2008-02-22 07:13:36 +08:00
|
|
|
dev_set_drvdata(port->dev, NULL);
|
|
|
|
dev_set_drvdata(port->sm_dev, NULL);
|
2005-10-29 06:37:23 +08:00
|
|
|
|
2010-02-03 03:08:25 +08:00
|
|
|
device_destroy(umad_class, port->cdev.dev);
|
|
|
|
device_destroy(umad_class, port->sm_cdev.dev);
|
2005-10-29 06:37:23 +08:00
|
|
|
|
2010-02-03 03:08:25 +08:00
|
|
|
cdev_del(&port->cdev);
|
|
|
|
cdev_del(&port->sm_cdev);
|
2005-10-29 06:37:23 +08:00
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_lock(&port->file_mutex);
|
2005-11-04 04:01:18 +08:00
|
|
|
|
|
|
|
port->ib_dev = NULL;
|
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
list_for_each_entry(file, &port->file_list, port_list) {
|
|
|
|
mutex_lock(&file->mutex);
|
2005-11-11 02:18:23 +08:00
|
|
|
file->agents_dead = 1;
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_unlock(&file->mutex);
|
2005-11-11 02:18:23 +08:00
|
|
|
|
|
|
|
for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
|
|
|
|
if (file->agent[id])
|
|
|
|
ib_unregister_mad_agent(file->agent[id]);
|
|
|
|
}
|
2005-11-04 04:01:18 +08:00
|
|
|
|
IB/umad: Simplify and fix locking
In addition to being overly complex, the locking in user_mad.c is
broken: there were multiple reports of deadlocks and lockdep warnings.
In particular it seems that a single thread may end up trying to take
the same rwsem for reading more than once, which is explicitly
forbidden in the comments in <linux/rwsem.h>.
To solve this, we change the locking to use plain mutexes instead of
rwsems. There is one mutex per open file, which protects the contents
of the struct ib_umad_file, including the array of agents and list of
queued packets; and there is one mutex per struct ib_umad_port, which
protects the contents, including the list of open files. We never
hold the file mutex across calls to functions like ib_unregister_mad_agent(),
which can call back into other ib_umad code to queue a packet, and we
always hold the port mutex as long as we need to make sure that a
device is not hot-unplugged from under us.
This even makes things nicer for users of the -rt patch, since we
remove calls to downgrade_write() (which is not implemented in -rt).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2008-01-26 06:15:42 +08:00
|
|
|
mutex_unlock(&port->file_mutex);
|
2005-11-04 04:01:18 +08:00
|
|
|
|
2010-02-03 03:08:45 +08:00
|
|
|
if (port->dev_num < IB_UMAD_MAX_PORTS)
|
|
|
|
clear_bit(port->dev_num, dev_map);
|
|
|
|
else
|
|
|
|
clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
|
2005-10-29 06:37:23 +08:00
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static void ib_umad_add_one(struct ib_device *device)
|
|
|
|
{
|
|
|
|
struct ib_umad_device *umad_dev;
|
|
|
|
int s, e, i;
|
|
|
|
|
2006-08-04 05:02:42 +08:00
|
|
|
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (device->node_type == RDMA_NODE_IB_SWITCH)
|
2005-04-17 06:20:36 +08:00
|
|
|
s = e = 0;
|
|
|
|
else {
|
|
|
|
s = 1;
|
|
|
|
e = device->phys_port_cnt;
|
|
|
|
}
|
|
|
|
|
2005-10-28 11:48:11 +08:00
|
|
|
umad_dev = kzalloc(sizeof *umad_dev +
|
2005-04-17 06:20:36 +08:00
|
|
|
(e - s + 1) * sizeof (struct ib_umad_port),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!umad_dev)
|
|
|
|
return;
|
|
|
|
|
2014-06-07 00:25:04 +08:00
|
|
|
kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
umad_dev->start_port = s;
|
|
|
|
umad_dev->end_port = e;
|
|
|
|
|
|
|
|
for (i = s; i <= e; ++i) {
|
|
|
|
umad_dev->port[i - s].umad_dev = umad_dev;
|
|
|
|
|
2014-06-07 00:25:04 +08:00
|
|
|
if (ib_umad_init_port(device, i, umad_dev,
|
|
|
|
&umad_dev->port[i - s]))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
ib_set_client_data(device, &umad_client, umad_dev);
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
err:
|
2005-10-29 06:37:23 +08:00
|
|
|
while (--i >= s)
|
2005-11-07 07:47:02 +08:00
|
|
|
ib_umad_kill_port(&umad_dev->port[i - s]);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-06-07 00:25:04 +08:00
|
|
|
kobject_put(&umad_dev->kobj);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void ib_umad_remove_one(struct ib_device *device)
|
|
|
|
{
|
|
|
|
struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!umad_dev)
|
|
|
|
return;
|
|
|
|
|
2005-10-29 06:37:23 +08:00
|
|
|
for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
|
|
|
|
ib_umad_kill_port(&umad_dev->port[i]);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-06-07 00:25:04 +08:00
|
|
|
kobject_put(&umad_dev->kobj);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2011-07-24 08:24:48 +08:00
|
|
|
/*
 * devnode callback for umad_class: place the char device nodes under
 * the "infiniband/" subdirectory of /dev.
 */
static char *umad_devnode(struct device *dev, umode_t *mode)
{
	const char *name = dev_name(dev);

	return kasprintf(GFP_KERNEL, "infiniband/%s", name);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Module init: reserve the char dev number region, create the
 * "infiniband_mad" class with its abi_version attribute, then register
 * as an IB client (which triggers ib_umad_add_one for existing devices).
 * Each failure path unwinds exactly what was set up before it.
 */
static int __init ib_umad_init(void)
{
	int ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
					 "infiniband_mad");
	if (ret) {
		pr_err("couldn't register device number\n");
		goto bail;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		pr_err("couldn't create class infiniband_mad\n");
		goto bail_chrdev;
	}

	/* Device nodes for this class appear under /dev/infiniband/. */
	umad_class->devnode = umad_devnode;

	ret = class_create_file(umad_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("couldn't create abi_version attribute\n");
		goto bail_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		pr_err("couldn't register ib_umad client\n");
		goto bail_class;
	}

	return 0;

bail_class:
	class_destroy(umad_class);
bail_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
bail:
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Module exit: undo ib_umad_init() in reverse order.  Unregistering the
 * client first runs ib_umad_remove_one for every device, so no ports
 * remain when the class and the char dev regions are released.
 */
static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
	/* NOTE(review): overflow_maj looks like a lazily allocated second
	 * region used once the static one is exhausted (cf. overflow_map
	 * in ib_umad_kill_port) — its allocation site is outside this
	 * chunk; confirm before relying on this. */
	if (overflow_maj)
		unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
}
|
|
|
|
|
|
|
|
module_init(ib_umad_init);
|
|
|
|
module_exit(ib_umad_cleanup);
|