// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>

#include "cm_msgs.h"
#include "core_priv.h"
#include "cm_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP] = "no QP",
	[IB_CM_REJ_NO_EEC] = "no EEC",
	[IB_CM_REJ_NO_RESOURCES] = "no resources",
	[IB_CM_REJ_TIMEOUT] = "timeout",
	[IB_CM_REJ_UNSUPPORTED] = "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
	[IB_CM_REJ_STALE_CONN] = "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
	[IB_CM_REJ_INVALID_GID] = "invalid GID",
	[IB_CM_REJ_INVALID_LID] = "invalid LID",
	[IB_CM_REJ_INVALID_SL] = "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT] = "port redirect",
	[IB_CM_REJ_INVALID_MTU] = "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
		"vendor option is not supported",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

struct cm_id_private;
struct cm_work;
static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len);
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct xarray local_id_table;
	u32 local_id_next;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	/* Sync on cm change port state */
	spinlock_t state_lock;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u32 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct rdma_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[];
};

struct cm_timewait_info {
	struct cm_work work;
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	refcount_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock.
	 */
	int listen_sharecount;
	struct rcu_head rcu;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port mad is registered and av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;

	struct rdma_ucm_ece ece;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
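
/*
 * Allocate a MAD send buffer for an outbound CM message. The buffer is
 * built on the primary AV, or on the alternate AV if only that port is
 * ready; cm.state_lock is held across the allocation so the port cannot
 * be released underneath us. A reference is taken on cm_id_priv and
 * stashed in context[0]; both are released by cm_free_msg().
 */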
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* Don't let the port be released until the agent is down. */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the mad agent yet. */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       av->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah, 0);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	refcount_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}
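
/*
 * Helpers for replying to a received MAD: allocate a send buffer on the
 * receiving port and create an address handle from the work completion
 * of the incoming message, either as separate steps or combined in
 * cm_alloc_response_msg().
 */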
static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
							   struct ib_mad_recv_wc *mad_recv_wc)
{
	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				  GFP_ATOMIC,
				  IB_MGMT_BASE_VERSION);
}

static int cm_create_response_msg_ah(struct cm_port *port,
				     struct ib_mad_recv_wc *mad_recv_wc,
				     struct ib_mad_send_buf *msg)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	msg->ah = ah;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	int ret;

	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
	if (IS_ERR(m))
		return PTR_ERR(m);

	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
	if (ret) {
		cm_free_msg(m);
		return ret;
	}

	*msg = m;
	return 0;
}
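
/* Helpers to duplicate and attach connection private data to a cm_id. */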
static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
			      struct ib_grh *grh, struct cm_av *av)
{
	struct rdma_ah_attr new_ah_attr;
	int ret;

	av->port = port;
	av->pkey_index = wc->pkey_index;

	/*
	 * av->ah_attr might be initialized based on a past wc during an
	 * incoming connect request or while sending out a connect request.
	 * So initialize a new ah_attr on the stack. If initialization fails,
	 * the old ah_attr is used for sending any responses. If
	 * initialization is successful, then the new ah_attr is used by
	 * overwriting the old one.
	 */
	ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				      port->port_num, wc,
				      grh, &new_ah_attr);
	if (ret)
		return ret;

	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}

static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
				   struct cm_av *av, struct cm_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		WARN_ON(true);
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;

	if (attr) {
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			if (cm_dev->ib_device == attr->device) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
	} else {
		/* The SGID attribute can be NULL in the following
		 * conditions.
		 * (a) Alternative path
		 * (b) IB link layer without GRH
		 * (c) LAP send messages
		 */
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			attr = rdma_find_gid(cm_dev->ib_device,
					     &path->sgid,
					     sa_conv_pathrec_to_gid_type(path),
					     NULL);
			if (!IS_ERR(attr)) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
		if (port)
			rdma_put_gid_attr(attr);
	}
	return port;
}
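
/*
 * Initialize an address vector from a path record: resolve the CM port
 * from the path's SGID, look up the pkey index, and build a fresh
 * ah_attr before linking the cm_id onto the port's list.
 */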
static int cm_init_av_by_path(struct sa_path_rec *path,
			      const struct ib_gid_attr *sgid_attr,
			      struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct rdma_ah_attr new_ah_attr;
	struct cm_device *cm_dev;
	struct cm_port *port;
	int ret;

	port = get_cm_port_from_path(path, sgid_attr);
	if (!port)
		return -EINVAL;
	cm_dev = port->cm_dev;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;

	/*
	 * av->ah_attr might be initialized based on a wc or during request
	 * processing time, in which case it might hold a reference to an
	 * sgid_attr. So initialize a new ah_attr on the stack.
	 * If initialization fails, the old ah_attr is used for sending any
	 * responses. If initialization is successful, then the new ah_attr
	 * is used by overwriting the old one, so that the right ah_attr
	 * can be used to return an error response.
	 */
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&new_ah_attr, sgid_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;
	add_cm_id_to_port_list(cm_id_priv, av, port);
	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}
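
/*
 * Local IDs are stored in the xarray XORed with a random operand, so the
 * ID carried on the wire does not expose the allocator's sequence.
 * cm_acquire_id() looks an ID up under RCU and returns it with an
 * elevated refcount, or NULL if it is gone or the remote ID mismatches.
 */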
static u32 cm_local_id(__be32 local_id)
{
	return (__force u32) (local_id ^ cm.random_id_operand);
}

static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	rcu_read_lock();
	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
	    !refcount_inc_not_zero(&cm_id_priv->refcount))
		cm_id_priv = NULL;
	rcu_read_unlock();

	return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

/*
 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
 * if the new ID was inserted, NULL if it could not be inserted due to a
 * collision, or the existing cm_id_priv ready for shared usage.
 */
static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
					      ib_cm_handler shared_handler)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
			/*
			 * Sharing an ib_cm_id with different handlers is not
			 * supported
			 */
			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
			    cur_cm_id_priv->id.context ||
			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
				spin_unlock_irqrestore(&cm.lock, flags);
				return NULL;
			}
			refcount_inc(&cur_cm_id_priv->refcount);
			cur_cm_id_priv->listen_sharecount++;
			spin_unlock_irqrestore(&cm.lock, flags);
			return cur_cm_id_priv;
		}

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	cm_id_priv->listen_sharecount++;
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return cm_id_priv;
}
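
/*
 * Find the listening cm_id for a device/service ID pair, taking a
 * reference on the returned entry. The caller is expected to serialize
 * against table updates (cm.lock).
 */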
static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device)) {
			refcount_inc(&cm_id_priv->refcount);
			return cm_id_priv;
		}
		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}
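
/*
 * The next helpers maintain the red-black trees keyed by remote CA GUID
 * plus remote ID (or remote QPN) that track connections lingering in
 * timewait, so stale or duplicate connection attempts can be detected.
 */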
static struct cm_timewait_info *
cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
					       __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *res = NULL;

	spin_lock_irq(&cm.lock);
	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else {
			res = cm_acquire_id(timewait_info->work.local_id,
					    timewait_info->work.remote_id);
			break;
		}
	}
	spin_unlock_irq(&cm.lock);
	return res;
}

static struct cm_timewait_info *
cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
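
/*
 * Insert a SIDR cm_id keyed by remote ID and destination GID; returns
 * the existing entry instead if one is already present.
 */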
static struct cm_id_private *
cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;

			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}
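
/*
 * Allocate and initialize a cm_id_private. The new ID is reserved in
 * the xarray with a NULL entry so it is not yet visible to readers;
 * cm_finalize_id() publishes it.
 */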
static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
					      ib_cm_handler cm_handler,
					      void *context)
{
	struct cm_id_private *cm_id_priv;
	u32 id;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;

	RB_CLEAR_NODE(&cm_id_priv->service_node);
	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	refcount_set(&cm_id_priv->refcount, 1);

	ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
			      &cm.local_id_next, GFP_KERNEL);
	if (ret < 0)
		goto error;
	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;

	return cm_id_priv;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}

/*
 * Make the ID visible to the MAD handlers and other threads that use the
 * xarray.
 */
static void cm_finalize_id(struct cm_id_private *cm_id_priv)
{
	xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
		 cm_id_priv, GFP_ATOMIC);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	cm_finalize_id(cm_id_priv);
	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
				 struct cm_work *work)
	__releases(&cm_id_priv->lock)
{
	bool immediate;

	/*
	 * To deliver the event to the user callback we have to drop the
	 * spinlock, however, we need to ensure that the user callback is
	 * single threaded and receives events in temporal order. If there are
	 * already events being processed then thread new events onto a list,
	 * the thread currently processing will pick them up.
	 */
	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!immediate) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		/*
		 * This routine always consumes the incoming reference. Once
		 * queued to the work_list then a reference is held by the
		 * thread currently running cm_process_work() and this
		 * reference is not needed.
		 */
		cm_deref_id(cm_id_priv);
	}
	spin_unlock_irq(&cm_id_priv->lock);

	if (immediate)
		cm_process_work(cm_id_priv, work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}

static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;

	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}
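
/*
 * Transition a connection into the timewait state: unlink the remote
 * tracking nodes, queue delayed work to exit timewait later, and hand
 * ownership of the timewait_info over to that work.
 */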
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_remove_remote(cm_id_priv);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The timewait_info is converted into a work and gets freed during
	 * cm_free_work() in cm_timewait_handler().
	 */
	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_remove_remote(cm_id_priv);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
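
/*
 * Tear down a cm_id from any state, sending whatever REJ/DREQ/DREP the
 * current state requires, then unpublish the ID and wait for all
 * references to drop before freeing.
 */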
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irq(&cm_id_priv->lock);
retest:
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_lock(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
			spin_unlock(&cm.lock);
			spin_unlock_irq(&cm_id_priv->lock);
			cm_deref_id(cm_id_priv);
			return;
		}
		cm_id->state = IB_CM_IDLE;
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		RB_CLEAR_NODE(&cm_id_priv->service_node);
		spin_unlock(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		cm_send_sidr_rep_locked(cm_id_priv,
					&(struct ib_cm_sidr_rep_param){
						.status = IB_SIDR_REJECT });
		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_REQ_SENT:
ib/cm: Change reject message type when destroying cm_id
Problem reported by: Ted Kim <ted.h.kim@oracle.com>:
We have a case where a Linux system and a non-Linux system are
trying to interoperate. The Linux host is the active side and
starts the connection establishment, but later decides to not go
through with the connection setup and does rdma_destroy_id().
The rdma_destroy_id() eventually works its way down to cm_destroy_id()
in core/cm.c, where a REJ is sent. The non-Linux system
has some trouble recognizing the REJ because of:
A. CM states which can't receive the REJ
B. Some issues about REJ formatting (missing comm ID)
ISSUE A: That part of the spec says, a Consumer Reject REJ can be
sent for a connection abort, but it goes further
and says: can send a REJ message with a "Consumer Reject"
Reason code if they are in a CM state (i.e. REP
Rcvd, MRA(REP) Sent, REQ Rcvd, MRA Sent) that allows
a REJ to be sent (lines 35-38).
Of the states listed there in that sentence, it would
seem to limit the active side to using the Consumer Reject
(for the abort case) in just the REP-Rcvd and MRA-REP-Sent
states. That is basically only after the active side
sees a REP (or alternatively goes down the state transitions
to timeout in which case a Timeout REJ is sent).
As a fix, in cm-destroy-id() move the IB-CM-MRA-REQ-RCVD case
to the same as REQ-SENT. Essentially, make a REJ sent after
getting an MRA on active side a timeout rather than Consumer-
Reject, which is arguably more correct with the CM state
diagrams previous to getting a REP.
Signed-off-by: Ted Kim <ted.h.kim@oracle.com>
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
2015-05-15 03:49:01 +08:00
|
|
|
case IB_CM_MRA_REQ_RCVD:
|
2005-12-01 02:00:25 +08:00
|
|
|
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
|
2020-03-10 17:25:43 +08:00
|
|
|
cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
|
|
|
|
&cm_id_priv->id.device->node_guid,
|
|
|
|
sizeof(cm_id_priv->id.device->node_guid),
|
|
|
|
NULL, 0);
|
2005-12-01 02:00:25 +08:00
|
|
|
break;
|
2006-07-14 15:23:52 +08:00
|
|
|
case IB_CM_REQ_RCVD:
|
|
|
|
if (err == -ENOMEM) {
|
|
|
|
/* Do not reject to allow future retries. */
|
|
|
|
cm_reset_to_idle(cm_id_priv);
|
|
|
|
} else {
|
2020-03-10 17:25:43 +08:00
|
|
|
cm_send_rej_locked(cm_id_priv,
|
|
|
|
IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
|
|
|
|
NULL, 0);
|
2006-07-14 15:23:52 +08:00
|
|
|
}
|
|
|
|
break;
|
2005-07-28 02:45:40 +08:00
|
|
|
case IB_CM_REP_SENT:
|
|
|
|
case IB_CM_MRA_REP_RCVD:
|
2005-10-26 01:51:39 +08:00
|
|
|
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
|
2020-06-03 21:33:38 +08:00
|
|
|
cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
|
|
|
|
0, NULL, 0);
|
|
|
|
goto retest;
|
2005-07-28 02:45:40 +08:00
|
|
|
case IB_CM_MRA_REQ_SENT:
|
|
|
|
case IB_CM_REP_RCVD:
|
|
|
|
case IB_CM_MRA_REP_SENT:
|
2020-03-10 17:25:43 +08:00
|
|
|
cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
|
|
|
|
0, NULL, 0);
|
2005-07-28 02:45:40 +08:00
|
|
|
break;
|
|
|
|
case IB_CM_ESTABLISHED:
|
2020-03-10 17:25:41 +08:00
|
|
|
if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
|
2020-03-10 17:25:45 +08:00
|
|
|
cm_id->state = IB_CM_IDLE;
|
2011-05-31 13:30:46 +08:00
|
|
|
break;
|
2020-03-10 17:25:41 +08:00
|
|
|
}
|
|
|
|
cm_send_dreq_locked(cm_id_priv, NULL, 0);
|
2005-07-28 02:45:40 +08:00
|
|
|
goto retest;
|
|
|
|
case IB_CM_DREQ_SENT:
|
2005-10-26 01:51:39 +08:00
|
|
|
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_enter_timewait(cm_id_priv);
|
2020-03-10 17:25:45 +08:00
|
|
|
goto retest;
|
2005-07-28 02:45:40 +08:00
|
|
|
case IB_CM_DREQ_RCVD:
|
2020-03-10 17:25:42 +08:00
|
|
|
cm_send_drep_locked(cm_id_priv, NULL, 0);
|
2020-03-10 17:25:45 +08:00
|
|
|
WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
|
|
|
|
goto retest;
|
|
|
|
case IB_CM_TIMEWAIT:
|
|
|
|
/*
|
|
|
|
* The cm_acquire_id in cm_timewait_handler will stop working
|
2020-05-06 15:46:59 +08:00
|
|
|
* once we do xa_erase below, so just move to idle here for
|
2020-03-10 17:25:45 +08:00
|
|
|
* consistency.
|
|
|
|
*/
|
|
|
|
cm_id->state = IB_CM_IDLE;
|
2005-07-28 02:45:40 +08:00
|
|
|
break;
|
2020-03-10 17:25:45 +08:00
|
|
|
case IB_CM_IDLE:
|
2005-07-28 02:45:40 +08:00
|
|
|
break;
|
|
|
|
}
|
2020-03-10 17:25:45 +08:00
|
|
|
WARN_ON(cm_id->state != IB_CM_IDLE);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2020-03-10 17:25:33 +08:00
|
|
|
spin_lock(&cm.lock);
|
|
|
|
/* Required for cleanup paths related to cm_req_handler() */
|
|
|
|
if (cm_id_priv->timewait_info) {
|
2020-05-06 15:46:56 +08:00
|
|
|
cm_remove_remote(cm_id_priv);
|
2020-03-10 17:25:33 +08:00
|
|
|
kfree(cm_id_priv->timewait_info);
|
|
|
|
cm_id_priv->timewait_info = NULL;
|
|
|
|
}
|
2016-10-27 21:36:27 +08:00
|
|
|
if (!list_empty(&cm_id_priv->altr_list) &&
|
|
|
|
(!cm_id_priv->altr_send_port_not_ready))
|
|
|
|
list_del(&cm_id_priv->altr_list);
|
|
|
|
if (!list_empty(&cm_id_priv->prim_list) &&
|
|
|
|
(!cm_id_priv->prim_send_port_not_ready))
|
|
|
|
list_del(&cm_id_priv->prim_list);
|
2020-03-10 17:25:34 +08:00
|
|
|
WARN_ON(cm_id_priv->listen_sharecount);
|
|
|
|
WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
|
|
|
|
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
|
|
|
|
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
|
2020-03-10 17:25:33 +08:00
|
|
|
spin_unlock(&cm.lock);
|
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2016-10-27 21:36:27 +08:00
|
|
|
|
2020-11-05 05:40:59 +08:00
|
|
|
xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
|
2006-05-13 05:57:52 +08:00
|
|
|
cm_deref_id(cm_id_priv);
|
|
|
|
wait_for_completion(&cm_id_priv->comp);
|
2005-07-28 02:45:40 +08:00
|
|
|
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
|
|
|
|
cm_free_work(work);
|
2018-06-19 15:59:14 +08:00
|
|
|
|
|
|
|
rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
|
|
|
|
rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
|
2006-06-18 11:37:28 +08:00
|
|
|
kfree(cm_id_priv->private_data);
|
2019-12-19 21:47:50 +08:00
|
|
|
kfree_rcu(cm_id_priv, rcu);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
2006-07-14 15:23:52 +08:00
|
|
|
|
|
|
|
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
|
|
|
|
{
|
|
|
|
cm_destroy_id(cm_id, 0);
|
|
|
|
}
|
2005-07-28 02:45:40 +08:00
|
|
|
EXPORT_SYMBOL(ib_destroy_cm_id);
|
|
|
|
|
2020-03-10 17:25:35 +08:00
|
|
|
static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
|
|
|
|
__be64 service_mask)
|
|
|
|
{
|
|
|
|
service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
|
|
|
|
service_id &= service_mask;
|
|
|
|
if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
|
|
|
|
(service_id != IB_CM_ASSIGN_SERVICE_ID))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
|
|
|
|
cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
|
|
|
|
cm_id_priv->id.service_mask = ~cpu_to_be64(0);
|
|
|
|
} else {
|
|
|
|
cm_id_priv->id.service_id = service_id;
|
|
|
|
cm_id_priv->id.service_mask = service_mask;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
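/*
 * Illustrative note (not from the original source): passing
 * service_mask = 0 yields an exact match on service_id, while
 * IB_CM_ASSIGN_SERVICE_ID asks the CM to pick the next ID from its
 * assigned range; any other ID that falls inside the CM-assigned
 * range is rejected with -EINVAL above.
 */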
|
|
|
|
|
2015-07-30 22:50:18 +08:00
|
|
|
/**
|
2020-03-10 17:25:35 +08:00
|
|
|
* ib_cm_listen - Initiates listening on the specified service ID for
|
2015-07-30 22:50:18 +08:00
|
|
|
* connection and service ID resolution requests.
|
|
|
|
* @cm_id: Connection identifier associated with the listen request.
|
|
|
|
* @service_id: Service identifier matched against incoming connection
|
|
|
|
* and service ID resolution requests. The service ID should be specified
|
|
|
|
* in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
|
|
|
|
* assign a service ID to the caller.
|
|
|
|
* @service_mask: Mask applied to service ID used to listen across a
|
|
|
|
* range of service IDs. If set to 0, the service ID is matched
|
|
|
|
* exactly. This parameter is ignored if %service_id is set to
|
|
|
|
* IB_CM_ASSIGN_SERVICE_ID.
|
|
|
|
*/
|
2020-03-10 17:25:35 +08:00
|
|
|
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
|
2020-03-10 17:25:35 +08:00
|
|
|
struct cm_id_private *cm_id_priv =
|
|
|
|
container_of(cm_id, struct cm_id_private, id);
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
2015-07-30 22:50:26 +08:00
|
|
|
|
2020-03-10 17:25:35 +08:00
|
|
|
spin_lock_irqsave(&cm_id_priv->lock, flags);
|
|
|
|
if (cm_id_priv->id.state != IB_CM_IDLE) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
2020-03-10 17:25:35 +08:00
|
|
|
ret = cm_init_listen(cm_id_priv, service_id, service_mask);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (!cm_insert_listen(cm_id_priv, NULL)) {
|
2005-07-28 02:45:40 +08:00
|
|
|
ret = -EBUSY;
|
2020-03-10 17:25:35 +08:00
|
|
|
goto out;
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
2015-07-30 22:50:26 +08:00
|
|
|
|
2020-03-10 17:25:35 +08:00
|
|
|
cm_id_priv->id.state = IB_CM_LISTEN;
|
|
|
|
ret = 0;
|
2015-07-30 22:50:26 +08:00
|
|
|
|
2020-03-10 17:25:35 +08:00
|
|
|
out:
|
|
|
|
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
2015-07-30 22:50:26 +08:00
|
|
|
return ret;
|
2015-07-30 22:50:18 +08:00
|
|
|
}
|
2005-07-28 02:45:40 +08:00
|
|
|
EXPORT_SYMBOL(ib_cm_listen);
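/*
 * Usage sketch (illustrative only, not part of this file): a ULP would
 * typically create a cm_id and then listen on a well-known service ID.
 * my_cm_handler() and the service ID value below are hypothetical.
 */
#if 0
static int my_cm_handler(struct ib_cm_id *cm_id,
			 const struct ib_cm_event *event);

static struct ib_cm_id *my_listen(struct ib_device *device)
{
	struct ib_cm_id *cm_id;
	int ret;

	cm_id = ib_create_cm_id(device, my_cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	/* service_mask = 0 requests an exact match on the service ID. */
	ret = ib_cm_listen(cm_id, cpu_to_be64(0x1000ULL), 0);
	if (ret) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(ret);
	}
	return cm_id;
}
#endif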
|
|
|
|
|
2015-07-30 22:50:18 +08:00
|
|
|
/**
|
2020-12-01 20:08:55 +08:00
|
|
|
* ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
|
|
|
|
* the given service ID.
|
2015-07-30 22:50:18 +08:00
|
|
|
*
|
|
|
|
* If there's an existing ID listening on that same device and service ID,
|
|
|
|
* return it.
|
|
|
|
*
|
|
|
|
* @device: Device associated with the cm_id. All related communication will
|
|
|
|
* be associated with the specified device.
|
|
|
|
* @cm_handler: Callback invoked to notify the user of CM events.
|
|
|
|
* @service_id: Service identifier matched against incoming connection
|
|
|
|
* and service ID resolution requests. The service ID should be specified
|
|
|
|
* in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
|
|
|
|
* assign a service ID to the caller.
|
|
|
|
*
|
|
|
|
* Callers should call ib_destroy_cm_id when done with the listener ID.
|
|
|
|
*/
|
|
|
|
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
|
|
|
|
ib_cm_handler cm_handler,
|
|
|
|
__be64 service_id)
|
|
|
|
{
|
2020-03-10 17:25:35 +08:00
|
|
|
struct cm_id_private *listen_id_priv;
|
2015-07-30 22:50:18 +08:00
|
|
|
struct cm_id_private *cm_id_priv;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
/* Create an ID in advance, since the creation may sleep */
|
2020-03-10 17:25:35 +08:00
|
|
|
cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
|
|
|
|
if (IS_ERR(cm_id_priv))
|
|
|
|
return ERR_CAST(cm_id_priv);
|
2015-07-30 22:50:18 +08:00
|
|
|
|
2020-03-10 17:25:35 +08:00
|
|
|
err = cm_init_listen(cm_id_priv, service_id, 0);
|
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
2015-07-30 22:50:18 +08:00
|
|
|
|
2020-03-10 17:25:35 +08:00
|
|
|
spin_lock_irq(&cm_id_priv->lock);
|
|
|
|
listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
|
|
|
|
if (listen_id_priv != cm_id_priv) {
|
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
|
|
|
ib_destroy_cm_id(&cm_id_priv->id);
|
|
|
|
if (!listen_id_priv)
|
2015-07-30 22:50:18 +08:00
|
|
|
return ERR_PTR(-EINVAL);
|
2020-03-10 17:25:35 +08:00
|
|
|
return &listen_id_priv->id;
|
2015-07-30 22:50:18 +08:00
|
|
|
}
|
2020-03-10 17:25:35 +08:00
|
|
|
cm_id_priv->id.state = IB_CM_LISTEN;
|
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2015-07-30 22:50:18 +08:00
|
|
|
|
2020-03-10 17:25:35 +08:00
|
|
|
/*
|
|
|
|
* A listen ID does not need to be in the xarray since it does not
|
|
|
|
* receive mads, is not placed in the remote_id or remote_qpn rbtree,
|
|
|
|
* and does not enter timewait.
|
|
|
|
*/
|
2015-07-30 22:50:18 +08:00
|
|
|
|
2020-03-10 17:25:35 +08:00
|
|
|
return &cm_id_priv->id;
|
2015-07-30 22:50:18 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ib_cm_insert_listen);
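/*
 * Usage sketch (illustrative only): unlike ib_cm_listen(), this entry
 * point either creates a new listener or returns the existing one for
 * the same device and service ID, so independent callers can share a
 * listener. my_cm_handler() and the service ID are hypothetical.
 */
#if 0
static struct ib_cm_id *my_shared_listen(struct ib_device *device)
{
	return ib_cm_insert_listen(device, my_cm_handler,
				   cpu_to_be64(0x1000ULL));
}
#endif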
|
|
|
|
|
2018-07-04 18:48:01 +08:00
|
|
|
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
|
|
|
|
u64 hi_tid, low_tid;
|
|
|
|
|
|
|
|
hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
|
2018-07-04 18:48:01 +08:00
|
|
|
low_tid = (u64)cm_id_priv->id.local_id;
|
2005-07-28 02:45:40 +08:00
|
|
|
return cpu_to_be64(hi_tid | low_tid);
|
|
|
|
}
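/*
 * Resulting TID layout, as a sketch inferred from the code above:
 *
 *  63                            32 31                             0
 * +--------------------------------+--------------------------------+
 * |      mad_agent->hi_tid         |          id.local_id           |
 * +--------------------------------+--------------------------------+
 */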
|
|
|
|
|
|
|
|
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
|
2005-08-14 12:05:57 +08:00
|
|
|
__be16 attr_id, __be64 tid)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
|
|
|
|
hdr->base_version = IB_MGMT_BASE_VERSION;
|
|
|
|
hdr->mgmt_class = IB_MGMT_CLASS_CM;
|
|
|
|
hdr->class_version = IB_CM_CLASS_VERSION;
|
|
|
|
hdr->method = IB_MGMT_METHOD_SEND;
|
|
|
|
hdr->attr_id = attr_id;
|
|
|
|
hdr->tid = tid;
|
|
|
|
}
|
|
|
|
|
2020-05-26 18:33:02 +08:00
|
|
|
static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
|
|
|
|
__be64 tid, u32 attr_mod)
|
|
|
|
{
|
|
|
|
cm_format_mad_hdr(hdr, attr_id, tid);
|
|
|
|
hdr->attr_mod = cpu_to_be32(attr_mod);
|
|
|
|
}
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
static void cm_format_req(struct cm_req_msg *req_msg,
|
|
|
|
struct cm_id_private *cm_id_priv,
|
|
|
|
struct ib_cm_req_param *param)
|
|
|
|
{
|
2017-04-28 07:05:58 +08:00
|
|
|
struct sa_path_rec *pri_path = param->primary_path;
|
|
|
|
struct sa_path_rec *alt_path = param->alternate_path;
|
2017-06-09 01:38:02 +08:00
|
|
|
bool pri_ext = false;
|
|
|
|
|
|
|
|
if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
|
|
|
|
pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
|
|
|
|
pri_path->opa.slid);
|
2007-12-11 07:53:25 +08:00
|
|
|
|
2020-05-26 18:33:02 +08:00
|
|
|
cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
|
|
|
|
cm_form_tid(cm_id_priv), param->ece.attr_mod);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.local_id));
|
|
|
|
IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
|
|
|
|
IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
|
|
|
|
be64_to_cpu(cm_id_priv->id.device->node_guid));
|
2020-01-17 01:00:34 +08:00
|
|
|
IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
|
|
|
|
IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
|
|
|
|
param->remote_cm_response_timeout);
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_req_set_qp_type(req_msg, param->qp_type);
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
|
2020-01-17 01:00:34 +08:00
|
|
|
IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
|
|
|
|
param->local_cm_response_timeout);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
|
|
|
|
be16_to_cpu(param->primary_path->pkey));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
|
|
|
|
param->primary_path->mtu);
|
|
|
|
IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
|
2011-05-14 01:46:20 +08:00
|
|
|
|
|
|
|
if (param->qp_type != IB_QPT_XRC_INI) {
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
|
|
|
|
param->responder_resources);
|
|
|
|
IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
|
|
|
|
IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
|
|
|
|
param->rnr_retry_count);
|
|
|
|
IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
|
2011-05-14 01:46:20 +08:00
|
|
|
}
|
2005-07-28 02:45:40 +08:00
|
|
|
|
RDMA/cm: Use IBA functions for complex structure members
Use a Coccinelle spatch to replace CM structure members used as
structures, arrays, or pointers with IBA_GET/SET versions. Applied with
$ spatch --sp-file edits.sp --in-place drivers/infiniband/core/cm.c
The spatch file was generated using the template pattern:
@@
expression src;
expression len;
{struct} *msg;
@@
- memcpy(msg->{old_name}, src, len)
+ IBA_SET_MEM({new_name}, msg, src, len)
@@
{struct} *msg;
identifier x;
@@
- msg->{old_name}.x
+ IBA_GET_MEM_PTR({new_name}, msg)->x
@@
{struct} *msg;
@@
- &msg->{old_name}
+ IBA_GET_MEM_PTR({new_name}, msg)
For GIDs:
@@
{struct} *msg;
@@
- msg->{old_name}
+ *IBA_GET_MEM_PTR({new_name}, msg)
For non-GIDs:
@@
{struct} *msg;
@@
- msg->{old_name}
+ IBA_GET_MEM_PTR({new_name}, msg)
Iterated for every remaining IBA_CHECK_OFF()/IBA_CHECK_GET()
pairing. Touched up with clang-format after.
Link: https://lore.kernel.org/r/20200116170037.30109-7-jgg@ziepe.ca
Tested-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
2020-01-17 01:00:36 +08:00
|
|
|
*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
|
|
|
|
pri_path->sgid;
|
|
|
|
*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
|
|
|
|
pri_path->dgid;
|
2017-06-09 01:38:02 +08:00
|
|
|
if (pri_ext) {
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
|
|
|
|
->global.interface_id =
|
|
|
|
OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
|
|
|
|
IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
|
|
|
|
->global.interface_id =
|
|
|
|
OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
|
2017-06-09 01:38:02 +08:00
|
|
|
}
|
2007-12-11 07:53:25 +08:00
|
|
|
if (pri_path->hop_limit <= 1) {
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
|
|
|
|
be16_to_cpu(pri_ext ? 0 :
|
|
|
|
htons(ntohl(sa_path_get_slid(
|
|
|
|
pri_path)))));
|
|
|
|
IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
|
|
|
|
be16_to_cpu(pri_ext ? 0 :
|
|
|
|
htons(ntohl(sa_path_get_dlid(
|
|
|
|
pri_path)))));
|
2007-12-11 07:53:25 +08:00
|
|
|
} else {
|
|
|
|
/* Work-around until there's a way to obtain remote LID info */
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
|
|
|
|
be16_to_cpu(IB_LID_PERMISSIVE));
|
|
|
|
IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
|
|
|
|
be16_to_cpu(IB_LID_PERMISSIVE));
|
2007-12-11 07:53:25 +08:00
|
|
|
}
|
2020-01-17 01:00:34 +08:00
|
|
|
IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
|
|
|
|
be32_to_cpu(pri_path->flow_label));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
|
|
|
|
IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
|
|
|
|
IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
|
|
|
|
(pri_path->hop_limit <= 1));
|
|
|
|
IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
|
2007-06-19 02:09:37 +08:00
|
|
|
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
|
2007-12-11 07:53:25 +08:00
|
|
|
pri_path->packet_life_time));
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2007-12-11 07:53:25 +08:00
|
|
|
if (alt_path) {
|
2017-06-09 01:38:02 +08:00
|
|
|
bool alt_ext = false;
|
|
|
|
|
|
|
|
if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
|
|
|
|
alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
|
|
|
|
alt_path->opa.slid);
|
|
|
|
|
2020-01-17 01:00:36 +08:00
|
|
|
*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
|
|
|
|
alt_path->sgid;
|
|
|
|
*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
|
|
|
|
alt_path->dgid;
|
2017-06-09 01:38:02 +08:00
|
|
|
if (alt_ext) {
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
|
|
|
|
req_msg)
|
|
|
|
->global.interface_id =
|
|
|
|
OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
|
|
|
|
IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
|
|
|
|
req_msg)
|
|
|
|
->global.interface_id =
|
|
|
|
OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
|
2017-06-09 01:38:02 +08:00
|
|
|
}
|
2007-12-11 07:53:25 +08:00
|
|
|
if (alt_path->hop_limit <= 1) {
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
|
|
|
|
be16_to_cpu(
|
|
|
|
alt_ext ? 0 :
|
|
|
|
htons(ntohl(sa_path_get_slid(
|
|
|
|
alt_path)))));
|
|
|
|
IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
|
|
|
|
be16_to_cpu(
|
|
|
|
alt_ext ? 0 :
|
|
|
|
htons(ntohl(sa_path_get_dlid(
|
|
|
|
alt_path)))));
|
2007-12-11 07:53:25 +08:00
|
|
|
} else {
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
|
|
|
|
be16_to_cpu(IB_LID_PERMISSIVE));
|
|
|
|
IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
|
|
|
|
be16_to_cpu(IB_LID_PERMISSIVE));
|
2007-12-11 07:53:25 +08:00
|
|
|
}
|
2020-01-17 01:00:34 +08:00
|
|
|
IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
|
|
|
|
be32_to_cpu(alt_path->flow_label));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
|
|
|
|
alt_path->traffic_class);
|
|
|
|
IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
|
|
|
|
alt_path->hop_limit);
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
|
|
|
|
IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
|
|
|
|
(alt_path->hop_limit <= 1));
|
|
|
|
IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
|
2007-06-19 02:09:37 +08:00
|
|
|
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
|
2007-12-11 07:53:25 +08:00
|
|
|
alt_path->packet_life_time));
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
2020-05-26 18:33:02 +08:00
|
|
|
IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
if (param->private_data && param->private_data_len)
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
|
|
|
|
param->private_data_len);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
2006-01-15 05:20:43 +08:00
|
|
|
static int cm_validate_req_param(struct ib_cm_req_param *param)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
|
|
|
|
if (!param->primary_path)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2011-05-14 01:46:20 +08:00
|
|
|
if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
|
|
|
|
param->qp_type != IB_QPT_XRC_INI)
|
2005-07-28 02:45:40 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (param->private_data &&
|
|
|
|
param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (param->alternate_path &&
|
|
|
|
(param->alternate_path->pkey != param->primary_path->pkey ||
|
|
|
|
param->alternate_path->mtu != param->primary_path->mtu))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int ib_send_cm_req(struct ib_cm_id *cm_id,
|
|
|
|
struct ib_cm_req_param *param)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv;
|
|
|
|
struct cm_req_msg *req_msg;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = cm_validate_req_param(param);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Verify that we're not in timewait. */
|
|
|
|
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
|
|
|
|
spin_lock_irqsave(&cm_id_priv->lock, flags);
|
2020-03-10 17:25:33 +08:00
|
|
|
if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
|
2005-07-28 02:45:40 +08:00
|
|
|
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
|
|
|
|
|
|
|
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
|
|
|
|
id.local_id);
|
2006-07-26 00:52:01 +08:00
|
|
|
if (IS_ERR(cm_id_priv->timewait_info)) {
|
|
|
|
ret = PTR_ERR(cm_id_priv->timewait_info);
|
2020-12-04 14:42:05 +08:00
|
|
|
cm_id_priv->timewait_info = NULL;
|
2005-07-28 02:45:40 +08:00
|
|
|
goto out;
|
2006-07-26 00:52:01 +08:00
|
|
|
}
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2018-06-19 15:59:19 +08:00
|
|
|
ret = cm_init_av_by_path(param->primary_path,
|
|
|
|
param->ppath_sgid_attr, &cm_id_priv->av,
|
2016-10-27 21:36:27 +08:00
|
|
|
cm_id_priv);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ret)
|
2020-03-10 17:25:33 +08:00
|
|
|
goto out;
|
2005-07-28 02:45:40 +08:00
|
|
|
if (param->alternate_path) {
|
2018-06-19 15:59:19 +08:00
|
|
|
ret = cm_init_av_by_path(param->alternate_path, NULL,
|
2016-10-27 21:36:27 +08:00
|
|
|
&cm_id_priv->alt_av, cm_id_priv);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ret)
|
2020-03-10 17:25:33 +08:00
|
|
|
goto out;
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
cm_id->service_id = param->service_id;
|
2009-01-18 09:11:57 +08:00
|
|
|
cm_id->service_mask = ~cpu_to_be64(0);
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_id_priv->timeout_ms = cm_convert_to_ms(
|
|
|
|
param->primary_path->packet_life_time) * 2 +
|
|
|
|
cm_convert_to_ms(
|
|
|
|
param->remote_cm_response_timeout);
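	/*
	 * Worked example (assuming cm_convert_to_ms(t) approximates
	 * 4.096us * 2^t as 2^(t - 8) ms): packet_life_time = 18 and
	 * remote_cm_response_timeout = 20 give
	 * 2 * 1024ms + 4096ms = 6144ms.
	 */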
|
|
|
|
cm_id_priv->max_cm_retries = param->max_cm_retries;
|
|
|
|
cm_id_priv->initiator_depth = param->initiator_depth;
|
|
|
|
cm_id_priv->responder_resources = param->responder_resources;
|
|
|
|
cm_id_priv->retry_count = param->retry_count;
|
|
|
|
cm_id_priv->path_mtu = param->primary_path->mtu;
|
2006-11-29 06:57:13 +08:00
|
|
|
cm_id_priv->pkey = param->primary_path->pkey;
|
2005-10-25 03:33:56 +08:00
|
|
|
cm_id_priv->qp_type = param->qp_type;
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
|
|
|
|
if (ret)
|
2020-03-10 17:25:33 +08:00
|
|
|
goto out;
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
|
|
|
|
cm_format_req(req_msg, cm_id_priv, param);
|
|
|
|
cm_id_priv->tid = req_msg->hdr.tid;
|
2005-10-26 01:51:39 +08:00
|
|
|
cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
|
|
|
|
|
2020-01-17 01:00:34 +08:00
|
|
|
cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
|
|
|
|
cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2020-08-17 21:53:22 +08:00
|
|
|
trace_icm_send_req(&cm_id_priv->id);
|
2005-07-28 02:45:40 +08:00
|
|
|
spin_lock_irqsave(&cm_id_priv->lock, flags);
|
2005-10-26 01:51:39 +08:00
|
|
|
ret = ib_post_send_mad(cm_id_priv->msg, NULL);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ret) {
|
|
|
|
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
|
|
|
goto error2;
|
|
|
|
}
|
|
|
|
BUG_ON(cm_id->state != IB_CM_IDLE);
|
|
|
|
cm_id->state = IB_CM_REQ_SENT;
|
|
|
|
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error2: cm_free_msg(cm_id_priv->msg);
|
|
|
|
out: return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ib_send_cm_req);
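/*
 * Usage sketch (illustrative only): a minimal set of REQ parameters.
 * All values below are made up; a real caller fills them in from its
 * resolved path record and QP.
 */
#if 0
static int my_send_req(struct ib_cm_id *cm_id, struct sa_path_rec *path,
		       u32 qpn)
{
	struct ib_cm_req_param param = {
		.primary_path			= path,
		.service_id			= cpu_to_be64(0x1000ULL),
		.qp_num				= qpn,
		.qp_type			= IB_QPT_RC,
		.starting_psn			= 0x100,
		.responder_resources		= 4,
		.initiator_depth		= 4,
		.remote_cm_response_timeout	= 20,
		.local_cm_response_timeout	= 20,
		.retry_count			= 7,
		.rnr_retry_count		= 7,
		.max_cm_retries			= 15,
	};

	return ib_send_cm_req(cm_id, &param);
}
#endif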
|
|
|
|
|
|
|
|
static int cm_issue_rej(struct cm_port *port,
|
|
|
|
struct ib_mad_recv_wc *mad_recv_wc,
|
|
|
|
enum ib_cm_rej_reason reason,
|
|
|
|
enum cm_msg_response msg_rejected,
|
|
|
|
void *ari, u8 ari_length)
|
|
|
|
{
|
|
|
|
struct ib_mad_send_buf *msg = NULL;
|
|
|
|
struct cm_rej_msg *rej_msg, *rcv_msg;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* We just need common CM header information. Cast to any message. */
|
|
|
|
rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
|
|
|
|
rej_msg = (struct cm_rej_msg *) msg->mad;
|
|
|
|
|
|
|
|
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
|
|
|
|
IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
|
|
|
|
IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
|
|
|
|
IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REJ_REASON, rej_msg, reason);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
if (ari && ari_length) {
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
2020-08-17 21:53:22 +08:00
|
|
|
trace_icm_issue_rej(
|
|
|
|
IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
|
|
|
|
IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
|
2005-10-26 01:51:39 +08:00
|
|
|
ret = ib_post_send_mad(msg, NULL);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ret)
|
|
|
|
cm_free_msg(msg);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-06-09 01:38:03 +08:00
|
|
|
static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
|
|
|
|
{
|
2020-01-17 01:00:35 +08:00
|
|
|
return ((cpu_to_be16(
|
|
|
|
IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
|
2020-01-17 01:00:36 +08:00
|
|
|
(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
|
|
|
|
req_msg))));
|
2017-06-09 01:38:03 +08:00
|
|
|
}
|
|
|
|
|
2021-03-01 15:04:20 +08:00
|
|
|
static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
|
2017-06-09 01:38:03 +08:00
|
|
|
struct sa_path_rec *path, union ib_gid *gid)
|
|
|
|
{
|
|
|
|
if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
|
|
|
|
path->rec_type = SA_PATH_REC_TYPE_OPA;
|
|
|
|
else
|
|
|
|
path->rec_type = SA_PATH_REC_TYPE_IB;
|
|
|
|
}
|
|
|
|
|
2017-06-09 01:38:04 +08:00
|
|
|
static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
|
|
|
|
struct sa_path_rec *primary_path,
|
|
|
|
struct sa_path_rec *alt_path)
|
|
|
|
{
|
|
|
|
u32 lid;
|
|
|
|
|
|
|
|
if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
|
|
|
|
sa_path_set_dlid(primary_path,
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
|
|
|
|
req_msg));
|
2017-06-09 01:38:04 +08:00
|
|
|
sa_path_set_slid(primary_path,
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
|
|
|
|
req_msg));
|
2017-06-09 01:38:04 +08:00
|
|
|
} else {
|
2020-01-17 01:00:36 +08:00
|
|
|
lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
|
|
|
|
CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
|
2017-10-07 04:06:17 +08:00
|
|
|
sa_path_set_dlid(primary_path, lid);
|
2017-06-09 01:38:04 +08:00
|
|
|
|
2020-01-17 01:00:36 +08:00
|
|
|
lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
|
|
|
|
CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
|
2017-10-07 04:06:17 +08:00
|
|
|
sa_path_set_slid(primary_path, lid);
|
2017-06-09 01:38:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!cm_req_has_alt_path(req_msg))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
|
2020-01-17 01:00:35 +08:00
|
|
|
sa_path_set_dlid(alt_path,
|
|
|
|
IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
|
|
|
|
req_msg));
|
|
|
|
sa_path_set_slid(alt_path,
|
|
|
|
IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
|
|
|
|
req_msg));
|
2017-06-09 01:38:04 +08:00
|
|
|
} else {
|
2020-01-17 01:00:36 +08:00
|
|
|
lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
|
|
|
|
CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
|
2017-10-07 04:06:17 +08:00
|
|
|
sa_path_set_dlid(alt_path, lid);
|
2017-06-09 01:38:04 +08:00
|
|
|
|
2020-01-17 01:00:36 +08:00
|
|
|
lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
|
|
|
|
CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
|
2017-10-07 04:06:17 +08:00
|
|
|
sa_path_set_slid(alt_path, lid);
|
2017-06-09 01:38:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-01-15 05:20:43 +08:00
|
|
|
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
|
2017-04-28 07:05:58 +08:00
|
|
|
struct sa_path_rec *primary_path,
|
|
|
|
struct sa_path_rec *alt_path)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
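	/*
	 * Clarifying note (inferred from the field names): the REQ was
	 * built by the remote peer, so its "local" port GID/LID fields
	 * describe the peer and land in our dgid/dlid, and its "remote"
	 * fields in our sgid/slid.
	 */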
|
2020-01-17 01:00:36 +08:00
|
|
|
primary_path->dgid =
|
|
|
|
*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
|
|
|
|
primary_path->sgid =
|
|
|
|
*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
|
2020-01-17 01:00:34 +08:00
|
|
|
primary_path->flow_label =
|
|
|
|
cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
|
2020-01-17 01:00:35 +08:00
|
|
|
primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
|
|
|
|
primary_path->traffic_class =
|
|
|
|
IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
primary_path->reversible = 1;
|
2020-01-17 01:00:35 +08:00
|
|
|
primary_path->pkey =
|
|
|
|
cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
|
2020-01-17 01:00:33 +08:00
|
|
|
primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
primary_path->mtu_selector = IB_SA_EQ;
|
2020-01-17 01:00:33 +08:00
|
|
|
primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
primary_path->rate_selector = IB_SA_EQ;
|
2020-01-17 01:00:33 +08:00
|
|
|
primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
primary_path->packet_life_time_selector = IB_SA_EQ;
|
|
|
|
primary_path->packet_life_time =
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
|
2020-01-17 01:00:35 +08:00
|
|
|
primary_path->service_id =
|
|
|
|
cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
|
2018-03-21 23:16:35 +08:00
|
|
|
if (sa_path_is_roce(primary_path))
|
|
|
|
primary_path->roce.route_resolved = false;
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2017-06-09 01:38:04 +08:00
|
|
|
if (cm_req_has_alt_path(req_msg)) {
|
2020-01-17 01:00:36 +08:00
|
|
|
alt_path->dgid = *IBA_GET_MEM_PTR(
|
|
|
|
CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
|
|
|
|
alt_path->sgid = *IBA_GET_MEM_PTR(
|
|
|
|
CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
|
2020-01-17 01:00:34 +08:00
|
|
|
alt_path->flow_label = cpu_to_be32(
|
|
|
|
IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
|
2020-01-17 01:00:35 +08:00
|
|
|
alt_path->hop_limit =
|
|
|
|
IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
|
|
|
|
alt_path->traffic_class =
|
|
|
|
IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
alt_path->reversible = 1;
|
2020-01-17 01:00:35 +08:00
|
|
|
alt_path->pkey =
|
|
|
|
cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
|
2020-01-17 01:00:33 +08:00
|
|
|
alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
alt_path->mtu_selector = IB_SA_EQ;
|
2020-01-17 01:00:33 +08:00
|
|
|
alt_path->mtu =
|
|
|
|
IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
alt_path->rate_selector = IB_SA_EQ;
|
2020-01-17 01:00:33 +08:00
|
|
|
alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
alt_path->packet_life_time_selector = IB_SA_EQ;
|
|
|
|
alt_path->packet_life_time =
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
|
2020-01-17 01:00:35 +08:00
|
|
|
alt_path->service_id =
|
|
|
|
cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
|
2018-03-21 23:16:35 +08:00
|
|
|
|
|
|
|
if (sa_path_is_roce(alt_path))
|
|
|
|
alt_path->roce.route_resolved = false;
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
2017-06-09 01:38:04 +08:00
|
|
|
cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
2015-07-30 22:50:21 +08:00
|
|
|
static u16 cm_get_bth_pkey(struct cm_work *work)
|
|
|
|
{
|
|
|
|
struct ib_device *ib_dev = work->port->cm_dev->ib_device;
|
2021-03-01 15:04:20 +08:00
|
|
|
u32 port_num = work->port->port_num;
|
2015-07-30 22:50:21 +08:00
|
|
|
u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
|
|
|
|
u16 pkey;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
|
|
|
|
if (ret) {
|
|
|
|
dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
|
|
|
|
port_num, pkey_index, ret);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return pkey;
|
|
|
|
}
|
|
|
|
|
2017-11-14 20:34:38 +08:00
|
|
|
/**
|
2020-12-01 20:08:55 +08:00
|
|
|
* cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
|
2017-11-14 20:34:38 +08:00
|
|
|
* ULPs (such as IPoIB) do not understand OPA GIDs and will
|
|
|
|
* reject them as the local_gid will not match the sgid. Therefore,
|
|
|
|
* change the pathrec's SGID to an IB SGID.
|
|
|
|
*
|
|
|
|
* @work: Work completion
|
|
|
|
* @path: Path record
|
|
|
|
*/
|
|
|
|
static void cm_opa_to_ib_sgid(struct cm_work *work,
|
|
|
|
struct sa_path_rec *path)
|
|
|
|
{
|
|
|
|
struct ib_device *dev = work->port->cm_dev->ib_device;
|
2021-03-01 15:04:20 +08:00
|
|
|
u32 port_num = work->port->port_num;
|
2017-11-14 20:34:38 +08:00
|
|
|
|
|
|
|
if (rdma_cap_opa_ah(dev, port_num) &&
|
|
|
|
(ib_is_opa_gid(&path->sgid))) {
|
|
|
|
union ib_gid sgid;
|
|
|
|
|
2018-06-05 13:40:22 +08:00
|
|
|
if (rdma_query_gid(dev, port_num, 0, &sgid)) {
|
2017-11-14 20:34:38 +08:00
|
|
|
dev_warn(&dev->dev,
|
|
|
|
"Error updating sgid in CM request\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
path->sgid = sgid;
|
|
|
|
}
|
|
|
|
}
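/*
 * Illustrative note (assumption, not from the original source): GID
 * table index 0 holds the port's default IB GID, so substituting it
 * gives IB-only ULPs an SGID they can match against their local_gid.
 */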
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
static void cm_format_req_event(struct cm_work *work,
|
|
|
|
struct cm_id_private *cm_id_priv,
|
|
|
|
struct ib_cm_id *listen_id)
|
|
|
|
{
|
|
|
|
struct cm_req_msg *req_msg;
|
|
|
|
struct ib_cm_req_event_param *param;
|
|
|
|
|
|
|
|
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
|
|
|
|
param = &work->cm_event.param.req_rcvd;
|
|
|
|
param->listen_id = listen_id;
|
2015-07-30 22:50:21 +08:00
|
|
|
param->bth_pkey = cm_get_bth_pkey(work);
|
2005-07-28 02:45:40 +08:00
|
|
|
param->port = cm_id_priv->av.port->port_num;
|
|
|
|
param->primary_path = &work->path[0];
|
2017-11-14 20:34:38 +08:00
|
|
|
cm_opa_to_ib_sgid(work, param->primary_path);
|
|
|
|
if (cm_req_has_alt_path(req_msg)) {
|
2005-07-28 02:45:40 +08:00
|
|
|
param->alternate_path = &work->path[1];
|
2017-11-14 20:34:38 +08:00
|
|
|
cm_opa_to_ib_sgid(work, param->alternate_path);
|
|
|
|
} else {
|
2005-07-28 02:45:40 +08:00
|
|
|
param->alternate_path = NULL;
|
2017-11-14 20:34:38 +08:00
|
|
|
}
|
2020-01-17 01:00:35 +08:00
|
|
|
param->remote_ca_guid =
|
|
|
|
cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
|
|
|
|
param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
|
2020-01-17 01:00:34 +08:00
|
|
|
param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
param->qp_type = cm_req_get_qp_type(req_msg);
|
2020-01-17 01:00:34 +08:00
|
|
|
param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
|
2020-01-17 01:00:33 +08:00
|
|
|
param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
|
|
|
|
param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
param->local_cm_response_timeout =
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
|
|
|
|
param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
param->remote_cm_response_timeout =
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
|
|
|
|
param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
|
|
|
|
param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
|
|
|
|
param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
|
2018-07-16 16:50:11 +08:00
|
|
|
param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
|
2020-05-26 18:33:02 +08:00
|
|
|
param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
|
|
|
|
param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
|
|
|
|
|
2020-01-17 01:00:36 +08:00
|
|
|
work->cm_event.private_data =
|
|
|
|
IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void cm_process_work(struct cm_id_private *cm_id_priv,
|
|
|
|
struct cm_work *work)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* We will typically only have the current event to report. */
|
|
|
|
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
|
|
|
|
cm_free_work(work);
|
|
|
|
|
|
|
|
while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_lock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
work = cm_dequeue_work(cm_id_priv);
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2018-05-29 19:56:16 +08:00
|
|
|
if (!work)
|
|
|
|
return;
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
|
|
|
|
&work->cm_event);
|
|
|
|
cm_free_work(work);
|
|
|
|
}
|
|
|
|
cm_deref_id(cm_id_priv);
|
|
|
|
if (ret)
|
2006-07-14 15:23:52 +08:00
|
|
|
cm_destroy_id(&cm_id_priv->id, ret);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void cm_format_mra(struct cm_mra_msg *mra_msg,
|
|
|
|
struct cm_id_private *cm_id_priv,
|
|
|
|
enum cm_msg_response msg_mraed, u8 service_timeout,
|
|
|
|
const void *private_data, u8 private_data_len)
|
|
|
|
{
|
|
|
|
cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.local_id));
|
|
|
|
IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.remote_id));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
if (private_data && private_data_len)
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
|
|
|
|
private_data_len);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void cm_format_rej(struct cm_rej_msg *rej_msg,
|
|
|
|
struct cm_id_private *cm_id_priv,
|
2020-04-07 01:32:42 +08:00
|
|
|
enum ib_cm_rej_reason reason, void *ari,
|
|
|
|
u8 ari_length, const void *private_data,
|
|
|
|
u8 private_data_len, enum ib_cm_state state)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
|
2020-03-10 17:25:40 +08:00
|
|
|
lockdep_assert_held(&cm_id_priv->lock);
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.remote_id));
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2020-04-07 01:32:42 +08:00
|
|
|
switch (state) {
|
2005-07-28 02:45:40 +08:00
|
|
|
case IB_CM_REQ_RCVD:
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
|
2005-07-28 02:45:40 +08:00
|
|
|
break;
|
|
|
|
case IB_CM_MRA_REQ_SENT:
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.local_id));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
|
2005-07-28 02:45:40 +08:00
|
|
|
break;
|
|
|
|
case IB_CM_REP_RCVD:
|
|
|
|
case IB_CM_MRA_REP_SENT:
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.local_id));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
|
2005-07-28 02:45:40 +08:00
|
|
|
break;
|
|
|
|
default:
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.local_id));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
|
|
|
|
CM_MSG_RESPONSE_OTHER);
|
2005-07-28 02:45:40 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REJ_REASON, rej_msg, reason);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ari && ari_length) {
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (private_data && private_data_len)
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
|
|
|
|
private_data_len);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void cm_dup_req_handler(struct cm_work *work,
|
|
|
|
struct cm_id_private *cm_id_priv)
|
|
|
|
{
|
|
|
|
struct ib_mad_send_buf *msg = NULL;
|
|
|
|
int ret;
|
|
|
|
|
2007-07-17 12:49:35 +08:00
|
|
|
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
|
|
counter[CM_REQ_COUNTER]);
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
/* Quick state check to discard duplicate REQs. */
|
2020-03-10 17:25:39 +08:00
|
|
|
spin_lock_irq(&cm_id_priv->lock);
|
|
|
|
if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
|
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
return;
|
2020-03-10 17:25:39 +08:00
|
|
|
}
|
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
|
|
|
|
if (ret)
|
|
|
|
return;
|
|
|
|
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_lock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
switch (cm_id_priv->id.state) {
|
|
|
|
case IB_CM_MRA_REQ_SENT:
|
|
|
|
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
|
|
|
|
CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
|
|
|
|
cm_id_priv->private_data,
|
|
|
|
cm_id_priv->private_data_len);
|
|
|
|
break;
|
|
|
|
case IB_CM_TIMEWAIT:
|
2020-04-07 01:32:42 +08:00
|
|
|
cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
|
|
|
|
IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
|
|
|
|
IB_CM_TIMEWAIT);
|
2005-07-28 02:45:40 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto unlock;
|
|
|
|
}
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2020-08-17 21:53:22 +08:00
|
|
|
trace_icm_send_dup_req(&cm_id_priv->id);
|
2005-10-26 01:51:39 +08:00
|
|
|
ret = ib_post_send_mad(msg, NULL);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ret)
|
|
|
|
goto free;
|
|
|
|
return;
|
|
|
|
|
2007-06-19 02:09:36 +08:00
|
|
|
unlock: spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
free: cm_free_msg(msg);
|
|
|
|
}
|
|
|
|
|
2021-04-07 16:15:50 +08:00
|
|
|
static struct cm_id_private *cm_match_req(struct cm_work *work,
|
|
|
|
struct cm_id_private *cm_id_priv)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
|
|
|
|
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
|
|
|
|
struct cm_timewait_info *timewait_info;
|
|
|
|
struct cm_req_msg *req_msg;
|
|
|
|
|
|
|
|
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
|
|
|
|
|
2007-05-22 08:38:02 +08:00
|
|
|
/* Check for possible duplicate REQ. */
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_lock_irq(&cm.lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
|
|
|
|
if (timewait_info) {
|
2019-12-19 21:47:50 +08:00
|
|
|
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
|
2005-07-28 02:45:40 +08:00
|
|
|
timewait_info->work.remote_id);
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm.lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (cur_cm_id_priv) {
|
|
|
|
cm_dup_req_handler(work, cur_cm_id_priv);
|
|
|
|
cm_deref_id(cur_cm_id_priv);
|
2007-05-22 08:38:02 +08:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check for stale connections. */
|
|
|
|
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
|
|
|
|
if (timewait_info) {
|
2020-05-06 15:46:56 +08:00
|
|
|
cm_remove_remote(cm_id_priv);
|
2019-12-19 21:47:50 +08:00
|
|
|
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
|
2016-10-28 19:14:29 +08:00
|
|
|
timewait_info->work.remote_id);
|
|
|
|
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm.lock);
|
2007-05-22 08:38:02 +08:00
|
|
|
cm_issue_rej(work->port, work->mad_recv_wc,
|
|
|
|
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
|
|
|
|
NULL, 0);
|
2016-10-28 19:14:29 +08:00
|
|
|
if (cur_cm_id_priv) {
|
2020-05-06 15:47:00 +08:00
|
|
|
ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
|
2016-10-28 19:14:29 +08:00
|
|
|
cm_deref_id(cur_cm_id_priv);
|
|
|
|
}
|
2007-05-22 08:38:02 +08:00
|
|
|
return NULL;
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Find matching listen request. */
|
2020-01-17 01:00:35 +08:00
|
|
|
listen_cm_id_priv = cm_find_listen(
|
|
|
|
cm_id_priv->id.device,
|
|
|
|
cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
|
2005-07-28 02:45:40 +08:00
|
|
|
if (!listen_cm_id_priv) {
|
2020-05-06 15:46:56 +08:00
|
|
|
cm_remove_remote(cm_id_priv);
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm.lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_issue_rej(work->port, work->mad_recv_wc,
|
|
|
|
IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
|
|
|
|
NULL, 0);
|
2020-03-10 17:25:38 +08:00
|
|
|
return NULL;
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm.lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
return listen_cm_id_priv;
|
|
|
|
}
|
|
|
|
|
2007-12-11 07:53:25 +08:00
|
|
|
/*
|
|
|
|
* Work-around for inter-subnet connections. If the LIDs are permissive,
|
|
|
|
* we need to override the LID/SL data in the REQ with the LID information
|
|
|
|
* in the work completion.
|
|
|
|
*/
|
|
|
|
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
|
|
|
|
{
|
2020-01-17 01:00:33 +08:00
|
|
|
if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
|
2020-01-17 01:00:35 +08:00
|
|
|
if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
|
|
|
|
req_msg)) == IB_LID_PERMISSIVE) {
|
|
|
|
IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
|
|
|
|
be16_to_cpu(ib_lid_be16(wc->slid)));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
|
2007-12-11 07:53:25 +08:00
|
|
|
}
|
|
|
|
|
2020-01-17 01:00:35 +08:00
|
|
|
if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
|
|
|
|
req_msg)) == IB_LID_PERMISSIVE)
|
|
|
|
IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
|
|
|
|
wc->dlid_path_bits);
|
2007-12-11 07:53:25 +08:00
|
|
|
}
|
|
|
|
|
2020-01-17 01:00:33 +08:00
|
|
|
if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
|
2020-01-17 01:00:35 +08:00
|
|
|
if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
|
|
|
|
req_msg)) == IB_LID_PERMISSIVE) {
|
|
|
|
IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
|
|
|
|
be16_to_cpu(ib_lid_be16(wc->slid)));
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
|
2007-12-11 07:53:25 +08:00
|
|
|
}
|
|
|
|
|
2020-01-17 01:00:35 +08:00
|
|
|
if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
|
|
|
|
req_msg)) == IB_LID_PERMISSIVE)
|
|
|
|
IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
|
|
|
|
wc->dlid_path_bits);
|
2007-12-11 07:53:25 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
static int cm_req_handler(struct cm_work *work)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
|
|
|
|
struct cm_req_msg *req_msg;
|
2017-04-30 02:41:28 +08:00
|
|
|
const struct ib_global_route *grh;
|
2018-06-19 15:59:20 +08:00
|
|
|
const struct ib_gid_attr *gid_attr;
|
2005-07-28 02:45:40 +08:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
|
|
|
|
|
2020-03-10 17:25:38 +08:00
|
|
|
cm_id_priv =
|
|
|
|
cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
|
|
|
|
if (IS_ERR(cm_id_priv))
|
|
|
|
return PTR_ERR(cm_id_priv);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2020-01-17 01:00:35 +08:00
|
|
|
cm_id_priv->id.remote_id =
|
|
|
|
cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
|
2020-03-10 17:25:38 +08:00
|
|
|
cm_id_priv->id.service_id =
|
|
|
|
cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
|
|
|
|
cm_id_priv->id.service_mask = ~cpu_to_be64(0);
|
|
|
|
cm_id_priv->tid = req_msg->hdr.tid;
|
|
|
|
cm_id_priv->timeout_ms = cm_convert_to_ms(
|
|
|
|
IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
|
|
|
|
cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
|
|
|
|
cm_id_priv->remote_qpn =
|
|
|
|
cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
|
|
|
|
cm_id_priv->initiator_depth =
|
|
|
|
IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
|
|
|
|
cm_id_priv->responder_resources =
|
|
|
|
IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
|
|
|
|
cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
|
|
|
|
cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
|
|
|
|
cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
|
|
|
|
cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
|
|
|
|
cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
|
|
|
|
cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
|
|
|
|
|
2017-11-14 20:52:13 +08:00
|
|
|
ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
|
|
|
|
work->mad_recv_wc->recv_buf.grh,
|
|
|
|
&cm_id_priv->av);
|
|
|
|
if (ret)
|
|
|
|
goto destroy;
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
|
|
|
|
id.local_id);
|
|
|
|
if (IS_ERR(cm_id_priv->timewait_info)) {
|
|
|
|
ret = PTR_ERR(cm_id_priv->timewait_info);
|
2020-12-04 14:42:05 +08:00
|
|
|
cm_id_priv->timewait_info = NULL;
|
2006-08-29 02:57:42 +08:00
|
|
|
goto destroy;
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
2020-03-10 17:25:38 +08:00
|
|
|
cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
|
2020-01-17 01:00:35 +08:00
|
|
|
cm_id_priv->timewait_info->remote_ca_guid =
|
|
|
|
cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
|
2020-03-10 17:25:38 +08:00
|
|
|
cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note that the ID pointer is not in the xarray at this point,
|
|
|
|
* so this set is only visible to the local thread.
|
|
|
|
*/
|
|
|
|
cm_id_priv->id.state = IB_CM_REQ_RCVD;
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
|
|
|
|
if (!listen_cm_id_priv) {
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_no_listener_err(&cm_id_priv->id);
|
2020-03-10 17:25:38 +08:00
|
|
|
cm_id_priv->id.state = IB_CM_IDLE;
|
2005-07-28 02:45:40 +08:00
|
|
|
ret = -EINVAL;
|
2020-03-10 17:25:33 +08:00
|
|
|
goto destroy;
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
RDMA/core: Fix corrupted SL on passive side
On RoCE systems, a CM REQ contains a Primary Hop Limit > 1 and Primary
Subnet Local is zero.
In cm_req_handler(), the cm_process_routed_req() function is called. Since
the Primary Subnet Local value is zero in the request, and since this is
RoCE (Primary Local LID is permissive), the following statement will be
executed:
IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
This corrupts SL in req_msg if it was different from zero. In other words,
a request to set up a connection using an SL != zero will not be honored,
and a connection using SL zero will be created instead.
Fixed by not calling cm_process_routed_req() on RoCE systems; the
cm_process_routed_req() function is only used for IB anyhow.
Fixes: 3971c9f6dbf2 ("IB/cm: Add interim support for routed paths")
Link: https://lore.kernel.org/r/1616420132-31005-1-git-send-email-haakon.bugge@oracle.com
Signed-off-by: Håkon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
2021-03-22 21:35:32 +08:00
|
|
|
if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
|
|
|
|
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
|
IB/core: Ethernet L2 attributes in verbs/cm structures
This patch add the support for Ethernet L2 attributes in the
verbs/cm/cma structures.
When dealing with L2 Ethernet, we should use smac, dmac, vlan ID and priority
in a similar manner that the IB L2 (and the L4 PKEY) attributes are used.
Thus, those attributes were added to the following structures:
* ib_ah_attr - added dmac
* ib_qp_attr - added smac and vlan_id, (sl remains vlan priority)
* ib_wc - added smac, vlan_id
* ib_sa_path_rec - added smac, dmac, vlan_id
* cm_av - added smac and vlan_id
For the path record structure, extra care was taken to avoid the new
fields when packing it into wire format, so we don't break the IB CM
and SA wire protocol.
On the active side, the CM fills its internal structures from the
path provided by the ULP. There we add code that takes the ETH L2
attributes and places them into the CM Address Handle (struct cm_av).
On the passive side, the CM fills its internal structures from the WC
associated with the REQ message. There we add code that takes the ETH
L2 attributes from the WC.
When the HW driver provides the required ETH L2 attributes in the WC,
they set the IB_WC_WITH_SMAC and IB_WC_WITH_VLAN flags. The IB core
code checks for the presence of these flags, and in their absence does
address resolution from the ib_init_ah_from_wc() helper function.
ib_modify_qp_is_ok is also updated to consider the link layer. Some
parameters are mandatory for Ethernet link layer, while they are
irrelevant for IB. Vendor drivers are modified to support the new
function signature.
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
2013-12-13 00:03:11 +08:00
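A rough sketch of the structure additions listed above (illustrative shapes only, showing just the new ETH L2 members; all pre-existing members are elided and the exact upstream layout may differ):

struct ib_ah_attr {
	/* ... existing members ... */
	u8	dmac[ETH_ALEN];
};

struct ib_qp_attr {
	/* ... existing members ... */
	u8	smac[ETH_ALEN];
	u16	vlan_id;	/* sl continues to carry the VLAN priority */
};

struct cm_av {
	/* ... existing members ... */
	u8	smac[ETH_ALEN];
	u16	vlan_id;
};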
|
|
|
|
2017-04-28 07:06:00 +08:00
|
|
|
memset(&work->path[0], 0, sizeof(work->path[0]));
|
2017-10-19 13:40:30 +08:00
|
|
|
if (cm_req_has_alt_path(req_msg))
|
|
|
|
memset(&work->path[1], 0, sizeof(work->path[1]));
|
2017-04-30 02:41:28 +08:00
|
|
|
grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
|
2018-06-19 15:59:20 +08:00
|
|
|
gid_attr = grh->sgid_attr;
|
2017-11-14 20:52:18 +08:00
|
|
|
|
2019-05-02 15:48:02 +08:00
|
|
|
if (gid_attr &&
|
|
|
|
rdma_protocol_roce(work->port->cm_dev->ib_device,
|
|
|
|
work->port->port_num)) {
|
2017-11-14 20:52:18 +08:00
|
|
|
work->path[0].rec_type =
|
2018-06-19 15:59:20 +08:00
|
|
|
sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
|
2017-11-14 20:52:18 +08:00
|
|
|
} else {
|
2020-01-17 01:00:36 +08:00
|
|
|
cm_path_set_rec_type(
|
|
|
|
work->port->cm_dev->ib_device, work->port->port_num,
|
|
|
|
&work->path[0],
|
|
|
|
IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
|
|
|
|
req_msg));
|
2017-11-14 20:52:18 +08:00
|
|
|
}
|
|
|
|
if (cm_req_has_alt_path(req_msg))
|
|
|
|
work->path[1].rec_type = work->path[0].rec_type;
|
|
|
|
cm_format_paths_from_req(req_msg, &work->path[0],
|
|
|
|
&work->path[1]);
|
|
|
|
if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
|
|
|
|
sa_path_set_dmac(&work->path[0],
|
|
|
|
cm_id_priv->av.ah_attr.roce.dmac);
|
|
|
|
work->path[0].hop_limit = grh->hop_limit;
|
2018-06-19 15:59:20 +08:00
|
|
|
ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
|
2017-11-14 20:52:18 +08:00
|
|
|
cm_id_priv);
|
2006-08-29 02:57:42 +08:00
|
|
|
if (ret) {
|
2017-11-14 20:52:18 +08:00
|
|
|
int err;
|
|
|
|
|
2018-06-05 13:40:22 +08:00
|
|
|
err = rdma_query_gid(work->port->cm_dev->ib_device,
|
|
|
|
work->port->port_num, 0,
|
|
|
|
&work->path[0].sgid);
|
2017-11-14 20:52:18 +08:00
|
|
|
if (err)
|
2020-03-10 17:25:38 +08:00
|
|
|
ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
|
2017-11-14 20:52:18 +08:00
|
|
|
NULL, 0, NULL, 0);
|
|
|
|
else
|
2020-03-10 17:25:38 +08:00
|
|
|
ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
|
2017-11-14 20:52:18 +08:00
|
|
|
&work->path[0].sgid,
|
|
|
|
sizeof(work->path[0].sgid),
|
|
|
|
NULL, 0);
|
2006-08-29 02:57:42 +08:00
|
|
|
goto rejected;
|
|
|
|
}
|
2017-06-09 01:38:03 +08:00
|
|
|
if (cm_req_has_alt_path(req_msg)) {
|
2018-06-19 15:59:19 +08:00
|
|
|
ret = cm_init_av_by_path(&work->path[1], NULL,
|
|
|
|
&cm_id_priv->alt_av, cm_id_priv);
|
2006-08-29 02:57:42 +08:00
|
|
|
if (ret) {
|
2020-03-10 17:25:38 +08:00
|
|
|
ib_send_cm_rej(&cm_id_priv->id,
|
|
|
|
IB_CM_REJ_INVALID_ALT_GID,
|
2006-08-29 02:57:42 +08:00
|
|
|
&work->path[0].sgid,
|
2017-11-14 20:52:18 +08:00
|
|
|
sizeof(work->path[0].sgid), NULL, 0);
|
2006-08-29 02:57:42 +08:00
|
|
|
goto rejected;
|
|
|
|
}
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
2020-03-10 17:25:38 +08:00
|
|
|
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
|
|
|
|
cm_id_priv->id.context = listen_cm_id_priv->id.context;
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
|
2020-03-10 17:25:38 +08:00
|
|
|
|
|
|
|
/* Now MAD handlers can see the new ID */
|
|
|
|
spin_lock_irq(&cm_id_priv->lock);
|
|
|
|
cm_finalize_id(cm_id_priv);
|
|
|
|
|
|
|
|
/* Refcount belongs to the event, pairs with cm_process_work() */
|
|
|
|
refcount_inc(&cm_id_priv->refcount);
|
2020-05-06 15:46:55 +08:00
|
|
|
cm_queue_work_unlock(cm_id_priv, work);
|
2020-03-10 17:25:38 +08:00
|
|
|
/*
|
|
|
|
* Since this ID was just created and was not made visible to other MAD
|
|
|
|
* handlers until the cm_finalize_id() above we know that the
|
|
|
|
* cm_process_work() will deliver the event and the listen_cm_id
|
|
|
|
* embedded in the event can be derefed here.
|
|
|
|
*/
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_deref_id(listen_cm_id_priv);
|
|
|
|
return 0;
|
|
|
|
|
2006-08-29 02:57:42 +08:00
|
|
|
rejected:
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_deref_id(listen_cm_id_priv);
|
2006-08-29 02:57:42 +08:00
|
|
|
destroy:
|
2020-03-10 17:25:38 +08:00
|
|
|
ib_destroy_cm_id(&cm_id_priv->id);
|
2005-07-28 02:45:40 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void cm_format_rep(struct cm_rep_msg *rep_msg,
|
|
|
|
struct cm_id_private *cm_id_priv,
|
|
|
|
struct ib_cm_rep_param *param)
|
|
|
|
{
|
2020-05-26 18:33:02 +08:00
|
|
|
cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
|
|
|
|
param->ece.attr_mod);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.local_id));
|
|
|
|
IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.remote_id));
|
2020-01-17 01:00:34 +08:00
|
|
|
IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
|
|
|
|
param->responder_resources);
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
|
|
|
|
cm_id_priv->av.port->cm_dev->ack_delay);
|
|
|
|
IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
|
|
|
|
IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
|
|
|
|
be64_to_cpu(cm_id_priv->id.device->node_guid));
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2011-05-14 01:46:20 +08:00
|
|
|
if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
|
|
|
|
param->initiator_depth);
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
|
|
|
|
param->flow_control);
|
|
|
|
IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
|
2020-01-17 01:00:34 +08:00
|
|
|
IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
|
2011-05-14 01:46:20 +08:00
|
|
|
} else {
|
2020-01-17 01:00:33 +08:00
|
|
|
IBA_SET(CM_REP_SRQ, rep_msg, 1);
|
2020-01-17 01:00:34 +08:00
|
|
|
IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
|
2011-05-14 01:46:20 +08:00
|
|
|
}
|
|
|
|
|
2020-05-26 18:33:02 +08:00
|
|
|
IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
|
|
|
|
IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
|
|
|
|
IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
if (param->private_data && param->private_data_len)
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
|
|
|
|
param->private_data_len);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
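The three CM_REP_VENDOR_ID_{L,M,H} sets above split the 24-bit ECE vendor ID into byte lanes; cm_format_rep_event() later in this file reassembles them. A self-contained userspace sketch of that round trip (plain shifts stand in for the IBA helpers):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t vendor_id = 0xabcdef;	/* a 24-bit ECE vendor ID */

	/* sender: low, middle and high byte lanes, as in cm_format_rep() */
	uint8_t l = vendor_id;
	uint8_t m = vendor_id >> 8;
	uint8_t h = vendor_id >> 16;

	/* receiver: reassembly order matches cm_format_rep_event() */
	uint32_t out = (uint32_t)h << 16 | (uint32_t)m << 8 | l;

	assert(out == vendor_id);
	return 0;
}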
|
|
|
|
|
|
|
|
int ib_send_cm_rep(struct ib_cm_id *cm_id,
|
|
|
|
struct ib_cm_rep_param *param)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv;
|
|
|
|
struct ib_mad_send_buf *msg;
|
|
|
|
struct cm_rep_msg *rep_msg;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (param->private_data &&
|
|
|
|
param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
|
|
|
|
spin_lock_irqsave(&cm_id_priv->lock, flags);
|
|
|
|
if (cm_id->state != IB_CM_REQ_RCVD &&
|
|
|
|
cm_id->state != IB_CM_MRA_REQ_SENT) {
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
|
2005-07-28 02:45:40 +08:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = cm_alloc_msg(cm_id_priv, &msg);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
rep_msg = (struct cm_rep_msg *) msg->mad;
|
|
|
|
cm_format_rep(rep_msg, cm_id_priv, param);
|
2005-10-26 01:51:39 +08:00
|
|
|
msg->timeout_ms = cm_id_priv->timeout_ms;
|
2005-07-28 02:45:40 +08:00
|
|
|
msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
|
|
|
|
|
2020-08-17 21:53:22 +08:00
|
|
|
trace_icm_send_rep(cm_id);
|
2005-10-26 01:51:39 +08:00
|
|
|
ret = ib_post_send_mad(msg, NULL);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ret) {
|
|
|
|
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
|
|
|
cm_free_msg(msg);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
cm_id->state = IB_CM_REP_SENT;
|
|
|
|
cm_id_priv->msg = msg;
|
|
|
|
cm_id_priv->initiator_depth = param->initiator_depth;
|
|
|
|
cm_id_priv->responder_resources = param->responder_resources;
|
2020-01-17 01:00:34 +08:00
|
|
|
cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
|
2020-02-12 15:26:27 +08:00
|
|
|
WARN_ONCE(param->qp_num & 0xFF000000,
|
|
|
|
"IBTA declares QPN to be 24 bits, but it is 0x%X\n",
|
|
|
|
param->qp_num);
|
2011-08-03 02:08:22 +08:00
|
|
|
cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ib_send_cm_rep);
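A minimal passive-side usage sketch, not code from this file: after handling the REQ event, a ULP that has already created its QP might accept the connection as below. The example_accept name, the QP argument, and the parameter values are illustrative assumptions; rdma/ib_cm.h and rdma/ib_verbs.h are assumed included.

static int example_accept(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_cm_rep_param rep = {
		.qp_num			= qp->qp_num,
		.starting_psn		= 0x123456,	/* any 24-bit PSN */
		.responder_resources	= 1,
		.initiator_depth	= 1,
		.rnr_retry_count	= 7,		/* 7 == retry forever */
		.flow_control		= 1,
	};

	return ib_send_cm_rep(cm_id, &rep);
}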
|
|
|
|
|
|
|
|
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
|
|
|
|
struct cm_id_private *cm_id_priv,
|
|
|
|
const void *private_data,
|
|
|
|
u8 private_data_len)
|
|
|
|
{
|
|
|
|
cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.local_id));
|
|
|
|
IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.remote_id));
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
if (private_data && private_data_len)
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
|
|
|
|
private_data_len);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
|
|
|
|
const void *private_data,
|
|
|
|
u8 private_data_len)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv;
|
|
|
|
struct ib_mad_send_buf *msg;
|
|
|
|
unsigned long flags;
|
|
|
|
void *data;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
data = cm_copy_private_data(private_data, private_data_len);
|
|
|
|
if (IS_ERR(data))
|
|
|
|
return PTR_ERR(data);
|
|
|
|
|
|
|
|
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
|
|
|
|
spin_lock_irqsave(&cm_id_priv->lock, flags);
|
|
|
|
if (cm_id->state != IB_CM_REP_RCVD &&
|
|
|
|
cm_id->state != IB_CM_MRA_REP_SENT) {
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_send_cm_rtu_err(cm_id);
|
2005-07-28 02:45:40 +08:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = cm_alloc_msg(cm_id_priv, &msg);
|
|
|
|
if (ret)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
|
|
|
|
private_data, private_data_len);
|
|
|
|
|
2020-08-17 21:53:22 +08:00
|
|
|
trace_icm_send_rtu(cm_id);
|
2005-10-26 01:51:39 +08:00
|
|
|
ret = ib_post_send_mad(msg, NULL);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ret) {
|
|
|
|
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
|
|
|
cm_free_msg(msg);
|
|
|
|
kfree(data);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
cm_id->state = IB_CM_ESTABLISHED;
|
|
|
|
cm_set_private_data(cm_id_priv, data, private_data_len);
|
|
|
|
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
|
|
|
kfree(data);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ib_send_cm_rtu);
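On the active side the RTU typically comes from the ULP's cm_handler once the REP arrives and the QP has reached RTS; a hedged fragment of such a handler (the event constant is the standard ib_cm one, the QP transitions are elided):

	case IB_CM_REP_RECEIVED:
		/* ... transition the QP through RTR to RTS first ... */
		ib_send_cm_rtu(cm_id, NULL, 0);	/* no private data */
		break;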
|
|
|
|
|
2011-08-03 02:08:22 +08:00
|
|
|
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
|
|
|
|
struct cm_rep_msg *rep_msg;
|
|
|
|
struct ib_cm_rep_event_param *param;
|
|
|
|
|
|
|
|
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
|
|
|
|
param = &work->cm_event.param.rep_rcvd;
|
2020-01-17 01:00:35 +08:00
|
|
|
param->remote_ca_guid =
|
|
|
|
cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
|
|
|
|
param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
|
2011-08-03 02:08:22 +08:00
|
|
|
param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
|
2020-01-17 01:00:34 +08:00
|
|
|
param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
|
2020-01-17 01:00:35 +08:00
|
|
|
param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
|
|
|
|
param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
|
2020-01-17 01:00:33 +08:00
|
|
|
param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
|
|
|
|
param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
|
|
|
|
param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
|
|
|
|
param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
|
|
|
|
param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
|
2020-05-26 18:33:02 +08:00
|
|
|
param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
|
|
|
|
param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
|
|
|
|
param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
|
|
|
|
param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
|
|
|
|
|
2020-01-17 01:00:36 +08:00
|
|
|
work->cm_event.private_data =
|
|
|
|
IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void cm_dup_rep_handler(struct cm_work *work)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv;
|
|
|
|
struct cm_rep_msg *rep_msg;
|
|
|
|
struct ib_mad_send_buf *msg = NULL;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
|
2020-01-17 01:00:35 +08:00
|
|
|
cm_id_priv = cm_acquire_id(
|
|
|
|
cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
|
|
|
|
cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
|
2005-07-28 02:45:40 +08:00
|
|
|
if (!cm_id_priv)
|
|
|
|
return;
|
|
|
|
|
2007-07-17 12:49:35 +08:00
|
|
|
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
|
|
counter[CM_REP_COUNTER]);
|
2005-07-28 02:45:40 +08:00
|
|
|
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
|
|
|
|
if (ret)
|
|
|
|
goto deref;
|
|
|
|
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_lock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
|
|
|
|
cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
|
|
|
|
cm_id_priv->private_data,
|
|
|
|
cm_id_priv->private_data_len);
|
|
|
|
else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
|
|
|
|
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
|
|
|
|
CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
|
|
|
|
cm_id_priv->private_data,
|
|
|
|
cm_id_priv->private_data_len);
|
|
|
|
else
|
|
|
|
goto unlock;
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2020-08-17 21:53:22 +08:00
|
|
|
trace_icm_send_dup_rep(&cm_id_priv->id);
|
2005-10-26 01:51:39 +08:00
|
|
|
ret = ib_post_send_mad(msg, NULL);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ret)
|
|
|
|
goto free;
|
|
|
|
goto deref;
|
|
|
|
|
2007-06-19 02:09:36 +08:00
|
|
|
unlock: spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
free: cm_free_msg(msg);
|
|
|
|
deref: cm_deref_id(cm_id_priv);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cm_rep_handler(struct cm_work *work)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv;
|
|
|
|
struct cm_rep_msg *rep_msg;
|
|
|
|
int ret;
|
2016-10-28 19:14:29 +08:00
|
|
|
struct cm_id_private *cur_cm_id_priv;
|
|
|
|
struct cm_timewait_info *timewait_info;
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
|
2020-01-17 01:00:35 +08:00
|
|
|
cm_id_priv = cm_acquire_id(
|
|
|
|
cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (!cm_id_priv) {
|
|
|
|
cm_dup_rep_handler(work);
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_remote_no_priv_err(
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
|
2005-07-28 02:45:40 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2011-08-03 02:08:22 +08:00
|
|
|
cm_format_rep_event(work, cm_id_priv->qp_type);
|
2006-03-03 08:50:37 +08:00
|
|
|
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_lock_irq(&cm_id_priv->lock);
|
2006-03-03 08:50:37 +08:00
|
|
|
switch (cm_id_priv->id.state) {
|
|
|
|
case IB_CM_REQ_SENT:
|
|
|
|
case IB_CM_MRA_REQ_RCVD:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ret = -EINVAL;
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_rep_unknown_err(
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
|
2020-08-17 21:53:16 +08:00
|
|
|
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
|
|
|
|
cm_id_priv->id.state);
|
2020-03-10 17:25:36 +08:00
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2006-03-03 08:50:37 +08:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2020-01-17 01:00:35 +08:00
|
|
|
cm_id_priv->timewait_info->work.remote_id =
|
|
|
|
cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
|
|
|
|
cm_id_priv->timewait_info->remote_ca_guid =
|
|
|
|
cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
|
2011-08-03 02:08:22 +08:00
|
|
|
cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2006-03-03 08:50:37 +08:00
|
|
|
spin_lock(&cm.lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
/* Check for duplicate REP. */
|
|
|
|
if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
|
2006-03-03 08:50:37 +08:00
|
|
|
spin_unlock(&cm.lock);
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
ret = -EINVAL;
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_insert_failed_err(
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
|
2005-07-28 02:45:40 +08:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
/* Check for a stale connection. */
|
2016-10-28 19:14:29 +08:00
|
|
|
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
|
|
|
|
if (timewait_info) {
|
2020-05-06 15:46:56 +08:00
|
|
|
cm_remove_remote(cm_id_priv);
|
2019-12-19 21:47:50 +08:00
|
|
|
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
|
2016-10-28 19:14:29 +08:00
|
|
|
timewait_info->work.remote_id);
|
|
|
|
|
2006-03-03 08:50:37 +08:00
|
|
|
spin_unlock(&cm.lock);
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_issue_rej(work->port, work->mad_recv_wc,
|
|
|
|
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
|
|
|
|
NULL, 0);
|
|
|
|
ret = -EINVAL;
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_staleconn_err(
|
|
|
|
IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
|
2017-11-14 20:51:58 +08:00
|
|
|
|
2016-10-28 19:14:29 +08:00
|
|
|
if (cur_cm_id_priv) {
|
2020-05-06 15:47:00 +08:00
|
|
|
ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
|
2016-10-28 19:14:29 +08:00
|
|
|
cm_deref_id(cur_cm_id_priv);
|
|
|
|
}
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
goto error;
|
|
|
|
}
|
2006-03-03 08:50:37 +08:00
|
|
|
spin_unlock(&cm.lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
cm_id_priv->id.state = IB_CM_REP_RCVD;
|
2020-01-17 01:00:35 +08:00
|
|
|
cm_id_priv->id.remote_id =
|
|
|
|
cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
|
2011-08-03 02:08:22 +08:00
|
|
|
cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
|
2020-01-17 01:00:35 +08:00
|
|
|
cm_id_priv->initiator_depth =
|
|
|
|
IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
|
|
|
|
cm_id_priv->responder_resources =
|
|
|
|
IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
|
2020-01-17 01:00:34 +08:00
|
|
|
cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
|
2020-01-17 01:00:33 +08:00
|
|
|
cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
|
|
|
|
cm_id_priv->target_ack_delay =
|
|
|
|
IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
|
2007-06-19 02:09:37 +08:00
|
|
|
cm_id_priv->av.timeout =
|
|
|
|
cm_ack_timeout(cm_id_priv->target_ack_delay,
|
|
|
|
cm_id_priv->av.timeout - 1);
|
|
|
|
cm_id_priv->alt_av.timeout =
|
|
|
|
cm_ack_timeout(cm_id_priv->target_ack_delay,
|
|
|
|
cm_id_priv->alt_av.timeout - 1);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2005-10-26 01:51:39 +08:00
|
|
|
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
|
2020-05-06 15:46:55 +08:00
|
|
|
cm_queue_work_unlock(cm_id_priv, work);
|
2005-07-28 02:45:40 +08:00
|
|
|
return 0;
|
|
|
|
|
2006-03-03 08:50:37 +08:00
|
|
|
error:
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_deref_id(cm_id_priv);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cm_establish_handler(struct cm_work *work)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv;
|
|
|
|
|
2006-11-29 06:57:13 +08:00
|
|
|
/* See comment in cm_establish about lookup. */
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
|
|
|
|
if (!cm_id_priv)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_lock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2005-10-26 01:51:39 +08:00
|
|
|
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
|
2020-05-06 15:46:55 +08:00
|
|
|
cm_queue_work_unlock(cm_id_priv, work);
|
2005-07-28 02:45:40 +08:00
|
|
|
return 0;
|
|
|
|
out:
|
|
|
|
cm_deref_id(cm_id_priv);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cm_rtu_handler(struct cm_work *work)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv;
|
|
|
|
struct cm_rtu_msg *rtu_msg;
|
|
|
|
|
|
|
|
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
|
2020-01-17 01:00:35 +08:00
|
|
|
cm_id_priv = cm_acquire_id(
|
|
|
|
cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
|
|
|
|
cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
|
2005-07-28 02:45:40 +08:00
|
|
|
if (!cm_id_priv)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2020-01-17 01:00:36 +08:00
|
|
|
work->cm_event.private_data =
|
|
|
|
IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_lock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
|
|
|
|
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2007-07-17 12:49:35 +08:00
|
|
|
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
|
|
counter[CM_RTU_COUNTER]);
|
2005-07-28 02:45:40 +08:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
cm_id_priv->id.state = IB_CM_ESTABLISHED;
|
|
|
|
|
2005-10-26 01:51:39 +08:00
|
|
|
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
|
2020-05-06 15:46:55 +08:00
|
|
|
cm_queue_work_unlock(cm_id_priv, work);
|
2005-07-28 02:45:40 +08:00
|
|
|
return 0;
|
|
|
|
out:
|
|
|
|
cm_deref_id(cm_id_priv);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
|
|
|
|
struct cm_id_private *cm_id_priv,
|
|
|
|
const void *private_data,
|
|
|
|
u8 private_data_len)
|
|
|
|
{
|
|
|
|
cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
|
2018-07-04 18:48:01 +08:00
|
|
|
cm_form_tid(cm_id_priv));
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.local_id));
|
|
|
|
IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.remote_id));
|
2020-01-17 01:00:34 +08:00
|
|
|
IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->remote_qpn));
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
if (private_data && private_data_len)
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
|
|
|
|
private_data_len);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
2020-03-10 17:25:41 +08:00
|
|
|
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
|
|
|
|
const void *private_data, u8 private_data_len)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
|
|
|
|
struct ib_mad_send_buf *msg;
|
|
|
|
int ret;
|
|
|
|
|
2020-03-10 17:25:41 +08:00
|
|
|
lockdep_assert_held(&cm_id_priv->lock);
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2020-03-10 17:25:41 +08:00
|
|
|
if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_dreq_skipped(&cm_id_priv->id);
|
2020-03-10 17:25:41 +08:00
|
|
|
return -EINVAL;
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
2020-03-10 17:25:41 +08:00
|
|
|
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
|
|
|
|
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
|
2011-03-04 07:31:06 +08:00
|
|
|
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
ret = cm_alloc_msg(cm_id_priv, &msg);
|
|
|
|
if (ret) {
|
|
|
|
cm_enter_timewait(cm_id_priv);
|
2020-03-10 17:25:41 +08:00
|
|
|
return ret;
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
|
|
|
|
private_data, private_data_len);
|
2005-10-26 01:51:39 +08:00
|
|
|
msg->timeout_ms = cm_id_priv->timeout_ms;
|
2005-07-28 02:45:40 +08:00
|
|
|
msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
|
|
|
|
|
2020-08-17 21:53:22 +08:00
|
|
|
trace_icm_send_dreq(&cm_id_priv->id);
|
2005-10-26 01:51:39 +08:00
|
|
|
ret = ib_post_send_mad(msg, NULL);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ret) {
|
|
|
|
cm_enter_timewait(cm_id_priv);
|
|
|
|
cm_free_msg(msg);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-03-10 17:25:41 +08:00
|
|
|
cm_id_priv->id.state = IB_CM_DREQ_SENT;
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_id_priv->msg = msg;
|
2020-03-10 17:25:41 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
|
|
|
|
u8 private_data_len)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv =
|
|
|
|
container_of(cm_id, struct cm_id_private, id);
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&cm_id_priv->lock, flags);
|
|
|
|
ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
|
|
|
|
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
2005-07-28 02:45:40 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ib_send_cm_dreq);
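A disconnection sketch (illustrative fragments, not code from this file): either side may start teardown with a DREQ, and the peer's cm_handler answers the resulting IB_CM_DREQ_RECEIVED event with a DREP via ib_send_cm_drep(), defined below; both IDs then pass through timewait.

	/* initiating side, e.g. on ULP close */
	ib_send_cm_dreq(cm_id, NULL, 0);

	/* peer side, inside its cm_handler */
	case IB_CM_DREQ_RECEIVED:
		ib_send_cm_drep(cm_id, NULL, 0);
		break;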
|
|
|
|
|
|
|
|
static void cm_format_drep(struct cm_drep_msg *drep_msg,
|
|
|
|
struct cm_id_private *cm_id_priv,
|
|
|
|
const void *private_data,
|
|
|
|
u8 private_data_len)
|
|
|
|
{
|
|
|
|
cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.local_id));
|
|
|
|
IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
|
|
|
|
be32_to_cpu(cm_id_priv->id.remote_id));
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
if (private_data && private_data_len)
|
2020-01-17 01:00:36 +08:00
|
|
|
IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
|
|
|
|
private_data_len);
|
2005-07-28 02:45:40 +08:00
|
|
|
}
|
|
|
|
|
2020-03-10 17:25:42 +08:00
|
|
|
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
|
|
|
|
void *private_data, u8 private_data_len)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
|
|
|
|
struct ib_mad_send_buf *msg;
|
|
|
|
int ret;
|
|
|
|
|
2020-03-10 17:25:42 +08:00
|
|
|
lockdep_assert_held(&cm_id_priv->lock);
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2020-03-10 17:25:42 +08:00
|
|
|
if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_send_drep_err(&cm_id_priv->id);
|
2020-03-10 17:25:42 +08:00
|
|
|
kfree(private_data);
|
2005-07-28 02:45:40 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-03-10 17:25:42 +08:00
|
|
|
cm_set_private_data(cm_id_priv, private_data, private_data_len);
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_enter_timewait(cm_id_priv);
|
|
|
|
|
|
|
|
ret = cm_alloc_msg(cm_id_priv, &msg);
|
|
|
|
if (ret)
|
2020-03-10 17:25:42 +08:00
|
|
|
return ret;
|
2005-07-28 02:45:40 +08:00
|
|
|
|
|
|
|
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
|
|
|
|
private_data, private_data_len);
|
|
|
|
|
2020-08-17 21:53:22 +08:00
|
|
|
trace_icm_send_drep(&cm_id_priv->id);
|
2005-10-26 01:51:39 +08:00
|
|
|
ret = ib_post_send_mad(msg, NULL);
|
2005-07-28 02:45:40 +08:00
|
|
|
if (ret) {
|
|
|
|
cm_free_msg(msg);
|
|
|
|
return ret;
|
|
|
|
}
|
2020-03-10 17:25:42 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2020-03-10 17:25:42 +08:00
|
|
|
int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
|
|
|
|
u8 private_data_len)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv =
|
|
|
|
container_of(cm_id, struct cm_id_private, id);
|
|
|
|
unsigned long flags;
|
|
|
|
void *data;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
data = cm_copy_private_data(private_data, private_data_len);
|
|
|
|
if (IS_ERR(data))
|
|
|
|
return PTR_ERR(data);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&cm_id_priv->lock, flags);
|
|
|
|
ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
|
|
|
|
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
|
2005-07-28 02:45:40 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ib_send_cm_drep);
|
|
|
|
|
2006-10-05 02:37:25 +08:00
|
|
|
static int cm_issue_drep(struct cm_port *port,
|
|
|
|
struct ib_mad_recv_wc *mad_recv_wc)
|
|
|
|
{
|
|
|
|
struct ib_mad_send_buf *msg = NULL;
|
|
|
|
struct cm_dreq_msg *dreq_msg;
|
|
|
|
struct cm_drep_msg *drep_msg;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
|
|
|
|
drep_msg = (struct cm_drep_msg *) msg->mad;
|
|
|
|
|
|
|
|
cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
|
|
|
|
IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
|
|
|
|
IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
|
|
|
|
IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
|
2006-10-05 02:37:25 +08:00
|
|
|
|
2020-08-17 21:53:22 +08:00
|
|
|
trace_icm_issue_drep(
|
|
|
|
IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
|
|
|
|
IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
|
2006-10-05 02:37:25 +08:00
|
|
|
ret = ib_post_send_mad(msg, NULL);
|
|
|
|
if (ret)
|
|
|
|
cm_free_msg(msg);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2005-07-28 02:45:40 +08:00
|
|
|
static int cm_dreq_handler(struct cm_work *work)
|
|
|
|
{
|
|
|
|
struct cm_id_private *cm_id_priv;
|
|
|
|
struct cm_dreq_msg *dreq_msg;
|
|
|
|
struct ib_mad_send_buf *msg = NULL;
|
|
|
|
|
|
|
|
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
|
2020-01-17 01:00:35 +08:00
|
|
|
cm_id_priv = cm_acquire_id(
|
|
|
|
cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
|
|
|
|
cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
|
2006-10-05 02:37:25 +08:00
|
|
|
if (!cm_id_priv) {
|
2007-07-17 12:49:35 +08:00
|
|
|
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
|
|
counter[CM_DREQ_COUNTER]);
|
2006-10-05 02:37:25 +08:00
|
|
|
cm_issue_drep(work->port, work->mad_recv_wc);
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_no_priv_err(
|
|
|
|
IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
|
2020-01-17 01:00:35 +08:00
|
|
|
IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
|
2005-07-28 02:45:40 +08:00
|
|
|
return -EINVAL;
|
2006-10-05 02:37:25 +08:00
|
|
|
}
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2020-01-17 01:00:36 +08:00
|
|
|
work->cm_event.private_data =
|
|
|
|
IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_lock_irq(&cm_id_priv->lock);
|
2020-01-17 01:00:34 +08:00
|
|
|
if (cm_id_priv->local_qpn !=
|
|
|
|
cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
|
2005-07-28 02:45:40 +08:00
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
switch (cm_id_priv->id.state) {
|
|
|
|
case IB_CM_REP_SENT:
|
|
|
|
case IB_CM_DREQ_SENT:
|
2005-10-26 01:51:39 +08:00
|
|
|
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
|
2005-07-28 02:45:40 +08:00
|
|
|
break;
|
|
|
|
case IB_CM_ESTABLISHED:
|
2011-03-04 07:31:06 +08:00
|
|
|
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
|
|
|
|
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
|
|
|
|
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
|
|
|
|
break;
|
2005-07-28 02:45:40 +08:00
|
|
|
case IB_CM_MRA_REP_RCVD:
|
|
|
|
break;
|
|
|
|
case IB_CM_TIMEWAIT:
|
2007-07-17 12:49:35 +08:00
|
|
|
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
|
|
counter[CM_DREQ_COUNTER]);
|
2017-08-30 01:34:43 +08:00
|
|
|
msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
|
|
|
|
if (IS_ERR(msg))
|
2005-07-28 02:45:40 +08:00
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
|
|
|
|
cm_id_priv->private_data,
|
|
|
|
cm_id_priv->private_data_len);
|
2007-06-19 02:09:36 +08:00
|
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
|
2017-08-30 01:34:43 +08:00
|
|
|
if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
|
|
|
|
ib_post_send_mad(msg, NULL))
|
2005-07-28 02:45:40 +08:00
|
|
|
cm_free_msg(msg);
|
|
|
|
goto deref;
|
2007-07-17 12:49:35 +08:00
|
|
|
case IB_CM_DREQ_RCVD:
|
|
|
|
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
|
|
counter[CM_DREQ_COUNTER]);
|
|
|
|
goto unlock;
|
2005-07-28 02:45:40 +08:00
|
|
|
default:
|
2020-08-17 21:53:16 +08:00
|
|
|
trace_icm_dreq_unknown_err(&cm_id_priv->id);
|
2005-07-28 02:45:40 +08:00
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
|
|
|
|
cm_id_priv->tid = dreq_msg->hdr.tid;
|
2020-05-06 15:46:55 +08:00
|
|
|
cm_queue_work_unlock(cm_id_priv, work);
|
2005-07-28 02:45:40 +08:00
|
|
|
return 0;
|
|
|
|
|
2007-06-19 02:09:36 +08:00
|
|
|
unlock: spin_unlock_irq(&cm_id_priv->lock);
|
2005-07-28 02:45:40 +08:00
|
|
|
deref: cm_deref_id(cm_id_priv);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
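/*
 * Handle a received DREP (disconnect reply).  The DREP confirms that the
 * remote side saw our DREQ, so move the connection into timewait and queue
 * an event for the consumer.
 */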
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

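/*
 * Build and post a REJ for the current connection attempt.  The caller must
 * hold cm_id_priv->lock; the state checked here decides whether the REJ
 * drops the id back to idle (request phase) or into timewait (reply phase).
 */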
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len)
{
	enum ib_cm_state state = cm_id_priv->id.state;
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_reset_to_idle(cm_id_priv);
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			return ret;
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_enter_timewait(cm_id_priv);
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			return ret;
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	default:
		trace_icm_send_unknown_rej_err(&cm_id_priv->id);
		return -EINVAL;
	}

	trace_icm_send_rej(&cm_id_priv->id, reason);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}

	return 0;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
		   void *ari, u8 ari_length, const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
				 private_data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
}

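/*
 * Look up the cm_id a received REJ refers to.  A timeout REJ is matched
 * through the 64-bit value carried in the ARI together with the peer's comm
 * id; a REJ of our REQ names only our local comm id, so it is looked up
 * with remote_id 0; all other REJs are matched on both comm ids.
 */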
static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));

	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
		cm_id_priv = cm_find_remote_id(
			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
			remote_id);
	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
		   CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			0);
	else
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			remote_id);

	return cm_id_priv;
}

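/*
 * Handle a received REJ: cancel any outstanding MAD for the rejected
 * message and move the id to idle or timewait depending on how far the
 * handshake had progressed, then report the event.
 */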
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		fallthrough;
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		fallthrough;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
					      cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		fallthrough;
	default:
		trace_icm_rej_unknown_err(&cm_id_priv->id);
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

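/*
 * Send an MRA (message receipt acknowledgement) for a received REQ, REP or
 * LAP.  If IB_CM_MRA_FLAG_DELAY is set in service_timeout, only the stored
 * timeout and state are updated and no MAD is posted.
 */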
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
		fallthrough;
	default:
		trace_icm_send_mra_unknown_err(&cm_id_priv->id);
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		trace_icm_send_mra(cm_id);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
	default:
		return NULL;
	}
}

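/*
 * Handle a received MRA: verify it acknowledges the message we actually
 * have outstanding, extend that MAD's timeout with ib_modify_mad() and
 * record the new state so retries wait for the peer.
 */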
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
	work->cm_event.param.mra_rcvd.service_timeout =
		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(&work->port->
						counter_group[CM_RECV_DUPLICATES].
						counter[CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_MRA_COUNTER]);
		fallthrough;
	default:
		trace_icm_mra_unknown_err(&cm_id_priv->id);
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

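/*
 * Extract the alternate path LIDs from a LAP.  OPA path records take their
 * (extended) LIDs from the alternate port GIDs via opa_get_lid_from_gid();
 * everything else uses the 16-bit LID fields of the message.
 */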
static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
					struct sa_path_rec *path)
{
	u32 lid;

	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
					       lap_msg));
		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
					       lap_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
		sa_path_set_dlid(path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
		sa_path_set_slid(path, lid);
	}
}

static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
	path->sgid =
		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
	path->flow_label =
		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
	path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
	path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time =
		IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
	cm_format_path_lid_from_lap(lap_msg, path);
}

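/*
 * Handle a received LAP (load alternate path) request: rebuild the proposed
 * alternate path from the message, resolve address handles for it and
 * report it to the consumer.  A duplicate arriving in MRA_LAP_SENT state is
 * re-acknowledged with an MRA.
 */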
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	memset(&work->path[0], 0, sizeof(work->path[0]));
	cm_path_set_rec_type(work->port->cm_dev->ib_device,
			     work->port->port_num, &work->path[0],
			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
					     lap_msg));
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
		if (IS_ERR(msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
		    ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}

	ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
				 work->mad_recv_wc->recv_buf.grh,
				 &cm_id_priv->av);
	if (ret)
		goto unlock;

	ret = cm_init_av_by_path(param->alternate_path, NULL,
				 &cm_id_priv->alt_av, cm_id_priv);
	if (ret)
		goto unlock;

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

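/*
 * Handle a received APR (alternate path response): the peer accepted or
 * rejected our LAP, so cancel the outstanding LAP MAD and return the status
 * to the consumer.
 */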
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status =
		IBA_GET(CM_APR_AR_STATUS, apr_msg);
	work->cm_event.param.apr_rcvd.apr_info =
		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
	work->cm_event.param.apr_rcvd.info_len =
		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

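/*
 * Timewait expired: drop the timewait bookkeeping and, if the id is still
 * in IB_CM_TIMEWAIT for the same remote QPN, move it back to idle.
 */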
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;

	timewait_info = container_of(work, struct cm_timewait_info, work);
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

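/*
 * SIDR (service ID resolution) resolves a service id to the QPN and Q_Key
 * of the queue pair serving it without setting up a full connection.
 * cm_format_sidr_req() packs the request id, partition key, service id and
 * optional private data into the outgoing MAD.
 */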
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));
	IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
		be16_to_cpu(param->path->pkey));
	IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
		be64_to_cpu(param->service_id));

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
			    param->private_data, param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, param->sgid_attr,
				 &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE) {
		trace_icm_send_sidr_req(&cm_id_priv->id);
		ret = ib_post_send_mad(msg, NULL);
	} else {
		ret = -EINVAL;
	}

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     const struct cm_id_private *rx_cm_id,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
	param->listen_id = listen_id;
	param->service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = work->port->port_num;
	param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
}

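/*
 * Handle a received SIDR REQ: allocate a fresh cm_id, record the request
 * for duplicate detection, match it against a listener and deliver the
 * event.  Unmatched requests are answered with IB_SIDR_UNSUPPORTED.
 */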
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	int ret;

	cm_id_priv =
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;

	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
	cm_id_priv->id.service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
	cm_id_priv->tid = sidr_req_msg->hdr.tid;

	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto out;

	spin_lock_irq(&cm.lock);
	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   cm_id_priv->id.service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		ib_send_cm_sidr_rep(&cm_id_priv->id,
				    &(struct ib_cm_sidr_rep_param){
					    .status = IB_SIDR_UNSUPPORTED });
		goto out; /* No match. */
	}
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;

	/*
	 * A SIDR ID does not need to be in the xarray since it does not receive
	 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
	 * not enter timewait.
	 */

	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);
	/*
	 * A pointer to the listen_cm_id is held in the event, so this deref
	 * must be after the event is delivered above.
	 */
	cm_deref_id(listen_cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			      cm_id_priv->tid, param->ece.attr_mod);
	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
		be64_to_cpu(cm_id_priv->id.service_id));
	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
	IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
		param->ece.vendor_id & 0xFF);
	IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
		(param->ece.vendor_id >> 8) & 0xFF);

	if (param->info && param->info_length)
		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
			    param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
			    param->private_data, param->private_data_len);
}

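/*
 * Post the SIDR REP while holding cm_id_priv->lock, return the id to
 * IB_CM_IDLE and drop it from the remote SIDR table.
 */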
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param)
{
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
		return -EINVAL;

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		return ret;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	trace_icm_send_sidr_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	spin_lock_irqsave(&cm.lock, flags);
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work,
|
|
|
|
const struct cm_id_private *cm_id_priv)
|
2005-07-28 02:45:40 +08:00
|
|
|
{
|
|
|
|
struct cm_sidr_rep_msg *sidr_rep_msg;
|
|
|
|
struct ib_cm_sidr_rep_event_param *param;
|
|
|
|
|
|
|
|
sidr_rep_msg = (struct cm_sidr_rep_msg *)
|
|
|
|
work->mad_recv_wc->recv_buf.mad;
|
|
|
|
param = &work->cm_event.param.sidr_rep_rcvd;
|
2020-01-17 01:00:35 +08:00
|
|
|
param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
|
|
|
|
param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
|
2020-01-17 01:00:34 +08:00
|
|
|
param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
|
	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
				      sidr_rep_msg);
	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
				  sidr_rep_msg);
	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
}

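/*
 * Handle a received SIDR REP: match it to the cm_id that sent the SIDR REQ,
 * cancel the outstanding MAD, and deliver the event to the consumer.
 */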
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work, cm_id_priv);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

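/*
 * A MAD send completed in error. Map the cm_id's state to the matching
 * *_ERROR event and report it, unless the send has already been superseded.
 */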
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	trace_icm_mad_send_err(state, wc_status);
	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}

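/*
 * Send completion handler for the CM MAD agent: update the per-port
 * transmit/retry counters, then free the message or report the error.
 */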
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries,
			&port->counter_group[CM_XMIT].counter[attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counter_group[CM_XMIT_RETRIES].
				counter[attr_index]);

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

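/*
 * Workqueue entry point: dispatch a queued CM event to its handler. On
 * success the handler is expected to have consumed the work item (via
 * cm_process_work()), so it is only freed here on error.
 */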
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		trace_icm_handler_err(work->cm_event.event);
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

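/*
 * Transition a cm_id to ESTABLISHED on an IB_EVENT_COMM_EST QP event,
 * queueing an IB_CM_USER_ESTABLISHED work item so the consumer's handler
 * runs from the CM workqueue.
 */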
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		trace_icm_establish_err(cm_id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item. To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

out:
	return ret;
}

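/*
 * Handle an IB_EVENT_PATH_MIG QP event by promoting the alternate path:
 * the primary and alternate address vectors (and their port-ready state)
 * are swapped under the cm_id lock.
 */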
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_av tmp_av;
	unsigned long flags;
	int tmp_send_port_not_ready;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		/* Swap address vector */
		tmp_av = cm_id_priv->av;
		cm_id_priv->av = cm_id_priv->alt_av;
		cm_id_priv->alt_av = tmp_av;
		/* Swap port send ready state */
		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
		cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

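/*
 * ib_cm_notify - notify the CM of a COMM_EST or PATH_MIG QP event. A
 * consumer's QP event handler might call, e.g.,
 * ib_cm_notify(cm_id, ibevent->event) (illustrative sketch only); any
 * other event type is rejected with -EINVAL.
 */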
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);

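/*
 * Receive handler for the CM MAD agent: map the MAD attribute ID to a CM
 * event, bump the receive counter, and queue a work item unless the device
 * is being removed.
 */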
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	bool alt_path = false;
	u16 attr_id;
	int paths = 0;
	int going_down = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
						mad_recv_wc->recv_buf.mad);
		paths = 1 + (alt_path != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counter_group[CM_RECV].
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}

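/*
 * Fill in the attributes needed to move a QP to the INIT state; valid in
 * any connected state from REQ sent/received through ESTABLISHED.
 */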
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		trace_icm_qp_init_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

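/*
 * Fill in the attributes for the INIT -> RTR transition: primary (and, if
 * an alternate DLID is set, alternate) path, MTU, destination QPN and
 * RQ PSN.
 */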
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rtr_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

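/*
 * Fill in the attributes for the RTR -> RTS transition. Once a LAP has
 * been negotiated, only the alternate path and the REARM migration state
 * are loaded.
 */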
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
				fallthrough;
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rts_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

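/*
 * ib_cm_init_qp_attr - fill *qp_attr / *qp_attr_mask for the next QP state
 * transition, based on qp_attr->qp_state and the cm_id's state. A caller
 * would typically do something like (sketch, error handling omitted):
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */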
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

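/* sysfs plumbing for the per-port CM message counters. */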
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sysfs_emit(buf, "%ld\n",
			  atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = ib_port_register_module_stat(port->cm_dev->ib_device,
						   port->port_num,
						   &port->counter_group[i].obj,
						   &cm_counter_obj_type,
						   counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
}

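/*
 * Client callback invoked for each new IB device: allocate the cm_device,
 * register a GSI MAD agent on every CM-capable port, and advertise
 * IB_PORT_CM_SUP in the port capability mask.
 */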
static int cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u32 i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
	if (!cm_dev)
		return -ENOMEM;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port) {
			ret = -ENOMEM;
			goto error1;
		}

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		INIT_LIST_HEAD(&port->cm_priv_prim_list);
		INIT_LIST_HEAD(&port->cm_priv_altr_list);

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			goto error2;
		}

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return 0;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	kfree(port);
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
		kfree(port);
	}
free:
	kfree(cm_dev);
	return ret;
}

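/*
 * Client callback invoked when a device is removed: mark the device as
 * going down, drain the workqueue, then tear down each port's MAD agent
 * and sysfs state in the reverse order of cm_add_one().
 */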
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	u32 i;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/* Mark all the cm_id's as not valid */
		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);
		/*
		 * Flush the queue after going_down is set; this verifies
		 * that no new work will be queued by the recv handler,
		 * after which it is safe to unregister the MAD agent.
		 */
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
		kfree(port);
	}

	kfree(cm_dev);
}

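/*
 * Module init: set up the global CM tables and workqueue, then register
 * with the IB core so cm_add_one() runs for each existing device.
 */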
static int __init ib_cm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);