[PATCH] IB: Combine some MAD routines
Combine response_mad() and solicited_mad() routines into a single function and simplify/encapsulate its usage.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 4a0754fae8
parent 824c8ae7d0
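The heart of the change is that the MAD receive path now keys everything off a single response_mad() test instead of the old response_mad()/solicited_mad() pair. As a rough illustration, here is a minimal, standalone C sketch of that check outside the kernel: the trimmed-down struct mad and the main() driver are invented for this example only, and the method constants mirror the IBA method-field encodings used by the kernel's ib_mad.h.

/*
 * Standalone sketch (not the kernel code itself) of the check that the
 * consolidated response_mad() performs after this patch.
 */
#include <stdio.h>
#include <stdint.h>

#define IB_MGMT_METHOD_GET          0x01
#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
#define IB_MGMT_METHOD_RESP         0x80    /* response bit */
#define IB_MGMT_METHOD_GET_RESP     (IB_MGMT_METHOD_GET | IB_MGMT_METHOD_RESP)

struct mad_hdr { uint8_t method; };          /* trimmed-down header for the example */
struct mad     { struct mad_hdr mad_hdr; };

/* Trap represses are responses although the response bit is reset */
static int response_mad(struct mad *mad)
{
        return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
                (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
}

int main(void)
{
        struct mad get      = { { IB_MGMT_METHOD_GET } };
        struct mad get_resp = { { IB_MGMT_METHOD_GET_RESP } };
        struct mad repress  = { { IB_MGMT_METHOD_TRAP_REPRESS } };

        printf("GET:          %d\n", response_mad(&get));      /* 0: a request */
        printf("GET_RESP:     %d\n", response_mad(&get_resp)); /* 1: response bit set */
        printf("TRAP_REPRESS: %d\n", response_mad(&repress));  /* 1: special-cased */
        return 0;
}

In the patch below, this same predicate replaces the solicited flag that used to be threaded through find_mad_agent() and ib_mad_complete_recv().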
@@ -58,7 +58,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
 static struct ib_mad_agent_private *find_mad_agent(
                                         struct ib_mad_port_private *port_priv,
-                                        struct ib_mad *mad, int solicited);
+                                        struct ib_mad *mad);
 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                     struct ib_mad_private *mad);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
@@ -67,7 +67,6 @@ static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 static void timeout_sends(void *data);
 static void cancel_sends(void *data);
 static void local_completions(void *data);
-static int solicited_mad(struct ib_mad *mad);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                               struct ib_mad_agent_private *agent_priv,
                               u8 mgmt_class);
@@ -558,6 +557,13 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 }
 EXPORT_SYMBOL(ib_unregister_mad_agent);
 
+static inline int response_mad(struct ib_mad *mad)
+{
+        /* Trap represses are responses although response bit is reset */
+        return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+                (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
+}
+
 static void dequeue_mad(struct ib_mad_list_head *mad_list)
 {
         struct ib_mad_queue *mad_queue;
@@ -650,7 +656,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                                   struct ib_smp *smp,
                                   struct ib_send_wr *send_wr)
 {
-        int ret, solicited;
+        int ret;
         unsigned long flags;
         struct ib_mad_local_private *local;
         struct ib_mad_private *mad_priv;
@@ -696,11 +702,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
         switch (ret)
         {
         case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
-                /*
-                 * See if response is solicited and
-                 * there is a recv handler
-                 */
-                if (solicited_mad(&mad_priv->mad.mad) &&
+                if (response_mad(&mad_priv->mad.mad) &&
                     mad_agent_priv->agent.recv_handler) {
                         local->mad_priv = mad_priv;
                         local->recv_mad_agent = mad_agent_priv;
@@ -717,15 +719,13 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                 break;
         case IB_MAD_RESULT_SUCCESS:
                 /* Treat like an incoming receive MAD */
-                solicited = solicited_mad(&mad_priv->mad.mad);
                 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
                                             mad_agent_priv->agent.port_num);
                 if (port_priv) {
                         mad_priv->mad.mad.mad_hdr.tid =
                                 ((struct ib_mad *)smp)->mad_hdr.tid;
                         recv_mad_agent = find_mad_agent(port_priv,
-                                                        &mad_priv->mad.mad,
-                                                        solicited);
+                                                        &mad_priv->mad.mad);
                 }
                 if (!port_priv || !recv_mad_agent) {
                         kmem_cache_free(ib_mad_cache, mad_priv);
@@ -1421,42 +1421,15 @@ out:
         return;
 }
 
-static int response_mad(struct ib_mad *mad)
-{
-        /* Trap represses are responses although response bit is reset */
-        return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
-                (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
-}
-
-static int solicited_mad(struct ib_mad *mad)
-{
-        /* CM MADs are never solicited */
-        if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CM) {
-                return 0;
-        }
-
-        /* XXX: Determine whether MAD is using RMPP */
-
-        /* Not using RMPP */
-        /* Is this MAD a response to a previous MAD ? */
-        return response_mad(mad);
-}
-
 static struct ib_mad_agent_private *
 find_mad_agent(struct ib_mad_port_private *port_priv,
-               struct ib_mad *mad,
-               int solicited)
+               struct ib_mad *mad)
 {
         struct ib_mad_agent_private *mad_agent = NULL;
         unsigned long flags;
 
         spin_lock_irqsave(&port_priv->reg_lock, flags);
-        /*
-         * Whether MAD was solicited determines type of routing to
-         * MAD client.
-         */
-        if (solicited) {
+        if (response_mad(mad)) {
                 u32 hi_tid;
                 struct ib_mad_agent_private *entry;
 
@@ -1560,18 +1533,6 @@ out:
         return valid;
 }
 
-/*
- * Return start of fully reassembled MAD, or NULL, if MAD isn't assembled yet
- */
-static struct ib_mad_private *
-reassemble_recv(struct ib_mad_agent_private *mad_agent_priv,
-                struct ib_mad_private *recv)
-{
-        /* Until we have RMPP, all receives are reassembled!... */
-        INIT_LIST_HEAD(&recv->header.recv_wc.recv_buf.list);
-        return recv;
-}
-
 static struct ib_mad_send_wr_private*
 find_send_req(struct ib_mad_agent_private *mad_agent_priv,
               u64 tid)
@@ -1600,29 +1561,22 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv,
 }
 
 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
-                                 struct ib_mad_private *recv,
-                                 int solicited)
+                                 struct ib_mad_recv_wc *mad_recv_wc)
 {
         struct ib_mad_send_wr_private *mad_send_wr;
         struct ib_mad_send_wc mad_send_wc;
         unsigned long flags;
+        u64 tid;
 
-        /* Fully reassemble receive before processing */
-        recv = reassemble_recv(mad_agent_priv, recv);
-        if (!recv) {
-                if (atomic_dec_and_test(&mad_agent_priv->refcount))
-                        wake_up(&mad_agent_priv->wait);
-                return;
-        }
-
+        INIT_LIST_HEAD(&mad_recv_wc->recv_buf.list);
         /* Complete corresponding request */
-        if (solicited) {
+        if (response_mad(mad_recv_wc->recv_buf.mad)) {
+                tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
-                mad_send_wr = find_send_req(mad_agent_priv,
-                                            recv->mad.mad.mad_hdr.tid);
+                mad_send_wr = find_send_req(mad_agent_priv, tid);
                 if (!mad_send_wr) {
                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-                        ib_free_recv_mad(&recv->header.recv_wc);
+                        ib_free_recv_mad(mad_recv_wc);
                         if (atomic_dec_and_test(&mad_agent_priv->refcount))
                                 wake_up(&mad_agent_priv->wait);
                         return;
@@ -1632,10 +1586,9 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
                 /* Defined behavior is to complete response before request */
-                recv->header.recv_wc.wc->wr_id = mad_send_wr->wr_id;
-                mad_agent_priv->agent.recv_handler(
-                                        &mad_agent_priv->agent,
-                                        &recv->header.recv_wc);
+                mad_recv_wc->wc->wr_id = mad_send_wr->wr_id;
+                mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+                                                   mad_recv_wc);
                 atomic_dec(&mad_agent_priv->refcount);
 
                 mad_send_wc.status = IB_WC_SUCCESS;
@@ -1643,9 +1596,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
                 mad_send_wc.wr_id = mad_send_wr->wr_id;
                 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
         } else {
-                mad_agent_priv->agent.recv_handler(
-                                        &mad_agent_priv->agent,
-                                        &recv->header.recv_wc);
+                mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+                                                   mad_recv_wc);
                 if (atomic_dec_and_test(&mad_agent_priv->refcount))
                         wake_up(&mad_agent_priv->wait);
         }
@@ -1659,7 +1611,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
         struct ib_mad_private *recv, *response;
         struct ib_mad_list_head *mad_list;
         struct ib_mad_agent_private *mad_agent;
-        int solicited;
 
         response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
         if (!response)
@@ -1745,11 +1696,9 @@ local:
                 }
         }
 
-        /* Determine corresponding MAD agent for incoming receive MAD */
-        solicited = solicited_mad(&recv->mad.mad);
-        mad_agent = find_mad_agent(port_priv, &recv->mad.mad, solicited);
+        mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
         if (mad_agent) {
-                ib_mad_complete_recv(mad_agent, recv, solicited);
+                ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
                 /*
                  * recv is freed up in error cases in ib_mad_complete_recv
                  * or via recv_handler in ib_mad_complete_recv()