linux-next/drivers/infiniband/core/cm_msgs.h
Roland Dreier, commit 3910f44d79: [IB] cm: Add missing break in switch
Add missing "break" in switch statement.  Without the break, the
CM ended up always falling through and setting every connection
request to use RC transport, which meant that UC connections
didn't work.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
2005-10-20 12:29:36 -07:00
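The corrected switch is the cm_req_set_qp_type() helper later in this file; condensed, with explanatory comments added here for illustration, the UC case now reads:

	switch (qp_type) {
	case IB_QPT_UC:
		/* transport service type bits (1-2 of offset40) = UC (0x2) */
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(req_msg->offset40) &
						 0xFFFFFFF9) | 0x2);
		break;		/* previously missing: fell through to the RC case below */
	default:
		/* clear the bits: RC is encoded as 0 */
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(req_msg->offset40) &
						0xFFFFFFF9);
	}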

/*
* Copyright (c) 2004 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#if !defined(CM_MSGS_H)
#define CM_MSGS_H
#include <rdma/ib_mad.h>
/*
* Parameters to routines below should be in network-byte order, and values
* are returned in network-byte order.
*/
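/*
 * Usage sketch (illustrative only, not part of the original header): callers
 * convert host-order values with cpu_to_be32()/cpu_to_be64() before passing
 * them in, and convert returned values back with be32_to_cpu(), e.g.:
 *
 *	struct cm_req_msg req;			// assume zero-initialized
 *	u32 qpn;
 *
 *	cm_req_set_local_qpn(&req, cpu_to_be32(0x1234));
 *	qpn = be32_to_cpu(cm_req_get_local_qpn(&req));	// qpn == 0x1234
 */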
#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
#define CM_REQ_ATTR_ID __constant_htons(0x0010)
#define CM_MRA_ATTR_ID __constant_htons(0x0011)
#define CM_REJ_ATTR_ID __constant_htons(0x0012)
#define CM_REP_ATTR_ID __constant_htons(0x0013)
#define CM_RTU_ATTR_ID __constant_htons(0x0014)
#define CM_DREQ_ATTR_ID __constant_htons(0x0015)
#define CM_DREP_ATTR_ID __constant_htons(0x0016)
#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
#define CM_LAP_ATTR_ID __constant_htons(0x0019)
#define CM_APR_ATTR_ID __constant_htons(0x001A)
enum cm_msg_sequence {
CM_MSG_SEQUENCE_REQ,
CM_MSG_SEQUENCE_LAP,
CM_MSG_SEQUENCE_DREQ,
CM_MSG_SEQUENCE_SIDR
};
struct cm_req_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 rsvd4;
__be64 service_id;
__be64 local_ca_guid;
__be32 rsvd24;
__be32 local_qkey;
/* local QPN:24, responder resources:8 */
__be32 offset32;
/* local EECN:24, initiator depth:8 */
__be32 offset36;
/*
* remote EECN:24, remote CM response timeout:5,
* transport service type:2, end-to-end flow control:1
*/
__be32 offset40;
/* starting PSN:24, local CM response timeout:5, retry count:3 */
__be32 offset44;
__be16 pkey;
/* path MTU:4, RDC exists:1, RNR retry count:3. */
u8 offset50;
/* max CM Retries:4, SRQ:1, rsvd:3 */
u8 offset51;
__be16 primary_local_lid;
__be16 primary_remote_lid;
union ib_gid primary_local_gid;
union ib_gid primary_remote_gid;
/* flow label:20, rsvd:6, packet rate:6 */
__be32 primary_offset88;
u8 primary_traffic_class;
u8 primary_hop_limit;
/* SL:4, subnet local:1, rsvd:3 */
u8 primary_offset94;
/* local ACK timeout:5, rsvd:3 */
u8 primary_offset95;
__be16 alt_local_lid;
__be16 alt_remote_lid;
union ib_gid alt_local_gid;
union ib_gid alt_remote_gid;
/* flow label:20, rsvd:6, packet rate:6 */
__be32 alt_offset132;
u8 alt_traffic_class;
u8 alt_hop_limit;
/* SL:4, subnet local:1, rsvd:3 */
u8 alt_offset138;
/* local ACK timeout:5, rsvd:3 */
u8 alt_offset139;
u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}
static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
(be32_to_cpu(req_msg->offset32) &
0x000000FF));
}
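/*
 * Illustrative sketch (not part of the original header): offset32 packs the
 * 24-bit local QPN into its upper three bytes and the 8-bit responder
 * resources value into its low byte, so the two accessors can be used
 * independently without clobbering each other.  Starting from a
 * zero-initialized REQ:
 *
 *	cm_req_set_local_qpn(&req, cpu_to_be32(0x000ABC));
 *	cm_req_set_resp_res(&req, 4);
 *	// be32_to_cpu(req.offset32) == 0x000ABC04
 *	// cm_req_get_resp_res(&req) == 4
 */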
static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
return (u8) be32_to_cpu(req_msg->offset32);
}
static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
req_msg->offset32 = cpu_to_be32(resp_res |
(be32_to_cpu(req_msg->offset32) &
0xFFFFFF00));
}
static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
return (u8) be32_to_cpu(req_msg->offset36);
}
static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
u8 init_depth)
{
req_msg->offset36 = cpu_to_be32(init_depth |
(be32_to_cpu(req_msg->offset36) &
0xFFFFFF00));
}
static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}
static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
u8 resp_timeout)
{
req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
(be32_to_cpu(req_msg->offset40) &
0xFFFFFF07));
}
static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
switch(transport_type) {
case 0: return IB_QPT_RC;
case 1: return IB_QPT_UC;
default: return 0;
}
}
static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
enum ib_qp_type qp_type)
{
switch(qp_type) {
case IB_QPT_UC:
req_msg->offset40 = cpu_to_be32((be32_to_cpu(
req_msg->offset40) &
0xFFFFFFF9) | 0x2);
break;
default:
req_msg->offset40 = cpu_to_be32(be32_to_cpu(
req_msg->offset40) &
0xFFFFFFF9);
}
}
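/*
 * Example (illustrative only): bits 1-2 of offset40 carry the transport
 * service type, so RC and UC requests differ only in those two bits.  Given
 * a struct cm_req_msg req:
 *
 *	cm_req_set_qp_type(&req, IB_QPT_UC);
 *	// cm_req_get_qp_type(&req) == IB_QPT_UC
 *	cm_req_set_qp_type(&req, IB_QPT_RC);
 *	// cm_req_get_qp_type(&req) == IB_QPT_RC (encoded as 0, the default)
 */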
static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
return be32_to_cpu(req_msg->offset40) & 0x1;
}
static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
u8 flow_ctrl)
{
req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
(be32_to_cpu(req_msg->offset40) &
0xFFFFFFFE));
}
static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}
static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
__be32 starting_psn)
{
req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
(be32_to_cpu(req_msg->offset44) & 0x000000FF));
}
static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}
static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
u8 resp_timeout)
{
req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
(be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}
static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}
static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
u8 retry_count)
{
req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
(be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}
static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
return req_msg->offset50 >> 4;
}
static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}
static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
return req_msg->offset50 & 0x7;
}
static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
u8 rnr_retry_count)
{
req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
(rnr_retry_count & 0x7));
}
static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
return req_msg->offset51 >> 4;
}
static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
u8 retries)
{
req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}
static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
return (req_msg->offset51 & 0x8) >> 3;
}
static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
((srq & 0x1) << 3));
}
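/*
 * Illustrative sketch (not part of the original header): offset50 and
 * offset51 are single bytes holding several sub-fields, so each setter masks
 * out its own bits before or-ing in the new value.  Starting from a
 * zero-initialized REQ:
 *
 *	cm_req_set_path_mtu(&req, 4);		// 4 is the IB encoding of a 2048-byte MTU
 *	cm_req_set_rnr_retry_count(&req, 7);	// 7 means retry indefinitely
 *	// req.offset50 == 0x47
 *	cm_req_set_max_cm_retries(&req, 15);
 *	cm_req_set_srq(&req, 1);
 *	// req.offset51 == 0xF8
 */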
static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}
static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
__be32 flow_label)
{
req_msg->primary_offset88 = cpu_to_be32(
(be32_to_cpu(req_msg->primary_offset88) &
0x00000FFF) |
(be32_to_cpu(flow_label) << 12));
}
static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}
static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
u8 rate)
{
req_msg->primary_offset88 = cpu_to_be32(
(be32_to_cpu(req_msg->primary_offset88) &
0xFFFFFFC0) | (rate & 0x3F));
}
static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
return (u8) (req_msg->primary_offset94 >> 4);
}
static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
(sl << 4));
}
static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}
static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
u8 subnet_local)
{
req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
((subnet_local & 0x1) << 3));
}
static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
return (u8) (req_msg->primary_offset95 >> 3);
}
static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
u8 local_ack_timeout)
{
req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
(local_ack_timeout << 3));
}
static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}
static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
__be32 flow_label)
{
req_msg->alt_offset132 = cpu_to_be32(
(be32_to_cpu(req_msg->alt_offset132) &
0x00000FFF) |
(be32_to_cpu(flow_label) << 12));
}
static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}
static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
u8 rate)
{
req_msg->alt_offset132 = cpu_to_be32(
(be32_to_cpu(req_msg->alt_offset132) &
0xFFFFFFC0) | (rate & 0x3F));
}
static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
return (u8) (req_msg->alt_offset138 >> 4);
}
static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
(sl << 4));
}
static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}
static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
u8 subnet_local)
{
req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
((subnet_local & 0x1) << 3));
}
static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
return (u8) (req_msg->alt_offset139 >> 3);
}
static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
u8 local_ack_timeout)
{
req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
(local_ack_timeout << 3));
}
/* Message REJected or MRAed */
enum cm_msg_response {
CM_MSG_RESPONSE_REQ = 0x0,
CM_MSG_RESPONSE_REP = 0x1,
CM_MSG_RESPONSE_OTHER = 0x2
};
struct cm_mra_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
/* message MRAed:2, rsvd:6 */
u8 offset8;
/* service timeout:5, rsvd:3 */
u8 offset9;
u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
return (u8) (mra_msg->offset8 >> 6);
}
static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}
static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
return (u8) (mra_msg->offset9 >> 3);
}
static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
u8 service_timeout)
{
mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
(service_timeout << 3));
}
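/*
 * Example (illustrative only): the 2-bit "message MRAed" field records which
 * message is being acknowledged, using the cm_msg_response values above:
 *
 *	struct cm_mra_msg mra;			// assume zero-initialized
 *
 *	cm_mra_set_msg_mraed(&mra, CM_MSG_RESPONSE_REQ);
 *	cm_mra_set_service_timeout(&mra, 20);	// 4.096 us * 2^20, roughly 4 s
 *	// cm_mra_get_msg_mraed(&mra) == CM_MSG_RESPONSE_REQ
 */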
struct cm_rej_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
/* message REJected:2, rsvd:6 */
u8 offset8;
/* reject info length:7, rsvd:1. */
u8 offset9;
__be16 reason;
u8 ari[IB_CM_REJ_ARI_LENGTH];
u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
return (u8) (rej_msg->offset8 >> 6);
}
static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}
static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
return (u8) (rej_msg->offset9 >> 1);
}
static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
u8 len)
{
rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}
struct cm_rep_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
__be32 local_qkey;
/* local QPN:24, rsvd:8 */
__be32 offset12;
/* local EECN:24, rsvd:8 */
__be32 offset16;
/* starting PSN:24 rsvd:8 */
__be32 offset20;
u8 resp_resources;
u8 initiator_depth;
/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
u8 offset26;
/* RNR retry count:3, SRQ:1, rsvd:5 */
u8 offset27;
__be64 local_ca_guid;
u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}
static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
(be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}
static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}
static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
__be32 starting_psn)
{
rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
(be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}
static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
return (u8) (rep_msg->offset26 >> 3);
}
static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
u8 target_ack_delay)
{
rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
(target_ack_delay << 3));
}
static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}
static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
((failover & 0x3) << 1));
}
static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
return (u8) (rep_msg->offset26 & 0x01);
}
static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
u8 flow_ctrl)
{
rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
(flow_ctrl & 0x1));
}
static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
return (u8) (rep_msg->offset27 >> 5);
}
static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
u8 rnr_retry_count)
{
rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
(rnr_retry_count << 5));
}
static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}
static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
((srq & 0x1) << 4));
}
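/*
 * Illustrative sketch (not part of the original header): a passive side
 * filling in the QP-related REP fields might do something like the
 * following, where qp_num and psn are host-order values chosen by the
 * caller:
 *
 *	struct cm_rep_msg rep;			// assume zero-initialized
 *
 *	cm_rep_set_local_qpn(&rep, cpu_to_be32(qp_num));
 *	cm_rep_set_starting_psn(&rep, cpu_to_be32(psn));
 *	cm_rep_set_rnr_retry_count(&rep, 7);
 *	cm_rep_set_flow_ctrl(&rep, 1);
 *	rep.resp_resources = 4;
 *	rep.initiator_depth = 4;
 */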
struct cm_rtu_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
struct cm_dreq_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
/* remote QPN/EECN:24, rsvd:8 */
__be32 offset8;
u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}
static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
(be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}
struct cm_drep_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
struct cm_lap_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
__be32 rsvd8;
/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
__be32 offset12;
__be32 rsvd16;
__be16 alt_local_lid;
__be16 alt_remote_lid;
union ib_gid alt_local_gid;
union ib_gid alt_remote_gid;
/* flow label:20, rsvd:4, traffic class:8 */
__be32 offset56;
u8 alt_hop_limit;
/* rsvd:2, packet rate:6 */
u8 offset61;
/* SL:4, subnet local:1, rsvd:3 */
u8 offset62;
/* local ACK timeout:5, rsvd:3 */
u8 offset63;
u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}
static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
(be32_to_cpu(lap_msg->offset12) &
0x000000FF));
}
static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}
static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
u8 resp_timeout)
{
lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
(be32_to_cpu(lap_msg->offset12) &
0xFFFFFF07));
}
static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}
static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
__be32 flow_label)
{
lap_msg->offset56 = cpu_to_be32(
(be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
(be32_to_cpu(flow_label) << 12));
}
static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
return (u8) be32_to_cpu(lap_msg->offset56);
}
static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
u8 traffic_class)
{
lap_msg->offset56 = cpu_to_be32(traffic_class |
(be32_to_cpu(lap_msg->offset56) &
0xFFFFFF00));
}
static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
return lap_msg->offset61 & 0x3F;
}
static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
u8 packet_rate)
{
lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}
static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
return lap_msg->offset62 >> 4;
}
static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}
static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
return (lap_msg->offset62 >> 3) & 0x1;
}
static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
u8 subnet_local)
{
lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
(lap_msg->offset62 & 0xF7);
}
static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
return lap_msg->offset63 >> 3;
}
static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
u8 local_ack_timeout)
{
lap_msg->offset63 = (local_ack_timeout << 3) |
(lap_msg->offset63 & 0x07);
}
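/*
 * Example (illustrative only): loading an alternate path via LAP mirrors the
 * alternate-path fields of the REQ; LIDs and GIDs are written directly while
 * the packed fields go through the accessors:
 *
 *	struct cm_lap_msg lap;			// assume zero-initialized
 *
 *	lap.alt_local_lid = cpu_to_be16(local_lid);
 *	lap.alt_remote_lid = cpu_to_be16(remote_lid);
 *	cm_lap_set_sl(&lap, sl);
 *	cm_lap_set_packet_rate(&lap, rate);
 *	cm_lap_set_local_ack_timeout(&lap, timeout);
 */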
struct cm_apr_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
u8 info_length;
u8 ap_status;
u8 info[IB_CM_APR_INFO_LENGTH];
u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
struct cm_sidr_req_msg {
struct ib_mad_hdr hdr;
__be32 request_id;
__be16 pkey;
__be16 rsvd;
__be64 service_id;
u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
struct cm_sidr_rep_msg {
struct ib_mad_hdr hdr;
__be32 request_id;
u8 status;
u8 info_length;
__be16 rsvd;
/* QPN:24, rsvd:8 */
__be32 offset8;
__be64 service_id;
__be32 qkey;
u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}
static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
__be32 qpn)
{
sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
(be32_to_cpu(sidr_rep_msg->offset8) &
0x000000FF));
}
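/*
 * Illustrative sketch (not part of the original header): a SIDR responder
 * reports the target QPN and Q_Key in its reply:
 *
 *	struct cm_sidr_rep_msg sidr_rep;	// assume zero-initialized
 *
 *	cm_sidr_rep_set_qpn(&sidr_rep, cpu_to_be32(qp_num));
 *	sidr_rep.qkey = cpu_to_be32(qkey);
 *	sidr_rep.status = 0;			// 0 indicates success
 */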
#endif /* CM_MSGS_H */