[INET]: Remove struct dst_entry *dst from request_sock_ops.rtx_syn_ack.

It looks like the dst parameter is kept in this API for historical
reasons only.  In practice it is used solely by the direct call to
tcp_v4_send_synack.  So, create a wrapper for tcp_v4_send_synack
and remove dst from rtx_syn_ack.

Signed-off-by: Denis V. Lunev <den@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Denis V. Lunev <den@openvz.org>
Date:      2008-02-29 11:43:03 -08:00
Committer: David S. Miller <davem@davemloft.net>
Commit:    fd80eb942a
Parent:    58fbbed4fb

8 changed files with 55 additions and 57 deletions
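
In short, the rtx_syn_ack callback now takes only (sk, req); the one caller that
may already hold a route (tcp_v4_conn_request) keeps a dst-taking helper, and a
thin wrapper with the narrowed signature is registered as the callback.  A
condensed sketch of the resulting shape (a hand-written summary of the hunks
below, not a verbatim excerpt; bodies, error handling and unrelated members
are omitted):

    /* include/net/request_sock.h: the callback loses its dst argument. */
    int (*rtx_syn_ack)(struct sock *sk, struct request_sock *req);

    /* net/ipv4/tcp_ipv4.c: the dst-taking variant keeps the old logic and
     * is still called directly from tcp_v4_conn_request(), which may
     * already hold a route. */
    static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
                                    struct dst_entry *dst);

    /* The wrapper installed as rtx_syn_ack simply passes a NULL dst, so
     * the route is looked up inside __tcp_v4_send_synack(). */
    static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
    {
            return __tcp_v4_send_synack(sk, req, NULL);
    }

The DCCP v4/v6 and TCP v6 implementations drop the parameter outright and
always look the route up themselves.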

include/net/request_sock.h

@@ -31,8 +31,7 @@ struct request_sock_ops {
     int obj_size;
     struct kmem_cache *slab;
     int (*rtx_syn_ack)(struct sock *sk,
-                       struct request_sock *req,
-                       struct dst_entry *dst);
+                       struct request_sock *req);
     void (*send_ack)(struct sk_buff *skb,
                      struct request_sock *req);
     void (*send_reset)(struct sock *sk,

net/dccp/ipv4.c

@@ -471,15 +471,14 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk,
     return &rt->u.dst;
 }
 
-static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
-                                 struct dst_entry *dst)
+static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
 {
     int err = -1;
     struct sk_buff *skb;
+    struct dst_entry *dst;
 
     /* First, grab a route. */
-    if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
+    dst = inet_csk_route_req(sk, req);
+    if (dst == NULL)
         goto out;
 
     skb = dccp_make_response(sk, dst, req);
@@ -620,7 +619,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
     dreq->dreq_iss     = dccp_v4_init_sequence(skb);
     dreq->dreq_service = service;
 
-    if (dccp_v4_send_response(sk, req, NULL))
+    if (dccp_v4_send_response(sk, req))
         goto drop_and_free;
 
     inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);

net/dccp/ipv6.c

@@ -224,8 +224,7 @@ out:
 }
 
-static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
-                                 struct dst_entry *dst)
+static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
 {
     struct inet6_request_sock *ireq6 = inet6_rsk(req);
     struct ipv6_pinfo *np = inet6_sk(sk);
@@ -234,6 +233,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
     struct in6_addr *final_p = NULL, final;
     struct flowi fl;
     int err = -1;
+    struct dst_entry *dst;
 
     memset(&fl, 0, sizeof(fl));
     fl.proto = IPPROTO_DCCP;
@@ -245,29 +245,27 @@
     fl.fl_ip_sport = inet_sk(sk)->sport;
     security_req_classify_flow(req, &fl);
 
-    if (dst == NULL) {
-        opt = np->opt;
-
-        if (opt != NULL && opt->srcrt != NULL) {
-            const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
-
-            ipv6_addr_copy(&final, &fl.fl6_dst);
-            ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-            final_p = &final;
-        }
-
-        err = ip6_dst_lookup(sk, &dst, &fl);
-        if (err)
-            goto done;
-
-        if (final_p)
-            ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-        err = xfrm_lookup(&dst, &fl, sk, 0);
-        if (err < 0)
-            goto done;
-    }
+    opt = np->opt;
+
+    if (opt != NULL && opt->srcrt != NULL) {
+        const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
+
+        ipv6_addr_copy(&final, &fl.fl6_dst);
+        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
+        final_p = &final;
+    }
+
+    err = ip6_dst_lookup(sk, &dst, &fl);
+    if (err)
+        goto done;
+
+    if (final_p)
+        ipv6_addr_copy(&fl.fl6_dst, final_p);
+
+    err = xfrm_lookup(&dst, &fl, sk, 0);
+    if (err < 0)
+        goto done;
 
     skb = dccp_make_response(sk, dst, req);
     if (skb != NULL) {
         struct dccp_hdr *dh = dccp_hdr(skb);
@@ -448,7 +446,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
     dreq->dreq_iss     = dccp_v6_init_sequence(skb);
     dreq->dreq_service = service;
 
-    if (dccp_v6_send_response(sk, req, NULL))
+    if (dccp_v6_send_response(sk, req))
         goto drop_and_free;
 
     inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);

net/dccp/minisocks.c

@@ -216,7 +216,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
          * counter (backoff, monitored by dccp_response_timer).
          */
         req->retrans++;
-        req->rsk_ops->rtx_syn_ack(sk, req, NULL);
+        req->rsk_ops->rtx_syn_ack(sk, req);
     }
     /* Network Duplicate, discard packet */
     return NULL;

net/ipv4/inet_connection_sock.c

@@ -463,7 +463,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
             if (time_after_eq(now, req->expires)) {
                 if ((req->retrans < thresh ||
                      (inet_rsk(req)->acked && req->retrans < max_retries))
-                    && !req->rsk_ops->rtx_syn_ack(parent, req, NULL)) {
+                    && !req->rsk_ops->rtx_syn_ack(parent, req)) {
                     unsigned long timeo;
 
                     if (req->retrans++ == 0)

net/ipv4/tcp_ipv4.c

@@ -723,8 +723,8 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
  *    This still operates on a request_sock only, not on a big
  *    socket.
  */
-static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
-                              struct dst_entry *dst)
+static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
+                                struct dst_entry *dst)
 {
     const struct inet_request_sock *ireq = inet_rsk(req);
     int err = -1;
@@ -732,7 +732,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
 
     /* First, grab a route. */
     if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
-        goto out;
+        return -1;
 
     skb = tcp_make_synack(sk, dst, req);
@@ -751,11 +751,15 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
         err = net_xmit_eval(err);
     }
 
-out:
     dst_release(dst);
     return err;
 }
 
+static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
+{
+    return __tcp_v4_send_synack(sk, req, NULL);
+}
+
 /*
  *    IPv4 request_sock destructor.
  */
@@ -1380,7 +1384,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
     }
     tcp_rsk(req)->snt_isn = isn;
 
-    if (tcp_v4_send_synack(sk, req, dst))
+    if (__tcp_v4_send_synack(sk, req, dst))
         goto drop_and_free;
 
     if (want_cookie) {

net/ipv4/tcp_minisocks.c

@@ -536,7 +536,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
          * Enforce "SYN-ACK" according to figure 8, figure 6
          * of RFC793, fixed by RFC1122.
          */
-        req->rsk_ops->rtx_syn_ack(sk, req, NULL);
+        req->rsk_ops->rtx_syn_ack(sk, req);
         return NULL;
     }

net/ipv6/tcp_ipv6.c

@@ -455,8 +455,7 @@ out:
 }
 
-static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
-                              struct dst_entry *dst)
+static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
 {
     struct inet6_request_sock *treq = inet6_rsk(req);
     struct ipv6_pinfo *np = inet6_sk(sk);
@@ -464,6 +463,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
     struct ipv6_txoptions *opt = NULL;
     struct in6_addr * final_p = NULL, final;
     struct flowi fl;
+    struct dst_entry *dst;
     int err = -1;
 
     memset(&fl, 0, sizeof(fl));
@@ -476,24 +476,22 @@
     fl.fl_ip_sport = inet_sk(sk)->sport;
     security_req_classify_flow(req, &fl);
 
-    if (dst == NULL) {
-        opt = np->opt;
-        if (opt && opt->srcrt) {
-            struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
-            ipv6_addr_copy(&final, &fl.fl6_dst);
-            ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
-            final_p = &final;
-        }
-
-        err = ip6_dst_lookup(sk, &dst, &fl);
-        if (err)
-            goto done;
-        if (final_p)
-            ipv6_addr_copy(&fl.fl6_dst, final_p);
-        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
-            goto done;
-    }
+    opt = np->opt;
+    if (opt && opt->srcrt) {
+        struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
+        ipv6_addr_copy(&final, &fl.fl6_dst);
+        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
+        final_p = &final;
+    }
+
+    err = ip6_dst_lookup(sk, &dst, &fl);
+    if (err)
+        goto done;
+    if (final_p)
+        ipv6_addr_copy(&fl.fl6_dst, final_p);
+    if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
+        goto done;
 
     skb = tcp_make_synack(sk, dst, req);
     if (skb) {
         struct tcphdr *th = tcp_hdr(skb);
@@ -1294,7 +1292,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 
     security_inet_conn_request(sk, skb, req);
 
-    if (tcp_v6_send_synack(sk, req, NULL))
+    if (tcp_v6_send_synack(sk, req))
         goto drop;
 
     inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);