mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-22 20:23:57 +08:00
a1399f8bb0
Each channel on a connection has a separate, independent number space from which to allocate callNumber values. It is entirely possible, for example, to have a connection with four active calls, each with call number 1. Note that the callNumber values for any particular channel don't have to start at 1, but they are supposed to increment monotonically for that channel from a client's perspective and may not be reused once the call number is transmitted (until the epoch cycles all the way back round). Currently, however, call numbers are allocated on a per-connection basis and, further, are held in an rb-tree. The rb-tree is redundant as the four channel pointers in the rxrpc_connection struct are entirely capable of pointing to all the calls currently in progress on a connection. To this end, make the following changes: (1) Handle call number allocation independently per channel. (2) Get rid of the conn->calls rb-tree. This is overkill as a connection may have a maximum of four calls in progress at any one time. Use the pointers in the channels[] array instead, indexed by the channel number from the packet. (3) For each channel, save the result of the last call that was in progress on that channel in conn->channels[] so that the final ACK or ABORT packet can be replayed if necessary. Any call earlier than that is just ignored. If we've seen the next call number in a packet, the last one is most definitely defunct. (4) When generating a RESPONSE packet for a connection, the call number counter for each channel must be included in it. (5) When parsing a RESPONSE packet for a connection, the call number counters contained therein should be used to set the minimum expected call numbers on each channel. To do in future commits: (1) Replay terminal packets based on the last call stored in conn->channels[]. (2) Connections should be retired before the callNumber space on any channel runs out. 
(3) A server is expected to disregard or reject any new incoming call that has a call number less than the current call number counter. The call number counter for that channel must be advanced to the new call number. Note that the server cannot just require that the next call that it sees on a channel be exactly the call number counter + 1 because then there's a scenario that could cause a problem: The client transmits a packet to initiate a connection, the network goes out, the server sends an ACK (which gets lost), the client sends an ABORT (which also gets lost); the network then reconnects, the client then reuses the call number for the next call (it doesn't know the server already saw the call number), but the server thinks it already has the first packet of this call (it doesn't know that the client doesn't know that it saw the call number the first time). Signed-off-by: David Howells <dhowells@redhat.com>
193 lines
5.1 KiB
C
193 lines
5.1 KiB
C
/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Fixed-width state names, indexed by connection state, used to keep the
 * columns of the /proc/net/rxrpc_conns listing aligned.
 */
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused ",
	[RXRPC_CONN_CLIENT]			= "Client ",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CONN_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CONN_NETWORK_ERROR]		= "NetError",
};
/*
|
|
* generate a list of extant and dead calls in /proc/net/rxrpc_calls
|
|
*/
|
|
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
|
|
{
|
|
read_lock(&rxrpc_call_lock);
|
|
return seq_list_start_head(&rxrpc_calls, *_pos);
|
|
}
|
|
|
|
/* Step the iterator on to the next call in the global list. */
static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &rxrpc_calls, pos);
}
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
|
|
{
|
|
read_unlock(&rxrpc_call_lock);
|
|
}
|
|
|
|
static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
|
|
{
|
|
struct rxrpc_connection *conn;
|
|
struct rxrpc_call *call;
|
|
char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
|
|
|
|
if (v == &rxrpc_calls) {
|
|
seq_puts(seq,
|
|
"Proto Local Remote "
|
|
" SvID ConnID CallID End Use State Abort "
|
|
" UserID\n");
|
|
return 0;
|
|
}
|
|
|
|
call = list_entry(v, struct rxrpc_call, link);
|
|
|
|
sprintf(lbuff, "%pI4:%u",
|
|
&call->local->srx.transport.sin.sin_addr,
|
|
ntohs(call->local->srx.transport.sin.sin_port));
|
|
|
|
conn = call->conn;
|
|
if (conn)
|
|
sprintf(rbuff, "%pI4:%u",
|
|
&conn->params.peer->srx.transport.sin.sin_addr,
|
|
ntohs(conn->params.peer->srx.transport.sin.sin_port));
|
|
else
|
|
strcpy(rbuff, "no_connection");
|
|
|
|
seq_printf(seq,
|
|
"UDP %-22.22s %-22.22s %4x %08x %08x %s %3u"
|
|
" %-8.8s %08x %lx\n",
|
|
lbuff,
|
|
rbuff,
|
|
call->service_id,
|
|
call->cid,
|
|
call->call_id,
|
|
call->in_clientflag ? "Svc" : "Clt",
|
|
atomic_read(&call->usage),
|
|
rxrpc_call_states[call->state],
|
|
call->remote_abort ?: call->local_abort,
|
|
call->user_call_ID);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static const struct seq_operations rxrpc_call_seq_ops = {
|
|
.start = rxrpc_call_seq_start,
|
|
.next = rxrpc_call_seq_next,
|
|
.stop = rxrpc_call_seq_stop,
|
|
.show = rxrpc_call_seq_show,
|
|
};
|
|
|
|
static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
|
|
{
|
|
return seq_open(file, &rxrpc_call_seq_ops);
|
|
}
|
|
|
|
const struct file_operations rxrpc_call_seq_fops = {
|
|
.owner = THIS_MODULE,
|
|
.open = rxrpc_call_seq_open,
|
|
.read = seq_read,
|
|
.llseek = seq_lseek,
|
|
.release = seq_release,
|
|
};
|
|
|
|
/*
|
|
* generate a list of extant virtual connections in /proc/net/rxrpc_conns
|
|
*/
|
|
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
|
|
{
|
|
read_lock(&rxrpc_connection_lock);
|
|
return seq_list_start_head(&rxrpc_connections, *_pos);
|
|
}
|
|
|
|
static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
|
|
loff_t *pos)
|
|
{
|
|
return seq_list_next(v, &rxrpc_connections, pos);
|
|
}
|
|
|
|
static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
|
|
{
|
|
read_unlock(&rxrpc_connection_lock);
|
|
}
|
|
|
|
static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
|
|
{
|
|
struct rxrpc_connection *conn;
|
|
char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
|
|
|
|
if (v == &rxrpc_connections) {
|
|
seq_puts(seq,
|
|
"Proto Local Remote "
|
|
" SvID ConnID End Use State Key "
|
|
" Serial ISerial\n"
|
|
);
|
|
return 0;
|
|
}
|
|
|
|
conn = list_entry(v, struct rxrpc_connection, link);
|
|
|
|
sprintf(lbuff, "%pI4:%u",
|
|
&conn->params.local->srx.transport.sin.sin_addr,
|
|
ntohs(conn->params.local->srx.transport.sin.sin_port));
|
|
|
|
sprintf(rbuff, "%pI4:%u",
|
|
&conn->params.peer->srx.transport.sin.sin_addr,
|
|
ntohs(conn->params.peer->srx.transport.sin.sin_port));
|
|
|
|
seq_printf(seq,
|
|
"UDP %-22.22s %-22.22s %4x %08x %s %3u"
|
|
" %s %08x %08x %08x\n",
|
|
lbuff,
|
|
rbuff,
|
|
conn->params.service_id,
|
|
conn->proto.cid,
|
|
rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
|
|
atomic_read(&conn->usage),
|
|
rxrpc_conn_states[conn->state],
|
|
key_serial(conn->params.key),
|
|
atomic_read(&conn->serial),
|
|
atomic_read(&conn->hi_serial));
|
|
|
|
return 0;
|
|
}
|
|
|
|
static const struct seq_operations rxrpc_connection_seq_ops = {
|
|
.start = rxrpc_connection_seq_start,
|
|
.next = rxrpc_connection_seq_next,
|
|
.stop = rxrpc_connection_seq_stop,
|
|
.show = rxrpc_connection_seq_show,
|
|
};
|
|
|
|
|
|
static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
|
|
{
|
|
return seq_open(file, &rxrpc_connection_seq_ops);
|
|
}
|
|
|
|
const struct file_operations rxrpc_connection_seq_fops = {
|
|
.owner = THIS_MODULE,
|
|
.open = rxrpc_connection_seq_open,
|
|
.read = seq_read,
|
|
.llseek = seq_lseek,
|
|
.release = seq_release,
|
|
};
|