linux-next/include/linux/netlink.h


#ifndef __LINUX_NETLINK_H
#define __LINUX_NETLINK_H
#include <linux/socket.h> /* for sa_family_t */
#include <linux/types.h>
#define NETLINK_ROUTE 0 /* Routing/device hook */
#define NETLINK_UNUSED 1 /* Unused number */
#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
#define NETLINK_FIREWALL 3 /* Firewalling hook */
#define NETLINK_INET_DIAG 4 /* INET socket monitoring */
#define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */
#define NETLINK_XFRM 6 /* ipsec */
#define NETLINK_SELINUX 7 /* SELinux event notifications */
#define NETLINK_ISCSI 8 /* Open-iSCSI */
#define NETLINK_AUDIT 9 /* auditing */
#define NETLINK_FIB_LOOKUP 10
#define NETLINK_CONNECTOR 11
#define NETLINK_NETFILTER 12 /* netfilter subsystem */
#define NETLINK_IP6_FW 13
#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */
#define NETLINK_GENERIC 16
/* leave room for NETLINK_DM (DM Events) */
#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */
#define NETLINK_ECRYPTFS 19
#define MAX_LINKS 32
struct net;
struct sockaddr_nl
{
	sa_family_t	nl_family;	/* AF_NETLINK */
	unsigned short	nl_pad;		/* zero */
	__u32		nl_pid;		/* port ID */
	__u32		nl_groups;	/* multicast groups mask */
};
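/*
 * Illustrative sketch, not part of this header: opening and binding an
 * AF_NETLINK socket from userspace with struct sockaddr_nl above (needs
 * <sys/socket.h>, <string.h>, <unistd.h> and this header).  The choice of
 * NETLINK_ROUTE and of getpid() as the port ID are examples only; nl_pid
 * may also be left 0 so the kernel assigns a port ID.
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.nl_family = AF_NETLINK;
 *	sa.nl_pid    = getpid();
 *	sa.nl_groups = 0;
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */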
struct nlmsghdr
{
	__u32		nlmsg_len;	/* Length of message including header */
	__u16		nlmsg_type;	/* Message content */
	__u16		nlmsg_flags;	/* Additional flags */
	__u32		nlmsg_seq;	/* Sequence number */
	__u32		nlmsg_pid;	/* Sending process port ID */
};
/* Flags values */
#define NLM_F_REQUEST 1 /* It is a request message. */
#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */
#define NLM_F_ECHO 8 /* Echo this request */
/* Modifiers to GET request */
#define NLM_F_ROOT 0x100 /* specify tree root */
#define NLM_F_MATCH 0x200 /* return all matching */
#define NLM_F_ATOMIC 0x400 /* atomic GET */
#define NLM_F_DUMP (NLM_F_ROOT|NLM_F_MATCH)
/* Modifiers to NEW request */
#define NLM_F_REPLACE 0x100 /* Override existing */
#define NLM_F_EXCL 0x200 /* Do not touch, if it exists */
#define NLM_F_CREATE 0x400 /* Create, if it does not exist */
#define NLM_F_APPEND 0x800 /* Add to end of list */
/*
   4.4BSD ADD		NLM_F_CREATE|NLM_F_EXCL
   4.4BSD CHANGE	NLM_F_REPLACE

   True CHANGE		NLM_F_CREATE|NLM_F_REPLACE
   Append		NLM_F_CREATE
   Check		NLM_F_EXCL
 */
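/*
 * Illustrative sketch, not part of this header: flag combinations a
 * requester typically places in nlmsg_flags, matching the table above.
 *
 *	add, failing if the object already exists, and ask for an ack:
 *		nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
 *	dump a whole table in one GET request:
 *		nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
 */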
#define NLMSG_ALIGNTO 4
#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
#define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN))
#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len))
#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
#define NLMSG_NEXT(nlh,len)	 ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
				  (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len)))
#define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \
			   (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \
			   (nlh)->nlmsg_len <= (len))
#define NLMSG_PAYLOAD(nlh,len) ((nlh)->nlmsg_len - NLMSG_SPACE((len)))
#define NLMSG_NOOP 0x1 /* Nothing. */
#define NLMSG_ERROR 0x2 /* Error */
#define NLMSG_DONE 0x3 /* End of a dump */
#define NLMSG_OVERRUN 0x4 /* Data lost */
#define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */
struct nlmsgerr
{
	int		error;
	struct nlmsghdr msg;
};
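/*
 * Illustrative sketch, not part of this header: walking the messages in a
 * buffer filled by recvmsg() with the NLMSG_* macros above.  buf, recv_len,
 * handle_payload() and report_error() are assumptions made for the example;
 * for NLMSG_ERROR, nlmsgerr.error is 0 for an ACK and a negative errno
 * otherwise.
 *
 *	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
 *	int len = recv_len;
 *
 *	for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
 *		if (nlh->nlmsg_type == NLMSG_DONE)
 *			break;
 *		if (nlh->nlmsg_type == NLMSG_ERROR) {
 *			struct nlmsgerr *err = NLMSG_DATA(nlh);
 *			report_error(err->error);
 *			break;
 *		}
 *		handle_payload(NLMSG_DATA(nlh), NLMSG_PAYLOAD(nlh, 0));
 *	}
 */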
#define NETLINK_ADD_MEMBERSHIP 1
#define NETLINK_DROP_MEMBERSHIP 2
#define NETLINK_PKTINFO 3
struct nl_pktinfo
{
	__u32	group;
};
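/*
 * Illustrative sketch, not part of this header: joining a multicast group
 * and enabling packet info from userspace with the socket options above.
 * The group number is an example only; with NETLINK_PKTINFO enabled, a
 * struct nl_pktinfo is delivered as a SOL_NETLINK control message on
 * recvmsg().
 *
 *	int grp = 1;
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
 *	setsockopt(fd, SOL_NETLINK, NETLINK_PKTINFO, &on, sizeof(on));
 */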
#define NET_MAJOR 36 /* Major 36 is reserved for networking */
enum {
	NETLINK_UNCONNECTED = 0,
	NETLINK_CONNECTED,
};
/*
* <------- NLA_HDRLEN ------> <-- NLA_ALIGN(payload)-->
* +---------------------+- - -+- - - - - - - - - -+- - -+
* | Header | Pad | Payload | Pad |
* | (struct nlattr) | ing | | ing |
* +---------------------+- - -+- - - - - - - - - -+- - -+
* <-------------- nlattr->nla_len -------------->
*/
struct nlattr
{
	__u16	nla_len;
	__u16	nla_type;
};
/*
* nla_type (16 bits)
* +---+---+-------------------------------+
* | N | O | Attribute Type |
* +---+---+-------------------------------+
* N := Carries nested attributes
* O := Payload stored in network byte order
*
* Note: The N and O flag are mutually exclusive.
*/
#define NLA_F_NESTED (1 << 15)
#define NLA_F_NET_BYTEORDER (1 << 14)
#define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER)
#define NLA_ALIGNTO 4
#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr)))
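/*
 * Illustrative sketch, not part of this header: stepping through a stream
 * of attributes with the NLA_* macros above.  attr_start, attr_len and
 * handle_attr() are assumptions made for the example.
 *
 *	struct nlattr *nla = (struct nlattr *)attr_start;
 *	int rem = attr_len;
 *
 *	while (rem >= NLA_HDRLEN && nla->nla_len >= NLA_HDRLEN &&
 *	       nla->nla_len <= rem) {
 *		int type = nla->nla_type & NLA_TYPE_MASK;
 *		void *data = (char *)nla + NLA_HDRLEN;
 *
 *		handle_attr(type, data, nla->nla_len - NLA_HDRLEN);
 *		rem -= NLA_ALIGN(nla->nla_len);
 *		nla = (struct nlattr *)((char *)nla + NLA_ALIGN(nla->nla_len));
 *	}
 */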
#ifdef __KERNEL__
#include <linux/capability.h>
#include <linux/skbuff.h>
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
	return (struct nlmsghdr *)skb->data;
}
struct netlink_skb_parms
{
	struct ucred	creds;		/* Skb credentials */
	__u32		pid;
	__u32		dst_group;
	kernel_cap_t	eff_cap;
	__u32		loginuid;	/* Login (audit) uid */
	__u32		sid;		/* SELinux security id */
};
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
extern struct sock *netlink_kernel_create(struct net *net,
					  int unit, unsigned int groups,
					  void (*input)(struct sk_buff *skb),
					  struct mutex *cb_mutex,
					  struct module *module);
extern void netlink_kernel_release(struct sock *sk);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
			     __u32 group, gfp_t allocation);
extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
extern int netlink_register_notifier(struct notifier_block *nb);
extern int netlink_unregister_notifier(struct notifier_block *nb);
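/*
 * Illustrative sketch, not part of this header: a minimal kernel-side user
 * of the declarations above.  The input callback, the use of
 * NETLINK_USERSOCK and init_net are assumptions made for the example.
 *
 *	static struct sock *my_nl_sk;
 *
 *	static void my_nl_input(struct sk_buff *skb)
 *	{
 *		struct nlmsghdr *nlh = nlmsg_hdr(skb);
 *
 *		printk(KERN_INFO "netlink: type %u from port %u\n",
 *		       nlh->nlmsg_type, NETLINK_CB(skb).pid);
 *	}
 *
 *	my_nl_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, 0,
 *					 my_nl_input, NULL, THIS_MODULE);
 *	...
 *	netlink_kernel_release(my_nl_sk);
 */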
/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		      long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
/*
* skb should fit one page. This choice is good for headerless malloc.
* But we should limit to 8K so that userspace does not have to
* use enormous buffer sizes on recvmsg() calls just to avoid
* MSG_TRUNC when PAGE_SIZE is very large.
*/
#if PAGE_SIZE < 8192UL
#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE)
#else
#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(8192UL)
#endif
#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
struct netlink_callback
{
	struct sk_buff	*skb;
	struct nlmsghdr	*nlh;
	int		(*dump)(struct sk_buff *skb, struct netlink_callback *cb);
	int		(*done)(struct netlink_callback *cb);
	int		family;
	long		args[6];
};
struct netlink_notify
{
	struct net *net;
	int pid;
	int protocol;
};
static __inline__ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_LENGTH(len);

	nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = pid;
	nlh->nlmsg_seq = seq;
	memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
({	if (unlikely(skb_tailroom(skb) < (int)NLMSG_SPACE(len))) \
		goto nlmsg_failure; \
	__nlmsg_put(skb, pid, seq, type, len, flags); })
#define NLMSG_PUT(skb, pid, seq, type, len) \
NLMSG_NEW(skb, pid, seq, type, len, 0)
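/*
 * Illustrative sketch, not part of this header: typical use of NLMSG_PUT()
 * when filling a reply skb.  MY_MSG_TYPE, struct my_payload and the fill
 * function itself are assumptions made for the example; NLMSG_PUT() jumps
 * to the local nlmsg_failure label when the skb runs out of tailroom.
 *
 *	static int my_fill_reply(struct sk_buff *skb, u32 pid, u32 seq)
 *	{
 *		struct nlmsghdr *nlh;
 *		struct my_payload *p;
 *
 *		nlh = NLMSG_PUT(skb, pid, seq, MY_MSG_TYPE, sizeof(*p));
 *		p = NLMSG_DATA(nlh);
 *		p->value = 42;
 *		return 0;
 *
 *	nlmsg_failure:
 *		return -EMSGSIZE;
 *	}
 */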
extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      int (*dump)(struct sk_buff *skb, struct netlink_callback *),
			      int (*done)(struct netlink_callback *));
#define NL_NONROOT_RECV 0x1
#define NL_NONROOT_SEND 0x2
extern void netlink_set_nonroot(int protocol, unsigned flag);
#endif /* __KERNEL__ */
#endif /* __LINUX_NETLINK_H */