// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <net/dst_metadata.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp)
{
	int err;
	struct socket *sock = NULL;
	struct sockaddr_in udp_addr;

	err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
	if (err < 0)
		goto error;

	if (cfg->bind_ifindex) {
		err = sock_bindtoindex(sock->sk, cfg->bind_ifindex, true);
		if (err < 0)
			goto error;
	}

	udp_addr.sin_family = AF_INET;
	udp_addr.sin_addr = cfg->local_ip;
	udp_addr.sin_port = cfg->local_udp_port;
	err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
			  sizeof(udp_addr));
	if (err < 0)
		goto error;

	if (cfg->peer_udp_port) {
		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->peer_ip;
		udp_addr.sin_port = cfg->peer_udp_port;
		err = kernel_connect(sock, (struct sockaddr *)&udp_addr,
				     sizeof(udp_addr), 0);
		if (err < 0)
			goto error;
	}

	sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;

	*sockp = sock;
	return 0;

error:
	if (sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
	}
	*sockp = NULL;
	return err;
}
EXPORT_SYMBOL(udp_sock_create4);
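
/* Usage sketch (illustrative only, not part of the original file): a tunnel
 * driver typically zeroes a struct udp_port_cfg, fills in the local address
 * and port, and lets this helper create and bind the kernel socket. The
 * port number below is the IANA-assigned VXLAN port, used purely as an
 * example:
 *
 *	struct udp_port_cfg udp_conf;
 *	struct socket *sock;
 *	int err;
 *
 *	memset(&udp_conf, 0, sizeof(udp_conf));
 *	udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
 *	udp_conf.local_udp_port = htons(4789);
 *	err = udp_sock_create4(net, &udp_conf, &sock);
 *	if (err < 0)
 *		return err;
 */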

void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *cfg)
{
	struct sock *sk = sock->sk;

	/* Disable multicast loopback */
	inet_clear_bit(MC_LOOP, sk);

	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
	inet_inc_convert_csum(sk);

	rcu_assign_sk_user_data(sk, cfg->sk_user_data);

	udp_sk(sk)->encap_type = cfg->encap_type;
	udp_sk(sk)->encap_rcv = cfg->encap_rcv;
	udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
	udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
	udp_sk(sk)->encap_destroy = cfg->encap_destroy;
	udp_sk(sk)->gro_receive = cfg->gro_receive;
	udp_sk(sk)->gro_complete = cfg->gro_complete;

	udp_tunnel_encap_enable(sk);
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
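
/* Usage sketch (illustrative only): after creating the socket, a driver
 * registers its encapsulation callbacks through struct udp_tunnel_sock_cfg;
 * encap_type = 1 is what tunnel drivers such as VXLAN pass so the receive
 * path invokes encap_rcv. The names vs and my_encap_recv below are
 * hypothetical placeholders:
 *
 *	struct udp_tunnel_sock_cfg tunnel_cfg;
 *
 *	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
 *	tunnel_cfg.sk_user_data = vs;
 *	tunnel_cfg.encap_type = 1;
 *	tunnel_cfg.encap_rcv = my_encap_recv;
 *	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 */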

void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	udp_tunnel_nic_add_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port);

void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	udp_tunnel_nic_del_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_drop_rx_port);

/* Notify netdevs that UDP port started listening */
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct udp_tunnel_info ti;
	struct net_device *dev;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		udp_tunnel_nic_add_port(dev, &ti);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port);

/* Notify netdevs that UDP port is no longer listening */
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct udp_tunnel_info ti;
	struct net_device *dev;

	ti.type = type;
	ti.sa_family = sk->sk_family;
	ti.port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		udp_tunnel_nic_del_port(dev, &ti);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port);

void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
			 __be16 df, __be16 src_port, __be16 dst_port,
			 bool xnet, bool nocheck)
{
	struct udphdr *uh;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;
	uh->len = htons(skb->len);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	udp_set_csum(nocheck, skb, src, dst, skb->len);

	iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
}
EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
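
/* Usage sketch (illustrative only): on transmit, a driver resolves the
 * route (for example with udp_tunnel_dst_lookup() below), pushes its own
 * encapsulation header onto the skb, and then hands the packet to this
 * helper to prepend the UDP header and send it:
 *
 *	udp_tunnel_xmit_skb(rt, sock->sk, skb, fl_saddr, key->u.ipv4.dst,
 *			    tos, ttl, df, src_port, dst_port,
 *			    !net_eq(net, dev_net(dev)), !udp_csum);
 *
 * Every argument name above is a placeholder for driver-local state.
 */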

void udp_tunnel_sock_release(struct socket *sock)
{
	/* Clear sk_user_data and wait a grace period so that concurrent
	 * RCU readers in the receive path are done with it before the
	 * socket goes away.
	 */
	rcu_assign_sk_user_data(sock->sk, NULL);
	synchronize_rcu();
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);

struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
				    const unsigned long *flags,
				    __be64 tunnel_id, int md_size)
{
	struct metadata_dst *tun_dst;
	struct ip_tunnel_info *info;

	if (family == AF_INET)
		tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size);
	else
		tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size);
	if (!tun_dst)
		return NULL;

	info = &tun_dst->u.tun_info;
	info->key.tp_src = udp_hdr(skb)->source;
	info->key.tp_dst = udp_hdr(skb)->dest;
	if (udp_hdr(skb)->check)
		__set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
	return tun_dst;
}
EXPORT_SYMBOL_GPL(udp_tun_rx_dst);
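
/* Usage sketch (illustrative only): a collect-metadata tunnel's encap_rcv
 * handler can build the receive dst from the outer headers and attach it
 * to the skb before passing the packet up the stack. The flags bitmap and
 * vni_to_tunnel_id() below are hypothetical placeholders:
 *
 *	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *	struct metadata_dst *tun_dst;
 *
 *	tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
 *				 vni_to_tunnel_id(vni), 0);
 *	if (!tun_dst)
 *		goto drop;
 *	skb_dst_set(skb, (struct dst_entry *)tun_dst);
 */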

struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
				     struct net_device *dev,
				     struct net *net, int oif,
				     __be32 *saddr,
				     const struct ip_tunnel_key *key,
				     __be16 sport, __be16 dport, u8 tos,
				     struct dst_cache *dst_cache)
{
	struct rtable *rt = NULL;
	struct flowi4 fl4;

#ifdef CONFIG_DST_CACHE
	if (dst_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}
#endif

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.flowi4_oif = oif;
	fl4.daddr = key->u.ipv4.dst;
	fl4.saddr = key->u.ipv4.src;
	fl4.fl4_dport = dport;
	fl4.fl4_sport = sport;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_flags = key->flow_flags;

	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
		ip_rt_put(rt);
		return ERR_PTR(-ELOOP);
	}
#ifdef CONFIG_DST_CACHE
	if (dst_cache)
		dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
#endif
	*saddr = fl4.saddr;
	return rt;
}
EXPORT_SYMBOL_GPL(udp_tunnel_dst_lookup);
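
/* Usage sketch (illustrative only): a driver resolves the route for an
 * outgoing tunnel packet, then transmits with udp_tunnel_xmit_skb() above.
 * All names besides the helper itself are placeholders:
 *
 *	rt = udp_tunnel_dst_lookup(skb, dev, net, 0, &saddr, &info->key,
 *				   src_port, dst_port, tos, dst_cache);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 */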

MODULE_DESCRIPTION("IPv4 Foo over UDP tunnel driver");
MODULE_LICENSE("GPL");