Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Stale SKB data pointer access across pskb_may_pull() calls in L2TP, from Haishuang Yan.

 2) Fix multicast frame handling in mac80211 AP code, from Felix Fietkau.

 3) mac80211 station hashtable insert errors not handled properly, fix from Johannes Berg.

 4) Fix TX descriptor count limit handling in e1000, from Alexander Duyck.

 5) Revert a buggy netdev refcount fix in netpoll, from Bjorn Helgaas.

 6) Must assign rtnl_link_ops of the device before registering it, fix in ip6_tunnel from Thadeu Lima de Souza Cascardo.

 7) Memory leak fix in tc action net exit, from WANG Cong.

 8) Add missing AF_KCM entries to name tables, from Dexuan Cui.

 9) Fix regression in GRE handling of csums wrt. FOU, from Alexander Duyck.

10) Fix memory allocation alignment and congestion map corruption in RDS, from Shamir Rabinovitch.

11) Fix default qdisc regression in tuntap driver, from Jason Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  bridge, netem: mark mailing lists as moderated
  tuntap: restore default qdisc
  mpls: find_outdev: check for err ptr in addition to NULL check
  ipv6: Count in extension headers in skb->network_header
  RDS: fix congestion map corruption for PAGE_SIZE > 4k
  RDS: memory allocated must be align to 8
  GRE: Disable segmentation offloads w/ CSUM and we are encapsulated via FOU
  net: add the AF_KCM entries to family name tables
  MAINTAINERS: intel-wired-lan list is moderated
  lib/test_bpf: Add additional BPF_ADD tests
  lib/test_bpf: Add test to check for result of 32-bit add that overflows
  lib/test_bpf: Add tests for unsigned BPF_JGT
  lib/test_bpf: Fix JMP_JSET tests
  VSOCK: Detach QP check should filter out non matching QPs.
  stmmac: fix adjust link call in case of a switch is attached
  af_packet: tone down the Tx-ring unsupported spew.
  net_sched: fix a memory leak in tc action
  samples/bpf: Enable powerpc support
  samples/bpf: Use llc in PATH, rather than a hardcoded value
  samples/bpf: Fix build breakage with map_perf_test_user.c
  ...
commit 9ef11ceb0d
@@ -4302,7 +4302,7 @@ F:	drivers/net/ethernet/agere/

 ETHERNET BRIDGE
 M:	Stephen Hemminger <stephen@networkplumber.org>
-L:	bridge@lists.linux-foundation.org
+L:	bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net:Bridge
 S:	Maintained
@@ -5751,7 +5751,7 @@ R:	Don Skidmore <donald.c.skidmore@intel.com>
 R:	Bruce Allan <bruce.w.allan@intel.com>
 R:	John Ronciak <john.ronciak@intel.com>
 R:	Mitch Williams <mitch.a.williams@intel.com>
-L:	intel-wired-lan@lists.osuosl.org
+L:	intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
 W:	http://www.intel.com/support/feedback.htm
 W:	http://e1000.sourceforge.net/
 Q:	http://patchwork.ozlabs.org/project/intel-wired-lan/list/
@@ -7576,7 +7576,7 @@ F:	drivers/infiniband/hw/nes/

 NETEM NETWORK EMULATOR
 M:	Stephen Hemminger <stephen@networkplumber.org>
-L:	netem@lists.linux-foundation.org
+L:	netem@lists.linux-foundation.org (moderated for non-subscribers)
 S:	Maintained
 F:	net/sched/sch_netem.c
@@ -166,6 +166,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
	CH_PCI_ID_TABLE_FENTRY(0x5099),	/* Custom 2x40G QSFP */
	CH_PCI_ID_TABLE_FENTRY(0x509a),	/* Custom T520-CR */
	CH_PCI_ID_TABLE_FENTRY(0x509b),	/* Custom T540-CR LOM */
+	CH_PCI_ID_TABLE_FENTRY(0x509c),	/* Custom T520-CR*/

	/* T6 adapters:
	 */
@@ -3106,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev,
	return __e1000_maybe_stop_tx(netdev, size);
 }

-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
+#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
 {
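For clarity on the macro change above: the old form added one descriptor unconditionally, over-reserving whenever the length is an exact multiple of the per-descriptor size (1 << X); the new form is a plain round-up division. A small userspace sketch of the difference — the 4096-byte descriptor limit (shift of 12) is only an illustrative assumption, not quoted from the driver:

```c
#include <stdio.h>

/* old: always adds one, even when S is an exact multiple of 2^X */
#define TXD_USE_COUNT_OLD(S, X) (((S) >> (X)) + 1)
/* new: round S up to the next multiple of 2^X, then divide */
#define TXD_USE_COUNT_NEW(S, X) (((S) + ((1 << (X)) - 1)) >> (X))

int main(void)
{
	unsigned int sizes[] = { 1, 4096, 4097, 8192 };
	unsigned int shift = 12;	/* hypothetical 4096-byte max per descriptor */

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("len %5u -> old %u, new %u descriptors\n",
		       sizes[i],
		       TXD_USE_COUNT_OLD(sizes[i], shift),
		       TXD_USE_COUNT_NEW(sizes[i], shift));
	return 0;
}
```

For 4096 bytes the old macro reserves 2 descriptors where 1 suffices; for non-multiples both agree.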
@@ -3256,12 +3256,29 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
			       nr_frags, mss);

	if (count) {
+		/* The descriptors needed is higher than other Intel drivers
+		 * due to a number of workarounds.  The breakdown is below:
+		 * Data descriptors: MAX_SKB_FRAGS + 1
+		 * Context Descriptor: 1
+		 * Keep head from touching tail: 2
+		 * Workarounds: 3
+		 */
+		int desc_needed = MAX_SKB_FRAGS + 7;
+
		netdev_sent_queue(netdev, skb->len);
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
+
+		/* 82544 potentially requires twice as many data descriptors
+		 * in order to guarantee buffers don't end on evenly-aligned
+		 * dwords
+		 */
+		if (adapter->pcix_82544)
+			desc_needed += MAX_SKB_FRAGS + 1;
+
		/* Make sure there is space in the ring for the next send. */
-		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);

		if (!skb->xmit_more ||
		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
@@ -8559,6 +8559,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
			     I40E_FLAG_WB_ON_ITR_CAPABLE |
			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
			     I40E_FLAG_NO_PCI_LINK_CHECK |
			     I40E_FLAG_100M_SGMII_CAPABLE |
			     I40E_FLAG_USE_SET_LLDP_MIB |
			     I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
@@ -288,10 +288,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
	    (priv->pcs == STMMAC_PCS_RTBI))
		goto out;

-	/* Never init EEE in case of a switch is attached */
-	if (priv->phydev->is_pseudo_fixed_link)
-		goto out;
-
	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;
@@ -771,10 +767,16 @@ static void stmmac_adjust_link(struct net_device *dev)

	spin_unlock_irqrestore(&priv->lock, flags);

-	/* At this stage, it could be needed to setup the EEE or adjust some
-	 * MAC related HW registers.
-	 */
-	priv->eee_enabled = stmmac_eee_init(priv);
+	if (phydev->is_pseudo_fixed_link)
+		/* Stop PHY layer to call the hook to adjust the link in case
+		 * of a switch is attached to the stmmac driver.
+		 */
+		phydev->irq = PHY_IGNORE_INTERRUPT;
+	else
+		/* At this stage, init the EEE if supported.
+		 * Never called in case of fixed_link.
+		 */
+		priv->eee_enabled = stmmac_eee_init(priv);
 }

 /**
@@ -865,10 +867,6 @@ static int stmmac_init_phy(struct net_device *dev)
		return -ENODEV;
	}

-	/* If attached to a switch, there is no reason to poll phy handler */
-	if (phydev->is_pseudo_fixed_link)
-		phydev->irq = PHY_IGNORE_INTERRUPT;
-
	pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);

@@ -1015,7 +1015,6 @@ static void tun_net_init(struct net_device *dev)
		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
-		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
		break;

	case IFF_TAP:
@@ -1027,7 +1026,6 @@ static void tun_net_init(struct net_device *dev)

		eth_hw_addr_random(dev);

-		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
		break;
	}
 }
@@ -1481,6 +1479,8 @@ static void tun_setup(struct net_device *dev)

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->destructor = tun_free_netdev;
+	/* We prefer our own queue length */
+	dev->tx_queue_len = TUN_READQ_SIZE;
 }

 /* Trivial set of netlink ops to allow deleting tun or tap
@@ -2120,7 +2120,10 @@ struct napi_gro_cb {
	/* Used in foo-over-udp, set in udp[46]_gro_receive */
	u8	is_ipv6:1;

-	/* 7 bit hole */
+	/* Used in GRE, set in fou/gue_gro_receive */
+	u8	is_fou:1;
+
+	/* 6 bit hole */

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;
@@ -135,6 +135,7 @@ void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
 static inline void tc_action_net_exit(struct tc_action_net *tn)
 {
	tcf_hashinfo_destroy(tn->ops, tn->hinfo);
+	kfree(tn->hinfo);
 }

 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
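For context, the leak fixed above comes from the per-netns action table being heap-allocated at init time. A hedged sketch of the init side this exit path pairs with — the exact 4.6-era signature and helper names are paraphrased, so treat them as illustrative rather than a quote of the header:

```c
/* Sketch (assumed layout): tc_action_net_init() kmallocs the hash table that
 * tc_action_net_exit() must now kfree() after tcf_hashinfo_destroy() has torn
 * down its contents.  Names mirror the hunk above; details are approximate.
 */
static inline int tc_action_net_init_sketch(struct tc_action_net *tn,
					    const struct tc_action_ops *ops,
					    unsigned int mask)
{
	int err;

	tn->hinfo = kmalloc(sizeof(*tn->hinfo), GFP_KERNEL);	/* allocated here... */
	if (!tn->hinfo)
		return -ENOMEM;
	tn->ops = ops;
	err = tcf_hashinfo_init(tn->hinfo, mask);
	if (err)
		kfree(tn->hinfo);	/* ...freed on error, and now also on net exit */
	return err;
}
```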
@@ -1001,6 +1001,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
 *	flag indicates that the PN was verified for replay protection.
 *	Note that this flag is also currently only supported when a frame
 *	is also decrypted (ie. @RX_FLAG_DECRYPTED must be set)
+ * @RX_FLAG_DUP_VALIDATED: The driver should set this flag if it did
+ *	de-duplication by itself.
 * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
 *	the frame.
 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
@@ -386,11 +386,9 @@ static inline struct list_head *sctp_list_dequeue(struct list_head *list)
 {
	struct list_head *result = NULL;

-	if (list->next != list) {
+	if (!list_empty(list)) {
		result = list->next;
-		list->next = result->next;
-		list->next->prev = list;
-		INIT_LIST_HEAD(result);
+		list_del_init(result);
	}
	return result;
 }
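The rewrite above swaps open-coded pointer surgery for the standard list.h helpers; list_del_init() unlinks the node and re-points it at itself, which is exactly what the three removed lines did. A standalone userspace illustration (the list helpers are minimal re-implementations for the demo, not the kernel header):

```c
#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

/* same shape as the patched sctp_list_dequeue() */
static struct list_head *dequeue(struct list_head *list)
{
	struct list_head *result = NULL;

	if (!list_empty(list)) {	/* same test as list->next != list */
		result = list->next;
		list_del_init(result);	/* same effect as the removed three lines */
	}
	return result;
}

int main(void)
{
	struct list_head q, a, b;

	INIT_LIST_HEAD(&q);
	list_add_tail(&a, &q);
	list_add_tail(&b, &q);

	assert(dequeue(&q) == &a);	/* FIFO order preserved */
	assert(dequeue(&q) == &b);
	assert(dequeue(&q) == NULL);	/* empty list returns NULL */
	return 0;
}
```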
lib/test_bpf.c (229 changed lines)
@@ -2443,6 +2443,22 @@ static struct bpf_test tests[] = {
		{ },
		{ { 0, 4294967295U } },
	},
+	{
+		"ALU_ADD_X: 2 + 4294967294 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_LD_IMM64(R1, 4294967294U),
+			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
	{
		"ALU64_ADD_X: 1 + 2 = 3",
		.u.insns_int = {
@@ -2467,6 +2483,23 @@ static struct bpf_test tests[] = {
		{ },
		{ { 0, 4294967295U } },
	},
+	{
+		"ALU64_ADD_X: 2 + 4294967294 = 4294967296",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_LD_IMM64(R1, 4294967294U),
+			BPF_LD_IMM64(R2, 4294967296ULL),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
+			BPF_MOV32_IMM(R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
	/* BPF_ALU | BPF_ADD | BPF_K */
	{
		"ALU_ADD_K: 1 + 2 = 3",
@@ -2501,6 +2534,21 @@ static struct bpf_test tests[] = {
		{ },
		{ { 0, 4294967295U } },
	},
+	{
+		"ALU_ADD_K: 4294967294 + 2 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967294U),
+			BPF_ALU32_IMM(BPF_ADD, R0, 2),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
	{
		"ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
		.u.insns_int = {
@@ -2517,6 +2565,70 @@ static struct bpf_test tests[] = {
		{ },
		{ { 0, 0x1 } },
	},
+	{
+		"ALU_ADD_K: 0 + 0xffff = 0xffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffff),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x7fffffff),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x80000000),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x80008000),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
	{
		"ALU64_ADD_K: 1 + 2 = 3",
		.u.insns_int = {
@@ -2550,6 +2662,22 @@ static struct bpf_test tests[] = {
		{ },
		{ { 0, 2147483647 } },
	},
+	{
+		"ALU64_ADD_K: 4294967294 + 2 = 4294967296",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967294U),
+			BPF_LD_IMM64(R1, 4294967296ULL),
+			BPF_ALU64_IMM(BPF_ADD, R0, 2),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
	{
		"ALU64_ADD_K: 2147483646 + -2147483647 = -1",
		.u.insns_int = {
@@ -2593,6 +2721,70 @@ static struct bpf_test tests[] = {
		{ },
		{ { 0, 0x1 } },
	},
+	{
+		"ALU64_ADD_K: 0 + 0xffff = 0xffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffff),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x7fffffff),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffffffff80000000LL),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffffffff80008000LL),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
	/* BPF_ALU | BPF_SUB | BPF_X */
	{
		"ALU_SUB_X: 3 - 1 = 2",
@@ -4222,6 +4414,20 @@ static struct bpf_test tests[] = {
		{ },
		{ { 0, 1 } },
	},
+	{
+		"JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
	/* BPF_JMP | BPF_JGE | BPF_K */
	{
		"JMP_JGE_K: if (3 >= 2) return 1",
@@ -4303,7 +4509,7 @@ static struct bpf_test tests[] = {
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, 0),
			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
+			BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
@@ -4317,7 +4523,7 @@ static struct bpf_test tests[] = {
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, 0),
			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JNE, R1, 0xffffffff, 1),
+			BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
@@ -4404,6 +4610,21 @@ static struct bpf_test tests[] = {
		{ },
		{ { 0, 1 } },
	},
+	{
+		"JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, 1),
+			BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
	/* BPF_JMP | BPF_JGE | BPF_X */
	{
		"JMP_JGE_X: if (3 >= 2) return 1",
@@ -4474,7 +4695,7 @@ static struct bpf_test tests[] = {
			BPF_ALU32_IMM(BPF_MOV, R0, 0),
			BPF_LD_IMM64(R1, 3),
			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
@@ -4489,7 +4710,7 @@ static struct bpf_test tests[] = {
			BPF_ALU32_IMM(BPF_MOV, R0, 0),
			BPF_LD_IMM64(R1, 3),
			BPF_LD_IMM64(R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
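The added tests pin down eBPF's wrap-around rules: BPF_ALU (32-bit) operations truncate the destination to 32 bits, while BPF_ALU64 keeps the full 64-bit result, and BPF_JGT compares unsigned. A plain-C analogue of the two "2 + 4294967294" cases above (ordinary C arithmetic, not the BPF interpreter):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 2, b = 4294967294U;

	/* BPF_ALU32 semantics: operate on the low 32 bits, zero-extend the result */
	uint64_t alu32 = (uint32_t)((uint32_t)a + (uint32_t)b);
	/* BPF_ALU64 semantics: full 64-bit addition */
	uint64_t alu64 = a + b;

	assert(alu32 == 0);			/* "ALU_ADD_X: 2 + 4294967294 = 0" */
	assert(alu64 == 4294967296ULL);		/* "ALU64_ADD_X: 2 + 4294967294 = 4294967296" */

	/* unsigned BPF_JGT: (u64)-1 is the largest value, so -1 > 1 holds */
	assert((uint64_t)-1 > 1);
	return 0;
}
```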
@@ -4439,6 +4439,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
+		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
@@ -603,6 +603,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
	const struct net_device_ops *ops;
	int err;

+	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

@@ -669,7 +670,6 @@ int netpoll_setup(struct netpoll *np)
		goto unlock;
	}
	dev_hold(ndev);
-	np->dev = ndev;

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
@@ -770,7 +770,6 @@ int netpoll_setup(struct netpoll *np)
	return 0;

 put:
-	np->dev = NULL;
	dev_put(ndev);
 unlock:
	rtnl_unlock();
@@ -221,7 +221,8 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
-  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
+  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
+  "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -237,7 +238,8 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
-  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
+  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
+  "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -253,7 +255,8 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
-  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
+  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
+  "clock-AF_MAX"
 };

 /*
@@ -203,6 +203,9 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

+	/* Flag this frame as already having an outer encap header */
+	NAPI_GRO_CB(skb)->is_fou = 1;
+
	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
@@ -368,6 +371,9 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

+	/* Flag this frame as already having an outer encap header */
+	NAPI_GRO_CB(skb)->is_fou = 1;
+
	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
@@ -150,6 +150,14 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

+	/* We can only support GRE_CSUM if we can track the location of
+	 * the GRE header.  In the case of FOU/GUE we cannot because the
+	 * outer UDP header displaces the GRE header leaving us in a state
+	 * of limbo.
+	 */
+	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
+		goto out;
+
	type = greh->protocol;

	rcu_read_lock();
@@ -862,9 +862,16 @@ static void __gre_tunnel_init(struct net_device *dev)
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
-		/* TCP offload with GRE SEQ is not supported. */
-		dev->features    |= NETIF_F_GSO_SOFTWARE;
-		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+		/* TCP offload with GRE SEQ is not supported, nor
+		 * can we support 2 levels of outer headers requiring
+		 * an update.
+		 */
+		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
+		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
+			dev->features    |= NETIF_F_GSO_SOFTWARE;
+			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
@@ -1090,8 +1090,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
-			int transhdrlen, int mtu, unsigned int flags,
-			const struct flowi6 *fl6)
+			int exthdrlen, int transhdrlen, int mtu,
+			unsigned int flags, const struct flowi6 *fl6)

 {
	struct sk_buff *skb;
@@ -1116,7 +1116,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
-		skb_reset_network_header(skb);
+		skb_set_network_header(skb, exthdrlen);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;
@@ -1358,7 +1358,7 @@ emsgsize:
	    (rt->dst.dev->features & NETIF_F_UFO) &&
	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
-					  hh_len, fragheaderlen,
+					  hh_len, fragheaderlen, exthdrlen,
					  transhdrlen, mtu, flags, fl6);
		if (err)
			goto error;
@@ -252,12 +252,12 @@ static int ip6_tnl_create2(struct net_device *dev)

	t = netdev_priv(dev);

+	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);
-	dev->rtnl_link_ops = &ip6_link_ops;

	dev_hold(dev);
	ip6_tnl_link(ip6n, t);
@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
	struct l2tp_tunnel *tunnel = NULL;
	int length;

-	/* Point to L2TP header */
-	optr = ptr = skb->data;
-
	if (!pskb_may_pull(skb, 4))
		goto discard;

+	/* Point to L2TP header */
+	optr = ptr = skb->data;
	session_id = ntohl(*((__be32 *) ptr));
	ptr += 4;

@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
		if (!pskb_may_pull(skb, length))
			goto discard;

+		/* Point to L2TP header */
+		optr = ptr = skb->data;
+		ptr += 4;
		pr_debug("%s: ip recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
	}
@@ -136,12 +136,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
	struct l2tp_tunnel *tunnel = NULL;
	int length;

-	/* Point to L2TP header */
-	optr = ptr = skb->data;
-
	if (!pskb_may_pull(skb, 4))
		goto discard;

+	/* Point to L2TP header */
+	optr = ptr = skb->data;
	session_id = ntohl(*((__be32 *) ptr));
	ptr += 4;

@@ -169,6 +168,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
		if (!pskb_may_pull(skb, length))
			goto discard;

+		/* Point to L2TP header */
+		optr = ptr = skb->data;
+		ptr += 4;
		pr_debug("%s: ip recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
	}
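Both L2TP hunks re-read skb->data after every pskb_may_pull(), because that call may reallocate the skb head and leave earlier pointers stale. A hedged sketch of the pattern the fix enforces (the handler is hypothetical; only the pointer-revalidation rule is the point):

```c
#include <linux/skbuff.h>

/* Hypothetical receive handler: any pointer derived from skb->data before
 * pskb_may_pull() may dangle afterwards, since the pull can move skb->head.
 * Reload the pointer only after a successful pull, as the l2tp hunks now do.
 */
static int example_recv(struct sk_buff *skb)
{
	unsigned char *ptr;

	if (!pskb_may_pull(skb, 4))	/* may reallocate skb->head / skb->data */
		goto discard;

	ptr = skb->data;		/* safe: taken after the pull */

	/* ... parse the 4 pulled bytes via ptr ... */
	return ptr[0];

discard:
	kfree_skb(skb);
	return 0;
}
```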
@@ -343,8 +343,10 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
				     struct ieee80211_chanctx *ctx,
				     const struct cfg80211_chan_def *chandef)
 {
-	if (cfg80211_chandef_identical(&ctx->conf.def, chandef))
+	if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+		ieee80211_recalc_chanctx_min_def(local, ctx);
		return;
+	}

	WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));

@@ -1719,6 +1719,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
 void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+enum ieee80211_sta_rx_bandwidth
+ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
+enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
+void ieee80211_sta_set_rx_nss(struct sta_info *sta);
 void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
				 struct ieee80211_mgmt *mgmt);
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
@@ -530,7 +530,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
	const u8 *target_addr, *orig_addr;
	const u8 *da;
	u8 target_flags, ttl, flags;
-	u32 orig_sn, target_sn, lifetime, target_metric;
+	u32 orig_sn, target_sn, lifetime, target_metric = 0;
	bool reply = false;
	bool forward = true;
	bool root_is_gate;
@@ -67,6 +67,7 @@

 static const struct rhashtable_params sta_rht_params = {
	.nelem_hint = 3, /* start small */
+	.insecure_elasticity = true, /* Disable chain-length checks. */
	.automatic_shrinking = true,
	.head_offset = offsetof(struct sta_info, hash_node),
	.key_offset = offsetof(struct sta_info, addr),
@@ -258,11 +259,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
 }

 /* Caller must hold local->sta_mtx */
-static void sta_info_hash_add(struct ieee80211_local *local,
-			      struct sta_info *sta)
+static int sta_info_hash_add(struct ieee80211_local *local,
+			     struct sta_info *sta)
 {
-	rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
-			       sta_rht_params);
+	return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
+				      sta_rht_params);
 }

 static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -524,7 +525,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
	set_sta_flag(sta, WLAN_STA_BLOCK_BA);

	/* make the station visible */
-	sta_info_hash_add(local, sta);
+	err = sta_info_hash_add(local, sta);
+	if (err)
+		goto out_drop_sta;

	list_add_tail_rcu(&sta->list, &local->sta_list);

@@ -557,6 +560,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 out_remove:
	sta_info_hash_del(local, sta);
	list_del_rcu(&sta->list);
 out_drop_sta:
	local->num_sta--;
	synchronize_net();
	__cleanup_single_sta(sta);
@@ -377,7 +377,6 @@ DECLARE_EWMA(signal, 1024, 8)
 * @uploaded: set to true when sta is uploaded to the driver
 * @sta: station information we share with the driver
 * @sta_state: duplicates information about station state (for debug)
- * @beacon_loss_count: number of times beacon loss has triggered
 * @rcu_head: RCU head used for freeing this station struct
 * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
 *	taken from HT/VHT capabilities or VHT operating mode notification
@@ -4,7 +4,7 @@
 * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2014, Intel Corporation
 * Copyright 2014  Intel Mobile Communications GmbH
- * Copyright 2015  Intel Deutschland GmbH
+ * Copyright 2015 - 2016 Intel Deutschland GmbH
 *
 * This file is GPLv2 as found in COPYING.
 */
@@ -15,6 +15,7 @@
 #include <linux/rtnetlink.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
+#include "rate.h"

 /* give usermode some time for retries in setting up the TDLS session */
 #define TDLS_PEER_SETUP_TIMEOUT	(15 * HZ)
@@ -302,7 +303,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
	/* IEEE802.11ac-2013 Table E-4 */
	u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
	struct cfg80211_chan_def uc = sta->tdls_chandef;
-	enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+	enum nl80211_chan_width max_width = ieee80211_sta_cap_chan_bw(sta);
	int i;

	/* only support upgrading non-narrow channels up to 80Mhz */
@@ -313,7 +314,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
	if (max_width > NL80211_CHAN_WIDTH_80)
		max_width = NL80211_CHAN_WIDTH_80;

-	if (uc.width == max_width)
+	if (uc.width >= max_width)
		return;
	/*
	 * Channel usage constrains in the IEEE802.11ac-2013 specification only
@@ -324,6 +325,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
	for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
		if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
			uc.center_freq1 = centers_80mhz[i];
+			uc.center_freq2 = 0;
			uc.width = NL80211_CHAN_WIDTH_80;
			break;
		}
@@ -332,7 +334,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
		return;

	/* proceed to downgrade the chandef until usable or the same */
-	while (uc.width > max_width &&
+	while (uc.width > max_width ||
	       !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
					      sdata->wdev.iftype))
		ieee80211_chandef_downgrade(&uc);
@@ -1242,18 +1244,44 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
	return ret;
 }

-static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
+					 struct sta_info *sta)
 {
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_chanctx_conf *conf;
	struct ieee80211_chanctx *ctx;
+	enum nl80211_chan_width width;
+	struct ieee80211_supported_band *sband;

	mutex_lock(&local->chanctx_mtx);
	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
					 lockdep_is_held(&local->chanctx_mtx));
	if (conf) {
+		width = conf->def.width;
+		sband = local->hw.wiphy->bands[conf->def.chan->band];
		ctx = container_of(conf, struct ieee80211_chanctx, conf);
		ieee80211_recalc_chanctx_chantype(local, ctx);
+
+		/* if width changed and a peer is given, update its BW */
+		if (width != conf->def.width && sta &&
+		    test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) {
+			enum ieee80211_sta_rx_bandwidth bw;
+
+			bw = ieee80211_chan_width_to_rx_bw(conf->def.width);
+			bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+			if (bw != sta->sta.bandwidth) {
+				sta->sta.bandwidth = bw;
+				rate_control_rate_update(local, sband, sta,
+							 IEEE80211_RC_BW_CHANGED);
+				/*
+				 * if a TDLS peer BW was updated, we need to
+				 * recalc the chandef width again, to get the
+				 * correct chanctx min_def
+				 */
+				ieee80211_recalc_chanctx_chantype(local, ctx);
+			}
+		}
+
	}
	mutex_unlock(&local->chanctx_mtx);
 }
@@ -1350,8 +1378,6 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
		break;
	}

-	iee80211_tdls_recalc_chanctx(sdata);
-
	mutex_lock(&local->sta_mtx);
	sta = sta_info_get(sdata, peer);
	if (!sta) {
@@ -1360,6 +1386,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
			break;
		}

+		iee80211_tdls_recalc_chanctx(sdata, sta);
		iee80211_tdls_recalc_ht_protection(sdata, sta);

		set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
@@ -1390,7 +1417,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
		iee80211_tdls_recalc_ht_protection(sdata, NULL);
		mutex_unlock(&local->sta_mtx);

-		iee80211_tdls_recalc_chanctx(sdata);
+		iee80211_tdls_recalc_chanctx(sdata, NULL);
		break;
	default:
		ret = -ENOTSUPP;
@@ -1116,11 +1116,15 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
		reset_agg_timer = true;
	} else {
		queued = true;
+		if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
+			clear_sta_flag(tx->sta, WLAN_STA_SP);
+			ps_dbg(tx->sta->sdata,
+			       "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
+			       tx->sta->sta.addr, tx->sta->sta.aid);
+		}
		info->control.vif = &tx->sdata->vif;
		info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-		info->flags &= ~(IEEE80211_TX_TEMPORARY_FLAGS |
-				 IEEE80211_TX_CTL_NO_PS_BUFFER |
-				 IEEE80211_TX_STATUS_EOSP);
+		info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
		__skb_queue_tail(&tid_tx->pending, skb);
		if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
			purge_skb = __skb_dequeue(&tid_tx->pending);
@@ -1247,7 +1251,8 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
	struct txq_info *txqi;
	u8 ac;

-	if (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)
+	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
+	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
		goto tx_normal;

	if (!ieee80211_is_data(hdr->frame_control))
@@ -319,7 +319,30 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
	return IEEE80211_STA_RX_BW_80;
 }

-static enum ieee80211_sta_rx_bandwidth
+enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
+{
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+	u32 cap_width;
+
+	if (!vht_cap->vht_supported) {
+		if (!sta->sta.ht_cap.ht_supported)
+			return NL80211_CHAN_WIDTH_20_NOHT;
+
+		return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+				NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20;
+	}
+
+	cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+
+	if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
+		return NL80211_CHAN_WIDTH_160;
+	else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+		return NL80211_CHAN_WIDTH_80P80;
+
+	return NL80211_CHAN_WIDTH_80;
+}
+
+enum ieee80211_sta_rx_bandwidth
 ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
 {
	switch (width) {
@@ -347,10 +370,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)

	bw = ieee80211_sta_cap_rx_bw(sta);
	bw = min(bw, sta->cur_max_bandwidth);
-
-	/* do not cap the BW of TDLS WIDER_BW peers by the bss */
-	if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
-		bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+	bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));

	return bw;
 }
@@ -543,6 +543,9 @@ static struct net_device *find_outdev(struct net *net,
	if (!dev)
		return ERR_PTR(-ENODEV);

+	if (IS_ERR(dev))
+		return dev;
+
	/* The caller is holding rtnl anyways, so release the dev reference */
	dev_put(dev);

@@ -4151,7 +4151,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,

	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
-		WARN(1, "Tx-ring is not supported.\n");
+		net_warn_ratelimited("Tx-ring is not supported.\n");
		goto out;
	}

@@ -796,7 +796,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,

		addr = kmap_atomic(sg_page(&frag->f_sg));

-		src = addr + frag_off;
+		src = addr + frag->f_sg.offset + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
@@ -135,8 +135,8 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			if (rem->r_offset != 0)
				rds_stats_inc(s_page_remainder_hit);

-			rem->r_offset += bytes;
-			if (rem->r_offset == PAGE_SIZE) {
+			rem->r_offset += ALIGN(bytes, 8);
+			if (rem->r_offset >= PAGE_SIZE) {
				__free_page(rem->r_page);
				rem->r_page = NULL;
			}
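The RDS fix advances the remainder offset by the allocation size rounded up to 8 bytes, so the next caller always starts on an 8-byte boundary (and the page is released once the rounded offset reaches or passes PAGE_SIZE). A small standalone illustration of the same round-up — a userspace re-statement of the kernel ALIGN() idea, with example sizes chosen only for the demo:

```c
#include <assert.h>
#include <stddef.h>

/* round x up to the next multiple of the power-of-two 'a' */
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((size_t)(a) - 1))

int main(void)
{
	size_t offset = 0;
	size_t sizes[] = { 13, 8, 3 };	/* example allocation sizes */

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		assert(offset % 8 == 0);	/* every caller sees 8-byte alignment */
		offset += ALIGN_UP(sizes[i], 8);	/* 13 -> 16, 8 -> 8, 3 -> 8 */
	}
	assert(offset == 32);
	return 0;
}
```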
@@ -705,7 +705,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
	/* Check whether this chunk and all the rest of pending data will fit
	 * or delay in hopes of bundling a full sized packet.
	 */
-	if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead)
+	if (chunk->skb->len + q->out_qlen >
+	    transport->pathmtu - packet->overhead - sizeof(sctp_data_chunk_t) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

@@ -842,7 +842,7 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
	 * qp_handle.
	 */
	if (vmci_handle_is_invalid(e_payload->handle) ||
-	    vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+	    !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
		return;

	/* We don't ask for delayed CBs when we subscribe to this event (we
@@ -2154,7 +2154,7 @@ module_exit(vmci_transport_exit);

 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.2.0-k");
+MODULE_VERSION("1.0.3.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
@@ -76,16 +76,10 @@ HOSTLOADLIBES_offwaketime += -lelf
 HOSTLOADLIBES_spintest += -lelf
 HOSTLOADLIBES_map_perf_test += -lelf -lrt

-# point this to your LLVM backend with bpf support
-LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
-
-# asm/sysreg.h inline assmbly used by it is incompatible with llvm.
-# But, ehere is not easy way to fix it, so just exclude it since it is
+# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
+# But, there is no easy way to fix it, so just exclude it since it is
 # useless for BPF samples.
 $(obj)/%.o: $(src)/%.c
	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
		-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
-		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
-	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-		-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
-		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
+		-O2 -emit-llvm -c $< -o -| llc -march=bpf -filetype=obj -o $@
@@ -82,6 +82,7 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
 #define PT_REGS_FP(x) ((x)->bp)
 #define PT_REGS_RC(x) ((x)->ax)
 #define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->ip)

 #elif defined(__s390x__)

@@ -94,6 +95,7 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
 #define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
 #define PT_REGS_RC(x) ((x)->gprs[2])
 #define PT_REGS_SP(x) ((x)->gprs[15])
+#define PT_REGS_IP(x) ((x)->ip)

 #elif defined(__aarch64__)

@@ -106,6 +108,30 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
 #define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
 #define PT_REGS_RC(x) ((x)->regs[0])
 #define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->pc)
+
+#elif defined(__powerpc__)
+
+#define PT_REGS_PARM1(x) ((x)->gpr[3])
+#define PT_REGS_PARM2(x) ((x)->gpr[4])
+#define PT_REGS_PARM3(x) ((x)->gpr[5])
+#define PT_REGS_PARM4(x) ((x)->gpr[6])
+#define PT_REGS_PARM5(x) ((x)->gpr[7])
+#define PT_REGS_RC(x) ((x)->gpr[3])
+#define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->nip)
+
+#endif
+
+#ifdef __powerpc__
+#define BPF_KPROBE_READ_RET_IP(ip, ctx)		({ (ip) = (ctx)->link; })
+#define BPF_KRETPROBE_READ_RET_IP		BPF_KPROBE_READ_RET_IP
+#else
+#define BPF_KPROBE_READ_RET_IP(ip, ctx)		({				\
+		bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
+#define BPF_KRETPROBE_READ_RET_IP(ip, ctx)	({				\
+		bpf_probe_read(&(ip), sizeof(ip),				\
+				(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
+#endif

 #endif
@@ -17,6 +17,7 @@
 #include <linux/bpf.h>
 #include <string.h>
 #include <time.h>
+#include <sys/resource.h>
 #include "libbpf.h"
 #include "bpf_load.h"

@@ -34,7 +34,7 @@ struct bpf_map_def SEC("maps") stackmap = {
 #define PROG(foo) \
 int foo(struct pt_regs *ctx) \
 { \
-	long v = ctx->ip, *val; \
+	long v = PT_REGS_IP(ctx), *val; \
	\
	val = bpf_map_lookup_elem(&my_map, &v); \
	bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \
@@ -27,10 +27,10 @@ int bpf_prog2(struct pt_regs *ctx)
	long init_val = 1;
	long *value;

-	/* x64/s390x specific: read ip of kfree_skb caller.
+	/* read ip of kfree_skb caller.
	 * non-portable version of __builtin_return_address(0)
	 */
-	bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx));
+	BPF_KPROBE_READ_RET_IP(loc, ctx);

	value = bpf_map_lookup_elem(&my_map, &loc);
	if (value)
@@ -40,7 +40,7 @@ int bpf_prog2(struct pt_regs *ctx)
	long ip = 0;

	/* get ip address of kmem_cache_alloc_node() caller */
-	bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip)));
+	BPF_KRETPROBE_READ_RET_IP(ip, ctx);

	struct pair v = {
		.val = bpf_ktime_get_ns(),