// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright (C) 2018-2021 Intel Corporation
 *
 * Transmit and frame generation functions.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/timekeeping.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <asm/unaligned.h>
#include <net/fq_impl.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "wme.h"
#include "rate.h"

/* misc utils */

static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
|
|
|
|
struct sk_buff *skb, int group_addr,
|
2008-06-25 19:36:27 +08:00
|
|
|
int next_frag_len)
|
2007-07-27 21:43:22 +08:00
|
|
|
{
|
2013-07-08 22:55:53 +08:00
|
|
|
int rate, mrate, erp, dur, i, shift = 0;
|
2008-05-15 18:55:27 +08:00
|
|
|
struct ieee80211_rate *txrate;
|
2007-07-27 21:43:22 +08:00
|
|
|
struct ieee80211_local *local = tx->local;
|
2008-01-25 02:38:38 +08:00
|
|
|
struct ieee80211_supported_band *sband;
|
2008-07-16 09:44:13 +08:00
|
|
|
struct ieee80211_hdr *hdr;
|
2011-11-16 22:28:55 +08:00
|
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
2013-07-08 22:55:53 +08:00
|
|
|
struct ieee80211_chanctx_conf *chanctx_conf;
|
|
|
|
u32 rate_flags = 0;
|
|
|
|
|
2016-12-15 03:46:57 +08:00
|
|
|
/* assume HW handles this */
|
|
|
|
if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
|
|
|
|
return 0;
|
|
|
|
|
2013-07-08 22:55:53 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
|
|
|
|
if (chanctx_conf) {
|
|
|
|
shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
|
|
|
|
rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
2008-10-21 18:40:02 +08:00
|
|
|
|
|
|
|
/* uh huh? */
|
2013-04-22 22:14:41 +08:00
|
|
|
if (WARN_ON_ONCE(tx->rate.idx < 0))
|
2008-10-21 18:40:02 +08:00
|
|
|
return 0;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2012-07-23 21:12:51 +08:00
|
|
|
sband = local->hw.wiphy->bands[info->band];
|
2013-04-22 22:14:41 +08:00
|
|
|
txrate = &sband->bitrates[tx->rate.idx];
|
2008-01-25 02:38:38 +08:00
|
|
|
|
2008-10-21 18:40:02 +08:00
|
|
|
erp = txrate->flags & IEEE80211_RATE_ERP_G;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2020-09-22 10:28:11 +08:00
|
|
|
/* device is expected to do this */
|
|
|
|
if (sband->band == NL80211_BAND_S1GHZ)
|
|
|
|
return 0;
|
|
|
|
|
2007-07-27 21:43:22 +08:00
|
|
|
/*
|
|
|
|
* data and mgmt (except PS Poll):
|
|
|
|
* - during CFP: 32768
|
|
|
|
* - during contention period:
|
|
|
|
* if addr1 is group address: 0
|
|
|
|
* if more fragments = 0 and addr1 is individual address: time to
|
|
|
|
* transmit one ACK plus SIFS
|
|
|
|
* if more fragments = 1 and addr1 is individual address: time to
|
|
|
|
* transmit next fragment plus 2 x ACK plus 3 x SIFS
|
|
|
|
*
|
|
|
|
* IEEE 802.11, 9.6:
|
|
|
|
* - control response frame (CTS or ACK) shall be transmitted using the
|
|
|
|
* same rate as the immediately previous frame in the frame exchange
|
|
|
|
* sequence, if this rate belongs to the PHY mandatory rates, or else
|
|
|
|
* at the highest possible rate belonging to the PHY rates in the
|
|
|
|
* BSSBasicRateSet
|
|
|
|
*/
|
2011-11-16 22:28:55 +08:00
|
|
|
hdr = (struct ieee80211_hdr *)skb->data;
|
2008-07-16 09:44:13 +08:00
|
|
|
if (ieee80211_is_ctl(hdr->frame_control)) {
|
2007-07-27 21:43:22 +08:00
|
|
|
/* TODO: These control frames are not currently sent by
|
2008-09-11 06:01:56 +08:00
|
|
|
* mac80211, but should they be implemented, this function
|
2007-07-27 21:43:22 +08:00
|
|
|
* needs to be updated to support duration field calculation.
|
|
|
|
*
|
|
|
|
* RTS: time needed to transmit pending data/mgmt frame plus
|
|
|
|
* one CTS frame plus one ACK frame plus 3 x SIFS
|
|
|
|
* CTS: duration of immediately previous RTS minus time
|
|
|
|
* required to transmit CTS and its SIFS
|
|
|
|
* ACK: 0 if immediately previous directed data/mgmt had
|
|
|
|
* more=0, with more=1 duration in ACK frame is duration
|
|
|
|
* from previous frame minus time needed to transmit ACK
|
|
|
|
* and its SIFS
|
|
|
|
* PS Poll: BIT(15) | BIT(14) | aid
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* data/mgmt */
|
|
|
|
if (0 /* FIX: data/mgmt during CFP */)
|
2008-06-25 19:36:27 +08:00
|
|
|
return cpu_to_le16(32768);
|
2007-07-27 21:43:22 +08:00
|
|
|
|
|
|
|
if (group_addr) /* Group address as the destination - no ACK */
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Individual destination address:
|
|
|
|
* IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
|
|
|
|
* CTS and ACK frames shall be transmitted using the highest rate in
|
|
|
|
* basic rate set that is less than or equal to the rate of the
|
|
|
|
* immediately previous frame and that is using the same modulation
|
|
|
|
* (CCK or OFDM). If no basic rate set matches with these requirements,
|
|
|
|
* the highest mandatory rate of the PHY that is less than or equal to
|
|
|
|
* the rate of the previous frame is used.
|
|
|
|
* Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
|
|
|
|
*/
|
|
|
|
rate = -1;
|
2008-01-25 02:38:38 +08:00
|
|
|
/* use lowest available if everything fails */
|
|
|
|
mrate = sband->bitrates[0].bitrate;
|
|
|
|
for (i = 0; i < sband->n_bitrates; i++) {
|
|
|
|
struct ieee80211_rate *r = &sband->bitrates[i];
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2008-01-25 02:38:38 +08:00
|
|
|
if (r->bitrate > txrate->bitrate)
|
|
|
|
break;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2013-07-08 22:55:53 +08:00
|
|
|
if ((rate_flags & r->flags) != rate_flags)
|
|
|
|
continue;
|
|
|
|
|
2008-10-11 07:51:51 +08:00
|
|
|
if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
|
2013-07-08 22:55:53 +08:00
|
|
|
rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
|
2008-01-25 02:38:38 +08:00
|
|
|
|
|
|
|
switch (sband->band) {
|
2021-10-18 18:00:54 +08:00
|
|
|
case NL80211_BAND_2GHZ:
|
|
|
|
case NL80211_BAND_LC: {
|
2008-01-25 02:38:38 +08:00
|
|
|
u32 flag;
|
|
|
|
if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
|
|
|
|
flag = IEEE80211_RATE_MANDATORY_G;
|
|
|
|
else
|
|
|
|
flag = IEEE80211_RATE_MANDATORY_B;
|
|
|
|
if (r->flags & flag)
|
|
|
|
mrate = r->bitrate;
|
|
|
|
break;
|
|
|
|
}
|
2016-04-12 21:56:15 +08:00
|
|
|
case NL80211_BAND_5GHZ:
|
2019-08-02 19:30:58 +08:00
|
|
|
case NL80211_BAND_6GHZ:
|
2008-01-25 02:38:38 +08:00
|
|
|
if (r->flags & IEEE80211_RATE_MANDATORY_A)
|
|
|
|
mrate = r->bitrate;
|
|
|
|
break;
|
2020-06-02 14:22:47 +08:00
|
|
|
case NL80211_BAND_S1GHZ:
|
2016-04-12 21:56:15 +08:00
|
|
|
case NL80211_BAND_60GHZ:
|
2012-07-02 14:32:32 +08:00
|
|
|
/* TODO, for now fall through */
|
2016-04-12 21:56:15 +08:00
|
|
|
case NUM_NL80211_BANDS:
|
2008-01-25 02:38:38 +08:00
|
|
|
WARN_ON(1);
|
|
|
|
break;
|
|
|
|
}
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
if (rate == -1) {
|
|
|
|
/* No matching basic rate found; use highest suitable mandatory
|
|
|
|
* PHY rate */
|
2013-07-08 22:55:53 +08:00
|
|
|
rate = DIV_ROUND_UP(mrate, 1 << shift);
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2011-11-22 04:34:30 +08:00
|
|
|
/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
|
|
|
|
if (ieee80211_is_data_qos(hdr->frame_control) &&
|
2012-05-28 20:06:25 +08:00
|
|
|
*(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
|
2011-11-22 04:34:30 +08:00
|
|
|
dur = 0;
|
|
|
|
else
|
|
|
|
/* Time needed to transmit ACK
|
|
|
|
* (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
|
|
|
|
* to closest integer */
|
2012-04-11 14:47:56 +08:00
|
|
|
dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
|
2013-07-08 22:55:51 +08:00
|
|
|
tx->sdata->vif.bss_conf.use_short_preamble,
|
|
|
|
shift);
|
2007-07-27 21:43:22 +08:00
|
|
|
|
|
|
|
if (next_frag_len) {
|
|
|
|
/* Frame is fragmented: duration increases with time needed to
|
|
|
|
* transmit next fragment plus ACK and 2 x SIFS. */
|
|
|
|
dur *= 2; /* ACK + SIFS */
|
|
|
|
/* next fragment */
|
2012-04-11 14:47:56 +08:00
|
|
|
dur += ieee80211_frame_duration(sband->band, next_frag_len,
|
2008-01-25 02:38:38 +08:00
|
|
|
txrate->bitrate, erp,
|
2013-07-08 22:55:51 +08:00
|
|
|
tx->sdata->vif.bss_conf.use_short_preamble,
|
|
|
|
shift);
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2008-06-25 19:36:27 +08:00
|
|
|
return cpu_to_le16(dur);
|
2007-07-27 21:43:22 +08:00
|
|
|
}

/* tx handlers */

static ieee80211_tx_result debug_noinline
|
|
|
|
ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
|
|
|
|
{
|
|
|
|
struct ieee80211_local *local = tx->local;
|
mac80211: check uapsd state for dynamic power save
To make U-APSD client mode effective, we must not wake up from dynamic power
save when transmitting frames. So if dynamic power save is enabled, it needs
check the queue the transmitted packet is in and decide if we need to wake
up or not.
In a perfect world, where all packets would have correct QoS tags, U-APSD
enabled queues should not trigger wakeup from power save. But in the real
world, where very few packets have correct QoS tags, this won't work. For
example, if only voip class has U-APSD enabled and we send a packet in voip
class, but the packets we receive are in best effort class, we would receive
the packets with the legacy power save method. And that would increase
latencies too much from a voip application point of view.
The workaround is to enable U-APSD for all qeueus and still use dynamic ps
wakeup for all other queues except voip. That way we can still save power
with a voip application and not sacrifice latency. Normal traffic (in
background, best effort or video class) would still trigger wakeup from
dynamic power save.
Signed-off-by: Kalle Valo <kalle.valo@nokia.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2010-01-12 16:42:53 +08:00
|
|
|
struct ieee80211_if_managed *ifmgd;
|
2018-09-05 13:06:14 +08:00
|
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
|
2010-01-12 16:42:46 +08:00
|
|
|
|
|
|
|
/* driver doesn't support power save */
|
2015-06-03 03:39:54 +08:00
|
|
|
if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
|
2010-01-12 16:42:46 +08:00
|
|
|
return TX_CONTINUE;
|
|
|
|
|
|
|
|
/* hardware does dynamic power save */
|
2015-06-03 03:39:54 +08:00
|
|
|
if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
|
2010-01-12 16:42:46 +08:00
|
|
|
return TX_CONTINUE;
|
|
|
|
|
|
|
|
/* dynamic power save disabled */
|
|
|
|
if (local->hw.conf.dynamic_ps_timeout <= 0)
|
|
|
|
return TX_CONTINUE;
|
|
|
|
|
|
|
|
/* we are scanning, don't enable power save */
|
|
|
|
if (local->scanning)
|
|
|
|
return TX_CONTINUE;
|
|
|
|
|
|
|
|
if (!local->ps_sdata)
|
|
|
|
return TX_CONTINUE;
|
|
|
|
|
|
|
|
/* No point if we're going to suspend */
|
|
|
|
if (local->quiescing)
|
|
|
|
return TX_CONTINUE;
|
|
|
|
|
mac80211: check uapsd state for dynamic power save
To make U-APSD client mode effective, we must not wake up from dynamic power
save when transmitting frames. So if dynamic power save is enabled, it needs
check the queue the transmitted packet is in and decide if we need to wake
up or not.
In a perfect world, where all packets would have correct QoS tags, U-APSD
enabled queues should not trigger wakeup from power save. But in the real
world, where very few packets have correct QoS tags, this won't work. For
example, if only voip class has U-APSD enabled and we send a packet in voip
class, but the packets we receive are in best effort class, we would receive
the packets with the legacy power save method. And that would increase
latencies too much from a voip application point of view.
The workaround is to enable U-APSD for all qeueus and still use dynamic ps
wakeup for all other queues except voip. That way we can still save power
with a voip application and not sacrifice latency. Normal traffic (in
background, best effort or video class) would still trigger wakeup from
dynamic power save.
Signed-off-by: Kalle Valo <kalle.valo@nokia.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2010-01-12 16:42:53 +08:00
|
|
|
/* dynamic ps is supported only in managed mode */
|
|
|
|
if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
|
|
|
|
return TX_CONTINUE;
|
|
|
|
|
2018-09-05 13:06:14 +08:00
|
|
|
if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
|
|
|
|
return TX_CONTINUE;
|
|
|
|
|
mac80211: check uapsd state for dynamic power save
To make U-APSD client mode effective, we must not wake up from dynamic power
save when transmitting frames. So if dynamic power save is enabled, it needs
check the queue the transmitted packet is in and decide if we need to wake
up or not.
In a perfect world, where all packets would have correct QoS tags, U-APSD
enabled queues should not trigger wakeup from power save. But in the real
world, where very few packets have correct QoS tags, this won't work. For
example, if only voip class has U-APSD enabled and we send a packet in voip
class, but the packets we receive are in best effort class, we would receive
the packets with the legacy power save method. And that would increase
latencies too much from a voip application point of view.
The workaround is to enable U-APSD for all qeueus and still use dynamic ps
wakeup for all other queues except voip. That way we can still save power
with a voip application and not sacrifice latency. Normal traffic (in
background, best effort or video class) would still trigger wakeup from
dynamic power save.
Signed-off-by: Kalle Valo <kalle.valo@nokia.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2010-01-12 16:42:53 +08:00
|
|
|
ifmgd = &tx->sdata->u.mgd;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't wakeup from power save if u-apsd is enabled, voip ac has
|
|
|
|
* u-apsd enabled and the frame is in voip class. This effectively
|
|
|
|
* means that even if all access categories have u-apsd enabled, in
|
|
|
|
* practise u-apsd is only used with the voip ac. This is a
|
|
|
|
* workaround for the case when received voip class packets do not
|
|
|
|
* have correct qos tag for some reason, due the network or the
|
|
|
|
* peer application.
|
|
|
|
*
|
2012-03-14 22:15:03 +08:00
|
|
|
* Note: ifmgd->uapsd_queues access is racy here. If the value is
|
mac80211: check uapsd state for dynamic power save
To make U-APSD client mode effective, we must not wake up from dynamic power
save when transmitting frames. So if dynamic power save is enabled, it needs
check the queue the transmitted packet is in and decide if we need to wake
up or not.
In a perfect world, where all packets would have correct QoS tags, U-APSD
enabled queues should not trigger wakeup from power save. But in the real
world, where very few packets have correct QoS tags, this won't work. For
example, if only voip class has U-APSD enabled and we send a packet in voip
class, but the packets we receive are in best effort class, we would receive
the packets with the legacy power save method. And that would increase
latencies too much from a voip application point of view.
The workaround is to enable U-APSD for all qeueus and still use dynamic ps
wakeup for all other queues except voip. That way we can still save power
with a voip application and not sacrifice latency. Normal traffic (in
background, best effort or video class) would still trigger wakeup from
dynamic power save.
Signed-off-by: Kalle Valo <kalle.valo@nokia.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2010-01-12 16:42:53 +08:00
|
|
|
* changed via debugfs, user needs to reassociate manually to have
|
|
|
|
* everything in sync.
|
|
|
|
*/
|
2012-03-27 20:18:37 +08:00
|
|
|
if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
|
|
|
|
(ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
|
|
|
|
skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
|
mac80211: check uapsd state for dynamic power save
To make U-APSD client mode effective, we must not wake up from dynamic power
save when transmitting frames. So if dynamic power save is enabled, it needs
check the queue the transmitted packet is in and decide if we need to wake
up or not.
In a perfect world, where all packets would have correct QoS tags, U-APSD
enabled queues should not trigger wakeup from power save. But in the real
world, where very few packets have correct QoS tags, this won't work. For
example, if only voip class has U-APSD enabled and we send a packet in voip
class, but the packets we receive are in best effort class, we would receive
the packets with the legacy power save method. And that would increase
latencies too much from a voip application point of view.
The workaround is to enable U-APSD for all qeueus and still use dynamic ps
wakeup for all other queues except voip. That way we can still save power
with a voip application and not sacrifice latency. Normal traffic (in
background, best effort or video class) would still trigger wakeup from
dynamic power save.
Signed-off-by: Kalle Valo <kalle.valo@nokia.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2010-01-12 16:42:53 +08:00
|
|
|
return TX_CONTINUE;
|
|
|
|
|
2010-01-12 16:42:46 +08:00
|
|
|
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
|
|
|
|
ieee80211_stop_queues_by_reason(&local->hw,
|
2013-02-13 19:25:28 +08:00
|
|
|
IEEE80211_MAX_QUEUE_MAP,
|
2014-06-13 21:30:05 +08:00
|
|
|
IEEE80211_QUEUE_STOP_REASON_PS,
|
|
|
|
false);
|
2011-02-18 19:48:03 +08:00
|
|
|
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
|
2010-01-12 16:42:46 +08:00
|
|
|
ieee80211_queue_work(&local->hw,
|
|
|
|
&local->dynamic_ps_disable_work);
|
|
|
|
}
|
|
|
|
|
2011-05-04 02:40:08 +08:00
|
|
|
/* Don't restart the timer if we're not disassociated */
|
|
|
|
if (!ifmgd->associated)
|
|
|
|
return TX_CONTINUE;
|
|
|
|
|
2010-01-12 16:42:46 +08:00
|
|
|
mod_timer(&local->dynamic_ps_timer, jiffies +
|
|
|
|
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
|
|
|
|
|
|
|
|
return TX_CONTINUE;
|
|
|
|
}
static ieee80211_tx_result debug_noinline
|
2008-02-25 23:27:43 +08:00
|
|
|
ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
|
2007-07-27 21:43:22 +08:00
|
|
|
{
|
2008-07-16 09:44:13 +08:00
|
|
|
|
2008-05-15 18:55:29 +08:00
|
|
|
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
|
|
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
|
2011-09-29 22:04:36 +08:00
|
|
|
bool assoc = false;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2008-05-15 18:55:29 +08:00
|
|
|
if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
|
2008-02-01 02:48:20 +08:00
|
|
|
return TX_CONTINUE;
|
2007-09-26 23:53:18 +08:00
|
|
|
|
mac80211: Optimize scans on current operating channel.
This should decrease un-necessary flushes, on/off channel work,
and channel changes in cases where the only scanned channel is
the current operating channel.
* Removes SCAN_OFF_CHANNEL flag, uses SDATA_STATE_OFFCHANNEL
and is-scanning flags instead.
* Add helper method to determine if we are currently configured
for the operating channel.
* Do no blindly go off/on channel in work.c Instead, only call
appropriate on/off code when we really need to change channels.
Always enable offchannel-ps mode when starting work,
and disable it when we are done.
* Consolidate ieee80211_offchannel_stop_station and
ieee80211_offchannel_stop_beaconing, call it
ieee80211_offchannel_stop_vifs instead.
* Accept non-beacon frames when scanning on operating channel.
* Scan state machine optimized to minimize on/off channel
transitions. Also, when going on-channel, go ahead and
re-enable beaconing. We're going to be there for 200ms,
so seems like some useful beaconing could happen.
Always enable offchannel-ps mode when starting software
scan, and disable it when we are done.
* Grab local->mtx earlier in __ieee80211_scan_completed_finish
so that we are protected when calling hw_config(), etc.
* Pass probe-responses up the stack if scanning on local
channel, so that mlme can take a look.
Signed-off-by: Ben Greear <greearb@candelatech.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2011-02-05 03:54:17 +08:00
|
|
|
if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
|
|
|
|
test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
|
2009-03-18 20:06:44 +08:00
|
|
|
!ieee80211_is_probe_req(hdr->frame_control) &&
|
2020-01-14 13:59:40 +08:00
|
|
|
!ieee80211_is_any_nullfunc(hdr->frame_control))
|
2009-03-18 20:06:44 +08:00
|
|
|
/*
|
|
|
|
* When software scanning only nullfunc frames (to notify
|
|
|
|
* the sleep state to the AP) and probe requests (for the
|
|
|
|
* active scan) are allowed, all other frames should not be
|
|
|
|
* sent and we should not get here, but if we do
|
|
|
|
* nonetheless, drop them to avoid sending them
|
|
|
|
* off-channel. See the link below and
|
|
|
|
* ieee80211_start_scan() for more.
|
|
|
|
*
|
|
|
|
* http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
|
|
|
|
*/
|
2008-02-01 02:48:20 +08:00
|
|
|
return TX_DROP;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2014-11-03 17:33:19 +08:00
|
|
|
if (tx->sdata->vif.type == NL80211_IFTYPE_OCB)
|
|
|
|
return TX_CONTINUE;
|
|
|
|
|
2008-02-25 23:27:43 +08:00
|
|
|
if (tx->flags & IEEE80211_TX_PS_BUFFERED)
|
2008-02-01 02:48:20 +08:00
|
|
|
return TX_CONTINUE;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2011-09-29 22:04:36 +08:00
|
|
|
if (tx->sta)
|
|
|
|
assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2008-02-25 23:27:43 +08:00
|
|
|
if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
|
2011-09-29 22:04:36 +08:00
|
|
|
if (unlikely(!assoc &&
|
2008-07-16 09:44:13 +08:00
|
|
|
ieee80211_is_data(hdr->frame_control))) {
|
2007-07-27 21:43:22 +08:00
|
|
|
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
|
2012-06-22 17:29:50 +08:00
|
|
|
sdata_info(tx->sdata,
|
|
|
|
"dropped data frame to not associated station %pM\n",
|
|
|
|
hdr->addr1);
|
|
|
|
#endif
|
2007-07-27 21:43:22 +08:00
|
|
|
I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
|
2008-02-01 02:48:20 +08:00
|
|
|
return TX_DROP;
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
2016-10-11 01:12:21 +08:00
|
|
|
} else if (unlikely(ieee80211_is_data(hdr->frame_control) &&
|
|
|
|
ieee80211_vif_get_num_mcast_if(tx->sdata) == 0)) {
|
2011-12-14 19:20:31 +08:00
|
|
|
/*
|
|
|
|
* No associated STAs - no need to send multicast
|
|
|
|
* frames.
|
|
|
|
*/
|
|
|
|
return TX_DROP;
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2008-02-01 02:48:20 +08:00
|
|
|
return TX_CONTINUE;
|
2007-07-27 21:43:22 +08:00
|
|
|
}
/* This function is called whenever the AP is about to exceed the maximum limit
|
|
|
|
* of buffered frames for power saving STAs. This situation should not really
|
|
|
|
* happen often during normal operation, so dropping the oldest buffered packet
|
|
|
|
* from each queue should be OK to make some room for new frames. */
|
|
|
|
static void purge_old_ps_buffers(struct ieee80211_local *local)
|
|
|
|
{
|
|
|
|
int total = 0, purged = 0;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct ieee80211_sub_if_data *sdata;
|
|
|
|
struct sta_info *sta;
|
|
|
|
|
2007-09-19 05:29:21 +08:00
|
|
|
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
|
2012-10-11 03:39:50 +08:00
|
|
|
struct ps_data *ps;
|
|
|
|
|
|
|
|
if (sdata->vif.type == NL80211_IFTYPE_AP)
|
|
|
|
ps = &sdata->u.ap.ps;
|
2013-01-31 01:14:08 +08:00
|
|
|
else if (ieee80211_vif_is_mesh(&sdata->vif))
|
|
|
|
ps = &sdata->u.mesh.ps;
|
2012-10-11 03:39:50 +08:00
|
|
|
else
|
2007-07-27 21:43:22 +08:00
|
|
|
continue;
|
2012-10-11 03:39:50 +08:00
|
|
|
|
|
|
|
skb = skb_dequeue(&ps->bc_buf);
|
2007-07-27 21:43:22 +08:00
|
|
|
if (skb) {
|
|
|
|
purged++;
|
2016-08-02 17:13:41 +08:00
|
|
|
ieee80211_free_txskb(&local->hw, skb);
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
2012-10-11 03:39:50 +08:00
|
|
|
total += skb_queue_len(&ps->bc_buf);
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2011-09-29 22:04:29 +08:00
|
|
|
/*
|
|
|
|
* Drop one frame from each station from the lowest-priority
|
|
|
|
* AC that has frames at all.
|
|
|
|
*/
|
2008-02-25 23:27:46 +08:00
|
|
|
list_for_each_entry_rcu(sta, &local->sta_list, list) {
|
2011-09-29 22:04:29 +08:00
|
|
|
int ac;
|
|
|
|
|
|
|
|
for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
|
|
|
|
skb = skb_dequeue(&sta->ps_tx_buf[ac]);
|
|
|
|
total += skb_queue_len(&sta->ps_tx_buf[ac]);
|
|
|
|
if (skb) {
|
|
|
|
purged++;
|
2012-10-08 20:39:33 +08:00
|
|
|
ieee80211_free_txskb(&local->hw, skb);
|
2011-09-29 22:04:29 +08:00
|
|
|
break;
|
|
|
|
}
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
}
|
2008-02-25 23:27:46 +08:00
|
|
|
|
2007-07-27 21:43:22 +08:00
|
|
|
local->total_ps_buffered = total;
|
2012-06-22 17:29:50 +08:00
|
|
|
ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2008-02-01 02:48:20 +08:00
|
|
|
/*
 * TX handler: decide whether a broadcast/multicast frame must be held
 * back until after the next DTIM beacon because some peer station is
 * in powersave.
 *
 * Returns TX_CONTINUE when the frame may be transmitted now (or when
 * the hardware will do the DTIM buffering for us), or TX_QUEUED when
 * the skb has been consumed onto the per-BSS bc_buf queue.
 */
static ieee80211_tx_result
ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ps_data *ps;

	/*
	 * broadcast/multicast frame
	 *
	 * If any of the associated/peer stations is in power save mode,
	 * the frame is buffered to be sent after DTIM beacon frame.
	 * This is done either by the hardware or us.
	 */

	/* powersaving STAs currently only in AP/VLAN/mesh mode */
	if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
	    tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		/* VLAN interfaces share the PS state of their AP via bss */
		if (!tx->sdata->bss)
			return TX_CONTINUE;

		ps = &tx->sdata->bss->ps;
	} else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
		ps = &tx->sdata->u.mesh.ps;
	} else {
		/* other interface types have no sleeping peers to buffer for */
		return TX_CONTINUE;
	}

	/* no buffering for ordered frames */
	if (ieee80211_has_order(hdr->frame_control))
		return TX_CONTINUE;

	/* probe requests are never PS-buffered; send them immediately */
	if (ieee80211_is_probe_req(hdr->frame_control))
		return TX_CONTINUE;

	/* driver manages queues itself: route the frame to the CAB
	 * (content-after-beacon) queue it registered */
	if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
		info->hw_queue = tx->sdata->vif.cab_queue;

	/* no stations in PS mode and no buffered packets */
	if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
		return TX_CONTINUE;

	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;

	/* device releases frame after DTIM beacon */
	if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING))
		return TX_CONTINUE;

	/* buffered in mac80211: make room if the global budget is spent */
	if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
		purge_old_ps_buffers(tx->local);

	/* per-BSS queue full: drop the oldest buffered frame rather than
	 * the new one, so the freshest traffic survives */
	if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
		ps_dbg(tx->sdata,
		       "BC TX buffer full - dropping the oldest frame\n");
		ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
	} else
		tx->local->total_ps_buffered++;

	skb_queue_tail(&ps->bc_buf, tx->skb);

	/* skb ownership transferred to bc_buf; stop the TX handler chain */
	return TX_QUEUED;
}
|
|
|
|
|
2009-01-08 19:32:00 +08:00
|
|
|
/*
 * Decide whether management frame protection (IEEE 802.11w) applies to
 * this outgoing frame: it must be a robust management frame addressed
 * to a station that negotiated MFP.
 *
 * Returns 1 when the frame must be protected, 0 otherwise.
 */
static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
			     struct sk_buff *skb)
{
	/* only management frames are ever protected by MFP */
	if (!ieee80211_is_mgmt(fc))
		return 0;

	/* peer must exist and have negotiated MFP */
	if (!sta || !test_sta_flag(sta, WLAN_STA_MFP))
		return 0;

	/* finally, only the "robust" subset of management frames qualifies */
	return ieee80211_is_robust_mgmt_frame(skb) ? 1 : 0;
}
|
|
|
|
|
2008-02-01 02:48:20 +08:00
|
|
|
/*
 * TX handler: buffer a unicast frame when the destination station is
 * asleep (or delivery to it is otherwise blocked), releasing it later
 * when the station wakes up.
 *
 * Returns TX_CONTINUE to keep transmitting now, or TX_QUEUED when the
 * skb has been consumed onto the station's per-AC ps_tx_buf queue.
 *
 * NOTE(review): the ps_lock section below closes a race against
 * ieee80211_sta_ps_deliver_wakeup(); do not reorder the flag re-check,
 * the queueing and the unlock.
 */
static ieee80211_tx_result
ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct sta_info *sta = tx->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_local *local = tx->local;

	/* no destination station known - nothing to buffer for */
	if (unlikely(!sta))
		return TX_CONTINUE;

	/* station asleep (PS_STA), frames still held by the driver
	 * (PS_DRIVER), or a wakeup delivery currently in progress
	 * (PS_DELIVER) - and the frame is not exempt from buffering */
	if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
		      test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
		      test_sta_flag(sta, WLAN_STA_PS_DELIVER)) &&
		     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
		int ac = skb_get_queue_mapping(tx->skb);

		/* non-bufferable MMPDUs must go out immediately; mark them
		 * so later handlers also skip PS buffering */
		if (ieee80211_is_mgmt(hdr->frame_control) &&
		    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
			return TX_CONTINUE;
		}

		ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
		       sta->sta.addr, sta->sta.aid, ac);
		/* global PS-buffer budget exhausted: reclaim old frames */
		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
			purge_old_ps_buffers(tx->local);

		/* sync with ieee80211_sta_ps_deliver_wakeup */
		spin_lock(&sta->ps_lock);
		/*
		 * STA woke up the meantime and all the frames on ps_tx_buf have
		 * been queued to pending queue. No reordering can happen, go
		 * ahead and Tx the packet.
		 */
		if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
		    !test_sta_flag(sta, WLAN_STA_PS_DRIVER) &&
		    !test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
			spin_unlock(&sta->ps_lock);
			return TX_CONTINUE;
		}

		/* per-STA per-AC queue full: drop the oldest frame to make
		 * room, keeping the newest traffic */
		if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
			struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
			ps_dbg(tx->sdata,
			       "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
			       sta->sta.addr, ac);
			ieee80211_free_txskb(&local->hw, old);
		} else
			tx->local->total_ps_buffered++;

		/* timestamp for expiry, and state needed to re-run TX
		 * processing when the frame is released later */
		info->control.jiffies = jiffies;
		info->control.vif = &tx->sdata->vif;
		info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
		info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
		skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
		spin_unlock(&sta->ps_lock);

		/* make sure buffered frames eventually expire even if the
		 * station never wakes up */
		if (!timer_pending(&local->sta_cleanup))
			mod_timer(&local->sta_cleanup,
				  round_jiffies(jiffies +
						STA_INFO_CLEANUP_INTERVAL));

		/*
		 * We queued up some frames, so the TIM bit might
		 * need to be set, recalculate it.
		 */
		sta_info_recalc_tim(sta);

		/* skb ownership transferred to ps_tx_buf */
		return TX_QUEUED;
	} else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
		ps_dbg(tx->sdata,
		       "STA %pM in PS mode, but polling/in SP -> send frame\n",
		       sta->sta.addr);
	}

	return TX_CONTINUE;
}
|
|
|
|
|
2008-06-30 21:10:44 +08:00
|
|
|
static ieee80211_tx_result debug_noinline
|
2008-02-25 23:27:43 +08:00
|
|
|
ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
|
2007-07-27 21:43:22 +08:00
|
|
|
{
|
2008-02-25 23:27:43 +08:00
|
|
|
if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
|
2008-02-01 02:48:20 +08:00
|
|
|
return TX_CONTINUE;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2008-02-25 23:27:43 +08:00
|
|
|
if (tx->flags & IEEE80211_TX_UNICAST)
|
2007-07-27 21:43:22 +08:00
|
|
|
return ieee80211_tx_h_unicast_ps_buf(tx);
|
|
|
|
else
|
|
|
|
return ieee80211_tx_h_multicast_ps_buf(tx);
|
|
|
|
}
|
|
|
|
|
2010-08-27 19:26:54 +08:00
|
|
|
static ieee80211_tx_result debug_noinline
|
|
|
|
ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
|
|
|
|
{
|
|
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
|
|
|
|
|
2013-07-03 00:09:12 +08:00
|
|
|
if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
|
|
|
|
if (tx->sdata->control_port_no_encrypt)
|
|
|
|
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
|
|
|
|
info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
|
mac80211: Send EAPOL frames at lowest rate
The current minstrel_ht rate control behavior is somewhat optimistic in
trying to find optimum TX rate. While this is usually fine for normal
Data frames, there are cases where a more conservative set of retry
parameters would be beneficial to make the connection more robust.
EAPOL frames are critical to the authentication and especially the
EAPOL-Key message 4/4 (the last message in the 4-way handshake) is
important to get through to the AP. If that message is lost, the only
recovery mechanism in many cases is to reassociate with the AP and start
from scratch. This can often be avoided by trying to send the frame with
more conservative rate and/or with more link layer retries.
In most cases, minstrel_ht is currently using the initial EAPOL-Key
frames for probing higher rates and this results in only five link layer
transmission attempts (one at high(ish) MCS and four at MCS0). While
this works with most APs, it looks like there are some deployed APs that
may have issues with the EAPOL frames using HT MCS immediately after
association. Similarly, there may be issues in cases where the signal
strength or radio environment is not good enough to be able to get
frames through even at couple of MCS 0 tries.
The best approach for this would likely to be to reduce the TX rate for
the last rate (3rd rate parameter in the set) to a low basic rate (say,
6 Mbps on 5 GHz and 2 or 5.5 Mbps on 2.4 GHz), but doing that cleanly
requires some more effort. For now, we can start with a simple one-liner
that forces the minimum rate to be used for EAPOL frames similarly how
the TX rate is selected for the IEEE 802.11 Management frames. This does
result in a small extra latency added to the cases where the AP would be
able to receive the higher rate, but taken into account how small number
of EAPOL frames are used, this is likely to be insignificant. A future
optimization in the minstrel_ht design can also allow this patch to be
reverted to get back to the more optimized initial TX rate.
It should also be noted that many drivers that do not use minstrel as
the rate control algorithm are already doing similar workarounds by
forcing the lowest TX rate to be used for EAPOL frames.
Cc: stable@vger.kernel.org
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Tested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jouni Malinen <jouni@qca.qualcomm.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
2015-02-26 21:50:50 +08:00
|
|
|
info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
|
2013-07-03 00:09:12 +08:00
|
|
|
}
|
2010-08-27 19:26:54 +08:00
|
|
|
|
|
|
|
return TX_CONTINUE;
|
|
|
|
}
|
|
|
|
|
2008-06-30 21:10:44 +08:00
|
|
|
static ieee80211_tx_result debug_noinline
|
2008-02-25 23:27:43 +08:00
|
|
|
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
|
2007-07-27 21:43:22 +08:00
|
|
|
{
|
2012-07-05 00:10:07 +08:00
|
|
|
struct ieee80211_key *key;
|
2008-05-15 18:55:29 +08:00
|
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
|
2008-07-16 09:44:13 +08:00
|
|
|
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
|
[MAC80211]: fix race conditions with keys
During receive processing, we select the key long before using it and
because there's no locking it is possible that we kfree() the key
after having selected it but before using it for crypto operations.
Obviously, this is bad.
Secondly, during transmit processing, there are two possible races: We
have a similar race between select_key() and using it for encryption,
but we also have a race here between select_key() and hardware
encryption (both when a key is removed.)
This patch solves these issues by using RCU: when a key is to be freed,
we first remove the pointer from the appropriate places (sdata->keys,
sdata->default_key, sta->key) using rcu_assign_pointer() and then
synchronize_rcu(). Then, we can safely kfree() the key and remove it
from the hardware. There's a window here where the hardware may still
be using it for decryption, but we can't work around that without having
two hardware callbacks, one to disable the key for RX and one to disable
it for TX; but the worst thing that will happen is that we receive a
packet decrypted that we don't find a key for any more and then drop it.
When we add a key, we first need to upload it to the hardware and then,
using rcu_assign_pointer() again, link it into our structures.
In the code using keys (TX/RX paths) we use rcu_dereference() to get the
key and enclose the whole tx/rx section in a rcu_read_lock() ...
rcu_read_unlock() block. Because we've uploaded the key to hardware
before linking it into internal structures, we can guarantee that it is
valid once get to into tx().
One possible race condition remains, however: when we have hardware
acceleration enabled and the driver shuts down the queues, we end up
queueing the frame. If now somebody removes the key, the key will be
removed from hwaccel and then then driver will be asked to encrypt the
frame with a key index that has been removed. Hence, drivers will need
to be aware that the hw_key_index they are passed might not be under
all circumstances. Most drivers will, however, simply ignore that
condition and encrypt the frame with the selected key anyway, this
only results in a frame being encrypted with a wrong key or dropped
(rightfully) because the key was not valid. There isn't much we can
do about it unless we want to walk the pending frame queue every time
a key is removed and remove all frames that used it.
This race condition, however, will most likely be solved once we add
multiqueue support to mac80211 because then frames will be queued
further up the stack instead of after being processed.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Acked-by: Michael Wu <flamingice@sourmilk.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-09-14 23:10:24 +08:00
|
|
|
|
2020-03-26 21:09:42 +08:00
|
|
|
if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
|
2007-07-27 21:43:22 +08:00
|
|
|
tx->key = NULL;
|
2020-03-26 21:09:42 +08:00
|
|
|
return TX_CONTINUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (tx->sta &&
|
|
|
|
(key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
|
[MAC80211]: fix race conditions with keys
During receive processing, we select the key long before using it and
because there's no locking it is possible that we kfree() the key
after having selected it but before using it for crypto operations.
Obviously, this is bad.
Secondly, during transmit processing, there are two possible races: We
have a similar race between select_key() and using it for encryption,
but we also have a race here between select_key() and hardware
encryption (both when a key is removed.)
This patch solves these issues by using RCU: when a key is to be freed,
we first remove the pointer from the appropriate places (sdata->keys,
sdata->default_key, sta->key) using rcu_assign_pointer() and then
synchronize_rcu(). Then, we can safely kfree() the key and remove it
from the hardware. There's a window here where the hardware may still
be using it for decryption, but we can't work around that without having
two hardware callbacks, one to disable the key for RX and one to disable
it for TX; but the worst thing that will happen is that we receive a
packet decrypted that we don't find a key for any more and then drop it.
When we add a key, we first need to upload it to the hardware and then,
using rcu_assign_pointer() again, link it into our structures.
In the code using keys (TX/RX paths) we use rcu_dereference() to get the
key and enclose the whole tx/rx section in a rcu_read_lock() ...
rcu_read_unlock() block. Because we've uploaded the key to hardware
before linking it into internal structures, we can guarantee that it is
valid once get to into tx().
One possible race condition remains, however: when we have hardware
acceleration enabled and the driver shuts down the queues, we end up
queueing the frame. If now somebody removes the key, the key will be
removed from hwaccel and then then driver will be asked to encrypt the
frame with a key index that has been removed. Hence, drivers will need
to be aware that the hw_key_index they are passed might not be under
all circumstances. Most drivers will, however, simply ignore that
condition and encrypt the frame with the selected key anyway, this
only results in a frame being encrypted with a wrong key or dropped
(rightfully) because the key was not valid. There isn't much we can
do about it unless we want to walk the pending frame queue every time
a key is removed and remove all frames that used it.
This race condition, however, will most likely be solved once we add
multiqueue support to mac80211 because then frames will be queued
further up the stack instead of after being processed.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Acked-by: Michael Wu <flamingice@sourmilk.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-09-14 23:10:24 +08:00
|
|
|
tx->key = key;
|
2016-06-22 18:55:20 +08:00
|
|
|
else if (ieee80211_is_group_privacy_action(tx->skb) &&
|
|
|
|
(key = rcu_dereference(tx->sdata->default_multicast_key)))
|
|
|
|
tx->key = key;
|
2009-01-08 19:32:02 +08:00
|
|
|
else if (ieee80211_is_mgmt(hdr->frame_control) &&
|
2010-03-30 14:35:23 +08:00
|
|
|
is_multicast_ether_addr(hdr->addr1) &&
|
2014-01-23 23:20:29 +08:00
|
|
|
ieee80211_is_robust_mgmt_frame(tx->skb) &&
|
2009-01-08 19:32:02 +08:00
|
|
|
(key = rcu_dereference(tx->sdata->default_mgmt_key)))
|
|
|
|
tx->key = key;
|
2010-12-10 02:49:02 +08:00
|
|
|
else if (is_multicast_ether_addr(hdr->addr1) &&
|
|
|
|
(key = rcu_dereference(tx->sdata->default_multicast_key)))
|
|
|
|
tx->key = key;
|
|
|
|
else if (!is_multicast_ether_addr(hdr->addr1) &&
|
|
|
|
(key = rcu_dereference(tx->sdata->default_unicast_key)))
|
[MAC80211]: fix race conditions with keys
During receive processing, we select the key long before using it and
because there's no locking it is possible that we kfree() the key
after having selected it but before using it for crypto operations.
Obviously, this is bad.
Secondly, during transmit processing, there are two possible races: We
have a similar race between select_key() and using it for encryption,
but we also have a race here between select_key() and hardware
encryption (both when a key is removed.)
This patch solves these issues by using RCU: when a key is to be freed,
we first remove the pointer from the appropriate places (sdata->keys,
sdata->default_key, sta->key) using rcu_assign_pointer() and then
synchronize_rcu(). Then, we can safely kfree() the key and remove it
from the hardware. There's a window here where the hardware may still
be using it for decryption, but we can't work around that without having
two hardware callbacks, one to disable the key for RX and one to disable
it for TX; but the worst thing that will happen is that we receive a
packet decrypted that we don't find a key for any more and then drop it.
When we add a key, we first need to upload it to the hardware and then,
using rcu_assign_pointer() again, link it into our structures.
In the code using keys (TX/RX paths) we use rcu_dereference() to get the
key and enclose the whole tx/rx section in a rcu_read_lock() ...
rcu_read_unlock() block. Because we've uploaded the key to hardware
before linking it into internal structures, we can guarantee that it is
valid once we get into tx().
One possible race condition remains, however: when we have hardware
acceleration enabled and the driver shuts down the queues, we end up
queueing the frame. If now somebody removes the key, the key will be
removed from hwaccel and then the driver will be asked to encrypt the
frame with a key index that has been removed. Hence, drivers will need
to be aware that the hw_key_index they are passed might not be valid under
all circumstances. Most drivers will, however, simply ignore that
condition and encrypt the frame with the selected key anyway, this
only results in a frame being encrypted with a wrong key or dropped
(rightfully) because the key was not valid. There isn't much we can
do about it unless we want to walk the pending frame queue every time
a key is removed and remove all frames that used it.
This race condition, however, will most likely be solved once we add
multiqueue support to mac80211 because then frames will be queued
further up the stack instead of after being processed.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Acked-by: Michael Wu <flamingice@sourmilk.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-09-14 23:10:24 +08:00
|
|
|
tx->key = key;
|
2015-03-20 18:37:36 +08:00
|
|
|
else
|
2012-07-05 00:10:08 +08:00
|
|
|
tx->key = NULL;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
|
|
|
if (tx->key) {
|
2010-01-17 08:47:58 +08:00
|
|
|
bool skip_hw = false;
|
|
|
|
|
2007-09-17 13:29:25 +08:00
|
|
|
/* TODO: add threshold stuff again */
|
2007-12-18 22:27:47 +08:00
|
|
|
|
2010-08-10 15:46:38 +08:00
|
|
|
switch (tx->key->conf.cipher) {
|
|
|
|
case WLAN_CIPHER_SUITE_WEP40:
|
|
|
|
case WLAN_CIPHER_SUITE_WEP104:
|
|
|
|
case WLAN_CIPHER_SUITE_TKIP:
|
2008-07-16 09:44:13 +08:00
|
|
|
if (!ieee80211_is_data_present(hdr->frame_control))
|
2007-12-18 22:27:47 +08:00
|
|
|
tx->key = NULL;
|
|
|
|
break;
|
2010-08-10 15:46:38 +08:00
|
|
|
case WLAN_CIPHER_SUITE_CCMP:
|
2015-01-25 01:52:07 +08:00
|
|
|
case WLAN_CIPHER_SUITE_CCMP_256:
|
2015-01-25 01:52:06 +08:00
|
|
|
case WLAN_CIPHER_SUITE_GCMP:
|
|
|
|
case WLAN_CIPHER_SUITE_GCMP_256:
|
2009-01-08 19:32:00 +08:00
|
|
|
if (!ieee80211_is_data_present(hdr->frame_control) &&
|
|
|
|
!ieee80211_use_mfp(hdr->frame_control, tx->sta,
|
2016-06-22 18:55:20 +08:00
|
|
|
tx->skb) &&
|
|
|
|
!ieee80211_is_group_privacy_action(tx->skb))
|
2009-01-08 19:32:00 +08:00
|
|
|
tx->key = NULL;
|
2010-01-24 02:27:14 +08:00
|
|
|
else
|
|
|
|
skip_hw = (tx->key->conf.flags &
|
2012-09-04 23:08:23 +08:00
|
|
|
IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
|
2010-01-24 02:27:14 +08:00
|
|
|
ieee80211_is_mgmt(hdr->frame_control);
|
2009-01-08 19:32:00 +08:00
|
|
|
break;
|
2010-08-10 15:46:38 +08:00
|
|
|
case WLAN_CIPHER_SUITE_AES_CMAC:
|
2015-01-25 01:52:08 +08:00
|
|
|
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
|
2015-01-25 01:52:09 +08:00
|
|
|
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
|
|
|
|
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
|
2009-01-08 19:32:02 +08:00
|
|
|
if (!ieee80211_is_mgmt(hdr->frame_control))
|
|
|
|
tx->key = NULL;
|
|
|
|
break;
|
2007-12-18 22:27:47 +08:00
|
|
|
}
|
2010-01-17 08:47:58 +08:00
|
|
|
|
2013-01-29 18:41:38 +08:00
|
|
|
if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
|
|
|
|
!ieee80211_is_deauth(hdr->frame_control)))
|
2011-07-12 18:30:59 +08:00
|
|
|
return TX_DROP;
|
|
|
|
|
2010-01-23 05:07:59 +08:00
|
|
|
if (!skip_hw && tx->key &&
|
2010-01-25 18:36:16 +08:00
|
|
|
tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
|
2010-01-17 08:47:58 +08:00
|
|
|
info->control.hw_key = &tx->key->conf;
|
2020-12-19 03:15:25 +08:00
|
|
|
} else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
|
2020-03-26 21:09:42 +08:00
|
|
|
test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
|
|
|
|
return TX_DROP;
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2008-02-01 02:48:20 +08:00
|
|
|
return TX_CONTINUE;
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2008-06-30 21:10:44 +08:00
|
|
|
/*
 * TX handler: ask the rate-control algorithm for the rate(s) to use.
 *
 * Fills in txrc from the frame/vif state, applies RTS/CTS and short-preamble
 * policy, then calls rate_control_get_rate().  Returns TX_DROP when no
 * usable rate exists, TX_CONTINUE otherwise.
 */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (void *)tx->skb->data;
	struct ieee80211_supported_band *sband;
	u32 len;
	struct ieee80211_tx_rate_control txrc;
	struct ieee80211_sta_rates *ratetbl = NULL;
	bool encap = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	bool assoc = false;

	memset(&txrc, 0, sizeof(txrc));

	sband = tx->local->hw.wiphy->bands[info->band];

	/* on-air length is capped by the fragmentation threshold */
	len = min_t(u32, tx->skb->len + FCS_LEN,
		    tx->local->hw.wiphy->frag_threshold);

	/* set up the tx rate control struct we give the RC algo */
	txrc.hw = &tx->local->hw;
	txrc.sband = sband;
	txrc.bss_conf = &tx->sdata->vif.bss_conf;
	txrc.skb = tx->skb;
	txrc.reported_rate.idx = -1;
	txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];

	if (tx->sdata->rc_has_mcs_mask[info->band])
		txrc.rate_idx_mcs_mask =
			tx->sdata->rc_rateidx_mcs_mask[info->band];

	/* interfaces that beacon/own a BSS must respect the basic rate set */
	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
		    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
		    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
		    tx->sdata->vif.type == NL80211_IFTYPE_OCB);

	/* set up RTS protection if desired */
	if (len > tx->local->hw.wiphy->rts_threshold) {
		txrc.rts = true;
	}

	info->control.use_rts = txrc.rts;
	info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot;

	/*
	 * Use short preamble if the BSS can handle it, but not for
	 * management frames unless we know the receiver can handle
	 * that -- the management frame might be to a station that
	 * just wants a probe response.
	 */
	if (tx->sdata->vif.bss_conf.use_short_preamble &&
	    (ieee80211_is_tx_data(tx->skb) ||
	     (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
		txrc.short_preamble = true;

	info->control.short_preamble = txrc.short_preamble;

	/* don't ask rate control when rate already injected via radiotap */
	if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)
		return TX_CONTINUE;

	if (tx->sta)
		assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);

	/*
	 * Lets not bother rate control if we're associated and cannot
	 * talk to the sta. This should not happen.
	 */
	if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
		 !rate_usable_index_exists(sband, &tx->sta->sta),
		 "%s: Dropped data frame as no usable bitrate found while "
		 "scanning and associated. Target station: "
		 "%pM on %d GHz band\n",
		 tx->sdata->name,
		 encap ? ((struct ethhdr *)hdr)->h_dest : hdr->addr1,
		 info->band ? 5 : 2))
		return TX_DROP;

	/*
	 * If we're associated with the sta at this point we know we can at
	 * least send the frame at the lowest bit rate.
	 */
	rate_control_get_rate(tx->sdata, tx->sta, &txrc);

	if (tx->sta && !info->control.skip_table)
		ratetbl = rcu_dereference(tx->sta->sta.rates);

	if (unlikely(info->control.rates[0].idx < 0)) {
		/* RC algo gave us nothing -- fall back to the sta rate table */
		if (ratetbl) {
			struct ieee80211_tx_rate rate = {
				.idx = ratetbl->rate[0].idx,
				.flags = ratetbl->rate[0].flags,
				.count = ratetbl->rate[0].count
			};

			if (ratetbl->rate[0].idx < 0)
				return TX_DROP;

			tx->rate = rate;
		} else {
			return TX_DROP;
		}
	} else {
		tx->rate = info->control.rates[0];
	}

	if (txrc.reported_rate.idx < 0) {
		txrc.reported_rate = tx->rate;
		if (tx->sta && ieee80211_is_tx_data(tx->skb))
			tx->sta->tx_stats.last_rate = txrc.reported_rate;
	} else if (tx->sta)
		tx->sta->tx_stats.last_rate = txrc.reported_rate;

	/* when a rate table is in use, per-frame rates are not filled in */
	if (ratetbl)
		return TX_CONTINUE;

	if (unlikely(!info->control.rates[0].count))
		info->control.rates[0].count = 1;

	/* no-ACK frames must not be retried */
	if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
			 (info->flags & IEEE80211_TX_CTL_NO_ACK)))
		info->control.rates[0].count = 1;

	return TX_CONTINUE;
}
|
|
|
|
|
2015-03-28 04:30:37 +08:00
|
|
|
static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
|
|
|
|
{
|
|
|
|
u16 *seq = &sta->tid_seq[tid];
|
|
|
|
__le16 ret = cpu_to_le16(*seq);
|
|
|
|
|
|
|
|
/* Increase the sequence number. */
|
|
|
|
*seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-07-10 17:21:26 +08:00
|
|
|
/*
 * TX handler: assign the 802.11 sequence number.
 *
 * Non-QoS (and multicast QoS) frames take the per-sdata global counter
 * and additionally ask the driver to assign one; unicast QoS data frames
 * take the per-STA, per-TID counter.  Always returns TX_CONTINUE.
 */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	int tid;

	/*
	 * Packet injection may want to control the sequence
	 * number, if we have no matching interface then we
	 * neither assign one ourselves nor ask the driver to.
	 */
	if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
		return TX_CONTINUE;

	/* control frames carry no sequence number field */
	if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
		return TX_CONTINUE;

	/* header too short to contain a seq_ctrl field */
	if (ieee80211_hdrlen(hdr->frame_control) < 24)
		return TX_CONTINUE;

	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
		return TX_CONTINUE;

	/* caller explicitly asked us not to number this frame */
	if (info->control.flags & IEEE80211_TX_CTRL_NO_SEQNO)
		return TX_CONTINUE;

	/*
	 * Anything but QoS data that has a sequence number field
	 * (is long enough) gets a sequence number from the global
	 * counter.  QoS data frames with a multicast destination
	 * also use the global counter (802.11-2012 9.3.2.10).
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1)) {
		/* driver should assign sequence number */
		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
		/* for pure STA mode without beacons, we can do it */
		hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
		tx->sdata->sequence_number += 0x10;
		if (tx->sta)
			/* non-QoS MSDUs are accounted on the pseudo-TID slot */
			tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++;
		return TX_CONTINUE;
	}

	/*
	 * This should be true for injected/management frames only, for
	 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
	 * above since they are not QoS-data frames.
	 */
	if (!tx->sta)
		return TX_CONTINUE;

	/* include per-STA, per-TID sequence counter */
	tid = ieee80211_get_tid(hdr);
	tx->sta->tx_stats.msdu[tid]++;

	hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);

	return TX_CONTINUE;
}
|
|
|
|
|
2011-11-16 22:28:55 +08:00
|
|
|
/*
 * Split @skb into fragments of at most @frag_threshold on-air bytes.
 *
 * The caller has already queued @skb (the first fragment) on tx->skbs;
 * this allocates the remaining fragments, copies header + payload slices
 * into them, queues them behind the first, and finally trims the first
 * fragment down to its share.  Returns 0 on success, -EINVAL if @skb is
 * too short to need fragmenting, -ENOMEM on allocation failure (already
 * queued fragments are left on tx->skbs for the caller to clean up).
 */
static int ieee80211_fragment(struct ieee80211_tx_data *tx,
			      struct sk_buff *skb, int hdrlen,
			      int frag_threshold)
{
	struct ieee80211_local *local = tx->local;
	struct ieee80211_tx_info *info;
	struct sk_buff *tmp;
	/* payload bytes per fragment: threshold minus header and FCS */
	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
	int pos = hdrlen + per_fragm;
	int rem = skb->len - hdrlen - per_fragm;

	if (WARN_ON(rem < 0))
		return -EINVAL;

	/* first fragment was already added to queue by caller */

	while (rem) {
		int fraglen = per_fragm;

		if (fraglen > rem)
			fraglen = rem;
		rem -= fraglen;
		/* room for headroom, the fragment itself and crypto tail */
		tmp = dev_alloc_skb(local->tx_headroom +
				    frag_threshold +
				    tx->sdata->encrypt_headroom +
				    IEEE80211_ENCRYPT_TAILROOM);
		if (!tmp)
			return -ENOMEM;

		__skb_queue_tail(&tx->skbs, tmp);

		skb_reserve(tmp,
			    local->tx_headroom + tx->sdata->encrypt_headroom);

		/* copy control information */
		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));

		info = IEEE80211_SKB_CB(tmp);
		/* only the first fragment clears the PS filter / is "first" */
		info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
				 IEEE80211_TX_CTL_FIRST_FRAGMENT);

		if (rem)
			info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;

		skb_copy_queue_mapping(tmp, skb);
		tmp->priority = skb->priority;
		tmp->dev = skb->dev;

		/* copy header and data */
		skb_put_data(tmp, skb->data, hdrlen);
		skb_put_data(tmp, skb->data + pos, fraglen);

		pos += fraglen;
	}

	/* adjust first fragment's length */
	skb_trim(skb, hdrlen + per_fragm);
	return 0;
}
|
|
|
|
|
2008-06-30 21:10:44 +08:00
|
|
|
/*
 * TX handler: fragment the frame if needed.
 *
 * Moves tx->skb onto tx->skbs unconditionally, then either passes the
 * frame through (DONTFRAG set, hardware does fragmentation) or splits it
 * via ieee80211_fragment() and fixes up frame_control/seq_ctrl/rate flags
 * on every fragment.  Returns TX_DROP on A-MPDU misuse, internal errors
 * or allocation failure.
 */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	int frag_threshold = tx->local->hw.wiphy->frag_threshold;
	int hdrlen;
	int fragnum;

	/* no matter what happens, tx->skb moves to tx->skbs */
	__skb_queue_tail(&tx->skbs, skb);
	tx->skb = NULL;

	if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
		return TX_CONTINUE;

	/* hardware/driver handles fragmentation itself */
	if (ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG))
		return TX_CONTINUE;

	/*
	 * Warn when submitting a fragmented A-MPDU frame and drop it.
	 * This scenario is handled in ieee80211_tx_prepare but extra
	 * caution taken here as fragmented ampdu may cause Tx stop.
	 */
	if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
		return TX_DROP;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	/* internal error, why isn't DONTFRAG set? */
	if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
		return TX_DROP;

	/*
	 * Now fragment the frame. This will allocate all the fragments and
	 * chain them (using skb as the first fragment) to skb->next.
	 * During transmission, we will remove the successfully transmitted
	 * fragments from this list. When the low-level driver rejects one
	 * of the fragments then we will simply pretend to accept the skb
	 * but store it away as pending.
	 */
	if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
		return TX_DROP;

	/* update duration/seq/flags of fragments */
	fragnum = 0;

	skb_queue_walk(&tx->skbs, skb) {
		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);

		hdr = (void *)skb->data;
		info = IEEE80211_SKB_CB(skb);

		if (!skb_queue_is_last(&tx->skbs, skb)) {
			hdr->frame_control |= morefrags;
			/*
			 * No multi-rate retries for fragmented frames, that
			 * would completely throw off the NAV at other STAs.
			 */
			info->control.rates[1].idx = -1;
			info->control.rates[2].idx = -1;
			info->control.rates[3].idx = -1;
			BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4);
			info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
		} else {
			hdr->frame_control &= ~morefrags;
		}
		/* fragment number lives in the low bits of seq_ctrl */
		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
		fragnum++;
	}

	return TX_CONTINUE;
}
|
|
|
|
|
2009-08-10 22:01:54 +08:00
|
|
|
static ieee80211_tx_result debug_noinline
|
|
|
|
ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
|
|
|
|
{
|
2011-11-16 22:28:55 +08:00
|
|
|
struct sk_buff *skb;
|
2013-02-05 17:55:21 +08:00
|
|
|
int ac = -1;
|
2009-08-10 22:01:54 +08:00
|
|
|
|
|
|
|
if (!tx->sta)
|
|
|
|
return TX_CONTINUE;
|
|
|
|
|
2011-11-16 22:28:55 +08:00
|
|
|
skb_queue_walk(&tx->skbs, skb) {
|
2013-02-05 17:55:21 +08:00
|
|
|
ac = skb_get_queue_mapping(skb);
|
2015-10-16 23:54:47 +08:00
|
|
|
tx->sta->tx_stats.bytes[ac] += skb->len;
|
2011-11-16 22:28:55 +08:00
|
|
|
}
|
2013-02-05 17:55:21 +08:00
|
|
|
if (ac >= 0)
|
2015-10-16 23:54:47 +08:00
|
|
|
tx->sta->tx_stats.packets[ac]++;
|
2009-08-10 22:01:54 +08:00
|
|
|
|
|
|
|
return TX_CONTINUE;
|
|
|
|
}
|
|
|
|
|
2008-06-30 21:10:44 +08:00
|
|
|
static ieee80211_tx_result debug_noinline
|
2008-05-15 18:55:28 +08:00
|
|
|
ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
|
|
|
|
{
|
|
|
|
if (!tx->key)
|
|
|
|
return TX_CONTINUE;
|
|
|
|
|
2010-08-10 15:46:38 +08:00
|
|
|
switch (tx->key->conf.cipher) {
|
|
|
|
case WLAN_CIPHER_SUITE_WEP40:
|
|
|
|
case WLAN_CIPHER_SUITE_WEP104:
|
2008-05-15 18:55:28 +08:00
|
|
|
return ieee80211_crypto_wep_encrypt(tx);
|
2010-08-10 15:46:38 +08:00
|
|
|
case WLAN_CIPHER_SUITE_TKIP:
|
2008-05-15 18:55:28 +08:00
|
|
|
return ieee80211_crypto_tkip_encrypt(tx);
|
2010-08-10 15:46:38 +08:00
|
|
|
case WLAN_CIPHER_SUITE_CCMP:
|
2015-01-25 01:52:07 +08:00
|
|
|
return ieee80211_crypto_ccmp_encrypt(
|
|
|
|
tx, IEEE80211_CCMP_MIC_LEN);
|
|
|
|
case WLAN_CIPHER_SUITE_CCMP_256:
|
|
|
|
return ieee80211_crypto_ccmp_encrypt(
|
|
|
|
tx, IEEE80211_CCMP_256_MIC_LEN);
|
2010-08-10 15:46:38 +08:00
|
|
|
case WLAN_CIPHER_SUITE_AES_CMAC:
|
2009-01-08 19:32:02 +08:00
|
|
|
return ieee80211_crypto_aes_cmac_encrypt(tx);
|
2015-01-25 01:52:08 +08:00
|
|
|
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
|
|
|
|
return ieee80211_crypto_aes_cmac_256_encrypt(tx);
|
2015-01-25 01:52:09 +08:00
|
|
|
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
|
|
|
|
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
|
|
|
|
return ieee80211_crypto_aes_gmac_encrypt(tx);
|
2015-01-25 01:52:06 +08:00
|
|
|
case WLAN_CIPHER_SUITE_GCMP:
|
|
|
|
case WLAN_CIPHER_SUITE_GCMP_256:
|
|
|
|
return ieee80211_crypto_gcmp_encrypt(tx);
|
2010-08-27 19:26:52 +08:00
|
|
|
default:
|
2012-01-16 21:18:59 +08:00
|
|
|
return ieee80211_crypto_hw_encrypt(tx);
|
2008-05-15 18:55:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return TX_DROP;
|
|
|
|
}
|
|
|
|
|
2008-06-30 21:10:44 +08:00
|
|
|
/*
 * TX handler: fill in the duration/ID field of every queued fragment.
 *
 * Walks tx->skbs and computes each fragment's duration from whether the
 * destination is a group address and the length of the following
 * fragment (0 for the last one).  PS-Poll frames are left alone since
 * their duration/ID field carries the AID instead.
 */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	int next_len;
	bool group_addr;

	skb_queue_walk(&tx->skbs, skb) {
		hdr = (void *) skb->data;
		if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
			break; /* must not overwrite AID */
		if (!skb_queue_is_last(&tx->skbs, skb)) {
			struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
			next_len = next->len;
		} else
			next_len = 0;
		group_addr = is_multicast_ether_addr(hdr->addr1);

		hdr->duration_id =
			ieee80211_duration(tx, skb, group_addr, next_len);
	}

	return TX_CONTINUE;
}
|
|
|
|
|
2007-07-27 21:43:22 +08:00
|
|
|
/* actual transmit path */
|
|
|
|
|
2010-06-10 16:21:39 +08:00
|
|
|
/*
 * Handle a frame on a TID whose aggregation session is being set up or
 * is operational.
 *
 * Operational sessions mark the frame IEEE80211_TX_CTL_AMPDU; sessions
 * in the middle of setup (no driver txq for the TID) re-check state
 * under the sta lock and may buffer the frame on the tid_tx pending
 * queue.  Returns true when the frame was queued (caller must stop
 * processing it), false when it should continue down the TX path.
 */
static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
				  struct sk_buff *skb,
				  struct ieee80211_tx_info *info,
				  struct tid_ampdu_tx *tid_tx,
				  int tid)
{
	bool queued = false;
	bool reset_agg_timer = false;
	struct sk_buff *purge_skb = NULL;

	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
		info->flags |= IEEE80211_TX_CTL_AMPDU;
		reset_agg_timer = true;
	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/*
		 * nothing -- this aggregation session is being started
		 * but that might still fail with the driver
		 */
	} else if (!tx->sta->sta.txq[tid]) {
		spin_lock(&tx->sta->lock);
		/*
		 * Need to re-check now, because we may get here
		 *
		 * 1) in the window during which the setup is actually
		 *    already done, but not marked yet because not all
		 *    packets are spliced over to the driver pending
		 *    queue yet -- if this happened we acquire the lock
		 *    either before or after the splice happens, but
		 *    need to recheck which of these cases happened.
		 *
		 * 2) during session teardown, if the OPERATIONAL bit
		 *    was cleared due to the teardown but the pointer
		 *    hasn't been assigned NULL yet (or we loaded it
		 *    before it was assigned) -- in this case it may
		 *    now be NULL which means we should just let the
		 *    packet pass through because splicing the frames
		 *    back is already done.
		 */
		tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid);

		if (!tid_tx) {
			/* do nothing, let packet pass through */
		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
			info->flags |= IEEE80211_TX_CTL_AMPDU;
			reset_agg_timer = true;
		} else {
			/* setup still in progress -- buffer the frame */
			queued = true;
			if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
				clear_sta_flag(tx->sta, WLAN_STA_SP);
				ps_dbg(tx->sta->sdata,
				       "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
				       tx->sta->sta.addr, tx->sta->sta.aid);
			}
			info->control.vif = &tx->sdata->vif;
			info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
			info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
			__skb_queue_tail(&tid_tx->pending, skb);
			/* bound the pending queue; drop the oldest frame */
			if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
				purge_skb = __skb_dequeue(&tid_tx->pending);
		}
		spin_unlock(&tx->sta->lock);

		/* free outside the lock */
		if (purge_skb)
			ieee80211_free_txskb(&tx->local->hw, purge_skb);
	}

	/* reset session timer */
	if (reset_agg_timer)
		tid_tx->last_tx = jiffies;

	return queued;
}
|
|
|
|
|
2021-06-29 19:28:53 +08:00
|
|
|
static void
|
|
|
|
ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sta_info *sta,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct rate_control_ref *ref = sdata->local->rate_ctrl;
|
|
|
|
u16 tid;
|
|
|
|
|
|
|
|
if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!sta || !sta->sta.ht_cap.ht_supported ||
|
|
|
|
!sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
|
|
|
|
skb->protocol == sdata->control_port_protocol)
|
|
|
|
return;
|
|
|
|
|
|
|
|
tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
|
|
|
|
if (likely(sta->ampdu_mlme.tid_tx[tid]))
|
|
|
|
return;
|
|
|
|
|
|
|
|
ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
|
|
|
|
}
|
|
|
|
|
2007-09-26 23:53:18 +08:00
|
|
|
/*
|
|
|
|
* initialises @tx
|
2015-03-20 21:18:27 +08:00
|
|
|
* pass %NULL for the station if unknown, a valid pointer if known
|
|
|
|
* or an ERR_PTR() if the station is known not to exist
|
2007-09-26 23:53:18 +08:00
|
|
|
*/
|
2008-02-01 02:48:20 +08:00
|
|
|
static ieee80211_tx_result
|
2009-06-17 23:43:56 +08:00
|
|
|
ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ieee80211_tx_data *tx,
|
2015-03-20 21:18:27 +08:00
|
|
|
struct sta_info *sta, struct sk_buff *skb)
|
2007-07-27 21:43:22 +08:00
|
|
|
{
|
2009-06-17 23:43:56 +08:00
|
|
|
struct ieee80211_local *local = sdata->local;
|
2007-09-26 23:53:18 +08:00
|
|
|
struct ieee80211_hdr *hdr;
|
2008-05-15 18:55:29 +08:00
|
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
2021-06-29 19:28:53 +08:00
|
|
|
bool aggr_check = false;
|
2011-10-07 20:01:24 +08:00
|
|
|
int tid;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
|
|
|
memset(tx, 0, sizeof(*tx));
|
|
|
|
tx->skb = skb;
|
|
|
|
tx->local = local;
|
2009-06-17 23:43:56 +08:00
|
|
|
tx->sdata = sdata;
|
2011-11-16 22:28:55 +08:00
|
|
|
__skb_queue_head_init(&tx->skbs);
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2009-03-24 00:28:41 +08:00
|
|
|
/*
|
|
|
|
* If this flag is set to true anywhere, and we get here,
|
|
|
|
* we are doing the needed processing, so remove the flag
|
|
|
|
* now.
|
|
|
|
*/
|
2020-09-08 20:36:57 +08:00
|
|
|
info->control.flags &= ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
|
2009-03-24 00:28:41 +08:00
|
|
|
|
2007-09-26 23:53:18 +08:00
|
|
|
hdr = (struct ieee80211_hdr *) skb->data;
|
|
|
|
|
2015-03-20 21:18:27 +08:00
|
|
|
if (likely(sta)) {
|
|
|
|
if (!IS_ERR(sta))
|
|
|
|
tx->sta = sta;
|
|
|
|
} else {
|
|
|
|
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
|
|
|
|
tx->sta = rcu_dereference(sdata->u.vlan.sta);
|
|
|
|
if (!tx->sta && sdata->wdev.use_4addr)
|
|
|
|
return TX_DROP;
|
2021-02-06 19:51:12 +08:00
|
|
|
} else if (tx->sdata->control_port_protocol == tx->skb->protocol) {
|
2015-03-20 21:18:27 +08:00
|
|
|
tx->sta = sta_info_get_bss(sdata, hdr->addr1);
|
|
|
|
}
|
2021-06-29 19:28:53 +08:00
|
|
|
if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) {
|
2015-03-20 21:18:27 +08:00
|
|
|
tx->sta = sta_info_get(sdata, hdr->addr1);
|
2021-06-29 19:28:53 +08:00
|
|
|
aggr_check = true;
|
|
|
|
}
|
2010-01-09 01:15:13 +08:00
|
|
|
}
|
2007-09-26 23:53:18 +08:00
|
|
|
|
2009-03-24 00:28:41 +08:00
|
|
|
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
|
2011-09-29 22:04:41 +08:00
|
|
|
!ieee80211_is_qos_nullfunc(hdr->frame_control) &&
|
2015-06-03 03:39:54 +08:00
|
|
|
ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
|
|
|
|
!ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) {
|
2009-03-24 00:28:41 +08:00
|
|
|
struct tid_ampdu_tx *tid_tx;
|
|
|
|
|
2018-02-19 20:48:40 +08:00
|
|
|
tid = ieee80211_get_tid(hdr);
|
2010-06-10 16:21:39 +08:00
|
|
|
tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
|
2021-06-29 19:28:53 +08:00
|
|
|
if (!tid_tx && aggr_check) {
|
|
|
|
ieee80211_aggr_check(sdata, tx->sta, skb);
|
|
|
|
tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
|
|
|
|
}
|
|
|
|
|
2010-06-10 16:21:39 +08:00
|
|
|
if (tid_tx) {
|
|
|
|
bool queued;
|
2009-03-24 00:28:41 +08:00
|
|
|
|
2010-06-10 16:21:39 +08:00
|
|
|
queued = ieee80211_tx_prep_agg(tx, skb, info,
|
|
|
|
tid_tx, tid);
|
|
|
|
|
|
|
|
if (unlikely(queued))
|
|
|
|
return TX_QUEUED;
|
|
|
|
}
|
2008-10-24 12:25:27 +08:00
|
|
|
}
|
|
|
|
|
2007-08-29 05:01:54 +08:00
|
|
|
if (is_multicast_ether_addr(hdr->addr1)) {
|
2008-02-25 23:27:43 +08:00
|
|
|
tx->flags &= ~IEEE80211_TX_UNICAST;
|
2008-05-15 18:55:29 +08:00
|
|
|
info->flags |= IEEE80211_TX_CTL_NO_ACK;
|
2011-11-18 21:20:42 +08:00
|
|
|
} else
|
2008-02-25 23:27:43 +08:00
|
|
|
tx->flags |= IEEE80211_TX_UNICAST;
|
2007-09-26 23:53:18 +08:00
|
|
|
|
2011-10-07 20:01:25 +08:00
|
|
|
if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
|
|
|
|
if (!(tx->flags & IEEE80211_TX_UNICAST) ||
|
|
|
|
skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
|
|
|
|
info->flags & IEEE80211_TX_CTL_AMPDU)
|
|
|
|
info->flags |= IEEE80211_TX_CTL_DONTFRAG;
|
2007-09-26 23:53:18 +08:00
|
|
|
}
|
|
|
|
|
2007-07-27 21:43:22 +08:00
|
|
|
if (!tx->sta)
|
2008-05-15 18:55:29 +08:00
|
|
|
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
|
2015-09-24 20:59:49 +08:00
|
|
|
else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
|
2008-05-15 18:55:29 +08:00
|
|
|
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
|
2015-09-24 20:59:49 +08:00
|
|
|
ieee80211_check_fast_xmit(tx->sta);
|
|
|
|
}
|
2007-09-26 23:53:18 +08:00
|
|
|
|
2008-05-15 18:55:29 +08:00
|
|
|
info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2008-02-01 02:48:20 +08:00
|
|
|
return TX_CONTINUE;
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2016-05-19 16:37:48 +08:00
|
|
|
/*
 * Pick the intermediate software queue (txq_info) that @skb should be
 * enqueued on, or NULL if the frame must bypass the TXQ path entirely.
 *
 * Returns NULL for frames that must not sit on a TXQ: after-DTIM
 * broadcasts, PS-poll/uAPSD responses, and frames for stations that are
 * not (yet) uploaded to the driver.
 */
static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
					  struct ieee80211_vif *vif,
					  struct sta_info *sta,
					  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_txq *txq = NULL;

	/* Buffered-release and after-DTIM frames are driven directly. */
	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
		return NULL;

	/* Non-data (e.g. management) frames, unless using 802.11 encap
	 * offload: they may go on the special per-sta MMPDU queue. */
	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
		if ((!ieee80211_is_mgmt(hdr->frame_control) ||
		     ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
		     vif->type == NL80211_IFTYPE_STATION) &&
		    sta && sta->uploaded) {
			/*
			 * This will be NULL if the driver didn't set the
			 * opt-in hardware flag.
			 */
			txq = sta->sta.txq[IEEE80211_NUM_TIDS];
		}
	} else if (sta) {
		/* Regular data frame: per-sta, per-TID queue. */
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (!sta->uploaded)
			return NULL;

		txq = sta->sta.txq[tid];
	} else if (vif) {
		/* No station (e.g. multicast): per-vif queue. */
		txq = vif->txq;
	}

	if (!txq)
		return NULL;

	return to_txq_info(txq);
}
|
2015-03-28 04:30:37 +08:00
|
|
|
|
2016-05-19 16:37:51 +08:00
|
|
|
/* Stamp the skb with the current CoDel time so sojourn time in the
 * intermediate queues can be measured at dequeue. */
static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
{
	IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
}
|
|
|
|
|
|
|
|
/* CoDel callback: packet length used for byte accounting. */
static u32 codel_skb_len_func(const struct sk_buff *skb)
{
	return skb->len;
}
|
|
|
|
|
|
|
|
static codel_time_t codel_skb_time_func(const struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
const struct ieee80211_tx_info *info;
|
|
|
|
|
|
|
|
info = (const struct ieee80211_tx_info *)skb->cb;
|
|
|
|
return info->control.enqueue_time;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct sk_buff *codel_dequeue_func(struct codel_vars *cvars,
|
|
|
|
void *ctx)
|
|
|
|
{
|
|
|
|
struct ieee80211_local *local;
|
|
|
|
struct txq_info *txqi;
|
|
|
|
struct fq *fq;
|
|
|
|
struct fq_flow *flow;
|
|
|
|
|
|
|
|
txqi = ctx;
|
|
|
|
local = vif_to_sdata(txqi->txq.vif)->local;
|
|
|
|
fq = &local->fq;
|
|
|
|
|
|
|
|
if (cvars == &txqi->def_cvars)
|
2020-12-19 02:47:14 +08:00
|
|
|
flow = &txqi->tin.default_flow;
|
2016-05-19 16:37:51 +08:00
|
|
|
else
|
|
|
|
flow = &fq->flows[cvars - local->cvars];
|
|
|
|
|
|
|
|
return fq_flow_dequeue(fq, flow);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void codel_drop_func(struct sk_buff *skb,
|
|
|
|
void *ctx)
|
|
|
|
{
|
|
|
|
struct ieee80211_local *local;
|
|
|
|
struct ieee80211_hw *hw;
|
|
|
|
struct txq_info *txqi;
|
|
|
|
|
|
|
|
txqi = ctx;
|
|
|
|
local = vif_to_sdata(txqi->txq.vif)->local;
|
|
|
|
hw = &local->hw;
|
|
|
|
|
|
|
|
ieee80211_free_txskb(hw, skb);
|
|
|
|
}
|
|
|
|
|
2016-05-19 16:37:49 +08:00
|
|
|
/*
 * fq callback: dequeue a packet from @flow, running it through CoDel.
 *
 * CoDel parameters come from the station when the txq is tied to one
 * (per-sta cparams), otherwise from the device-wide defaults; stats and
 * the default-flow vars are per-txq, non-default flow vars are indexed
 * into local->cvars in parallel with fq->flows.
 */
static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
					   struct fq_tin *tin,
					   struct fq_flow *flow)
{
	struct ieee80211_local *local;
	struct txq_info *txqi;
	struct codel_vars *cvars;
	struct codel_params *cparams;
	struct codel_stats *cstats;

	local = container_of(fq, struct ieee80211_local, fq);
	txqi = container_of(tin, struct txq_info, tin);
	cstats = &txqi->cstats;

	if (txqi->txq.sta) {
		struct sta_info *sta = container_of(txqi->txq.sta,
						    struct sta_info, sta);
		cparams = &sta->cparams;
	} else {
		cparams = &local->cparams;
	}

	if (flow == &tin->default_flow)
		cvars = &txqi->def_cvars;
	else
		cvars = &local->cvars[flow - fq->flows];

	return codel_dequeue(txqi,
			     &flow->backlog,
			     cparams,
			     cvars,
			     cstats,
			     codel_skb_len_func,
			     codel_skb_time_func,
			     codel_drop_func,
			     codel_dequeue_func);
}
|
|
|
|
|
|
|
|
static void fq_skb_free_func(struct fq *fq,
|
|
|
|
struct fq_tin *tin,
|
|
|
|
struct fq_flow *flow,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct ieee80211_local *local;
|
|
|
|
|
|
|
|
local = container_of(fq, struct ieee80211_local, fq);
|
|
|
|
ieee80211_free_txskb(&local->hw, skb);
|
|
|
|
}
|
|
|
|
|
2016-05-19 16:37:48 +08:00
|
|
|
/*
 * Enqueue @skb on the intermediate TXQ @txqi.
 *
 * Data frames go through the fq/CoDel machinery; frames on the special
 * MMPDU queue (tid == IEEE80211_NUM_TIDS) are kept on the plain frags
 * list and flagged so the deferred TX handlers still run at dequeue.
 */
static void ieee80211_txq_enqueue(struct ieee80211_local *local,
				  struct txq_info *txqi,
				  struct sk_buff *skb)
{
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;
	/* flow hash must be computed before taking fq->lock */
	u32 flow_idx = fq_flow_idx(fq, skb);

	ieee80211_set_skb_enqueue_time(skb);

	spin_lock_bh(&fq->lock);
	/*
	 * For management frames, don't really apply codel etc.,
	 * we don't want to apply any shaping or anything we just
	 * want to simplify the driver API by having them on the
	 * txqi.
	 */
	if (unlikely(txqi->txq.tid == IEEE80211_NUM_TIDS)) {
		IEEE80211_SKB_CB(skb)->control.flags |=
			IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
		__skb_queue_tail(&txqi->frags, skb);
	} else {
		fq_tin_enqueue(fq, tin, flow_idx, skb,
			       fq_skb_free_func);
	}
	spin_unlock_bh(&fq->lock);
}
|
2015-03-28 04:30:37 +08:00
|
|
|
|
2017-10-06 17:53:33 +08:00
|
|
|
static bool fq_vlan_filter_func(struct fq *fq, struct fq_tin *tin,
|
|
|
|
struct fq_flow *flow, struct sk_buff *skb,
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
|
|
|
|
|
|
return info->control.vif == data;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Remove all frames belonging to an AP_VLAN interface from the owning
 * AP's multicast/vif TXQ.  AP_VLANs have no vif txq of their own; their
 * multicast traffic lives on the AP's queue, so it must be filtered out
 * when the VLAN goes away.
 */
void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
			       struct ieee80211_sub_if_data *sdata)
{
	struct fq *fq = &local->fq;
	struct txq_info *txqi;
	struct fq_tin *tin;
	struct ieee80211_sub_if_data *ap;

	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
		return;

	ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);

	/* AP may not have a txq (e.g. driver without wake_tx_queue) */
	if (!ap->vif.txq)
		return;

	txqi = to_txq_info(ap->vif.txq);
	tin = &txqi->tin;

	spin_lock_bh(&fq->lock);
	fq_tin_filter(fq, tin, fq_vlan_filter_func, &sdata->vif,
		      fq_skb_free_func);
	spin_unlock_bh(&fq->lock);
}
|
|
|
|
|
2016-05-19 16:37:49 +08:00
|
|
|
/*
 * Initialise a txq_info and hook it up to either the vif (sta == NULL,
 * the per-interface multicast queue) or to @sta's txq array slot @tid.
 *
 * For the special MMPDU slot (tid == IEEE80211_NUM_TIDS) the queue is
 * only wired up if the driver opted in via the relevant hardware flag;
 * otherwise sta->sta.txq[tid] is left NULL.
 */
void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
			struct sta_info *sta,
			struct txq_info *txqi, int tid)
{
	fq_tin_init(&txqi->tin);
	codel_vars_init(&txqi->def_cvars);
	codel_stats_init(&txqi->cstats);
	__skb_queue_head_init(&txqi->frags);
	RB_CLEAR_NODE(&txqi->schedule_order);

	txqi->txq.vif = &sdata->vif;

	if (!sta) {
		/* per-vif (multicast) queue */
		sdata->vif.txq = &txqi->txq;
		txqi->txq.tid = 0;
		txqi->txq.ac = IEEE80211_AC_BE;

		return;
	}

	if (tid == IEEE80211_NUM_TIDS) {
		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
			/* Drivers need to opt in to the management MPDU TXQ */
			if (!ieee80211_hw_check(&sdata->local->hw,
						STA_MMPDU_TXQ))
				return;
		} else if (!ieee80211_hw_check(&sdata->local->hw,
					       BUFF_MMPDU_TXQ)) {
			/* Drivers need to opt in to the bufferable MMPDU TXQ */
			return;
		}
		txqi->txq.ac = IEEE80211_AC_VO;
	} else {
		txqi->txq.ac = ieee80211_ac_from_tid(tid);
	}

	txqi->txq.sta = &sta->sta;
	txqi->txq.tid = tid;
	sta->sta.txq[tid] = &txqi->txq;
}
|
2015-03-28 04:30:37 +08:00
|
|
|
|
2016-05-19 16:37:49 +08:00
|
|
|
/*
 * Drop all pending frames on @txqi (both the fq tin and the raw frags
 * list) and remove it from the airtime scheduler.
 */
void ieee80211_txq_purge(struct ieee80211_local *local,
			 struct txq_info *txqi)
{
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;

	spin_lock_bh(&fq->lock);
	fq_tin_reset(fq, tin, fq_skb_free_func);
	ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
	spin_unlock_bh(&fq->lock);

	/* done outside fq->lock; unschedule takes its own locking */
	ieee80211_unschedule_txq(&local->hw, &txqi->txq, true);
}
|
|
|
|
|
2018-05-08 19:03:50 +08:00
|
|
|
/*
 * Synchronise TXQ tuning knobs between wiphy and the fq implementation.
 *
 * Each parameter flows in whichever direction has a value set: a
 * non-zero wiphy value overrides the fq default, otherwise the fq
 * default is exported back into the wiphy so userspace can read it.
 */
void ieee80211_txq_set_params(struct ieee80211_local *local)
{
	if (local->hw.wiphy->txq_limit)
		local->fq.limit = local->hw.wiphy->txq_limit;
	else
		local->hw.wiphy->txq_limit = local->fq.limit;

	if (local->hw.wiphy->txq_memory_limit)
		local->fq.memory_limit = local->hw.wiphy->txq_memory_limit;
	else
		local->hw.wiphy->txq_memory_limit = local->fq.memory_limit;

	if (local->hw.wiphy->txq_quantum)
		local->fq.quantum = local->hw.wiphy->txq_quantum;
	else
		local->hw.wiphy->txq_quantum = local->fq.quantum;
}
|
|
|
|
|
2016-05-19 16:37:49 +08:00
|
|
|
/*
 * Set up the fair-queue / CoDel state for a device using the TXQ path
 * (i.e. drivers implementing wake_tx_queue).
 *
 * Returns 0 on success or a negative errno; on cvars allocation failure
 * the already-initialised fq is torn down again before returning.
 */
int ieee80211_txq_setup_flows(struct ieee80211_local *local)
{
	struct fq *fq = &local->fq;
	int ret;
	int i;
	bool supp_vht = false;
	enum nl80211_band band;

	/* nothing to do for drivers not using the TXQ path */
	if (!local->ops->wake_tx_queue)
		return 0;

	ret = fq_init(fq, 4096);
	if (ret)
		return ret;

	/*
	 * If the hardware doesn't support VHT, it is safe to limit the maximum
	 * queue size. 4 Mbytes is 64 max-size aggregates in 802.11n.
	 */
	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = local->hw.wiphy->bands[band];
		if (!sband)
			continue;

		supp_vht = supp_vht || sband->vht_cap.vht_supported;
	}

	if (!supp_vht)
		fq->memory_limit = 4 << 20; /* 4 Mbytes */

	codel_params_init(&local->cparams);
	local->cparams.interval = MS2TIME(100);
	local->cparams.target = MS2TIME(20);
	local->cparams.ecn = true;

	/* one codel_vars per fq flow, parallel to fq->flows */
	local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
			       GFP_KERNEL);
	if (!local->cvars) {
		spin_lock_bh(&fq->lock);
		fq_reset(fq, fq_skb_free_func);
		spin_unlock_bh(&fq->lock);
		return -ENOMEM;
	}

	for (i = 0; i < fq->flows_cnt; i++)
		codel_vars_init(&local->cvars[i]);

	ieee80211_txq_set_params(local);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Tear down the fq/CoDel state created by ieee80211_txq_setup_flows().
 * No-op for drivers that don't use the TXQ path.
 */
void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
{
	struct fq *fq = &local->fq;

	if (!local->ops->wake_tx_queue)
		return;

	kfree(local->cvars);
	local->cvars = NULL;

	spin_lock_bh(&fq->lock);
	fq_reset(fq, fq_skb_free_func);
	spin_unlock_bh(&fq->lock);
}
|
|
|
|
|
2016-09-23 01:04:20 +08:00
|
|
|
/*
 * Try to put @skb on an intermediate TXQ instead of transmitting it
 * directly.  Returns true if the frame was queued (caller must not
 * touch it any more), false if the caller should continue down the
 * direct-TX path (no wake_tx_queue driver, monitor vif, or no suitable
 * txq for this frame).
 */
static bool ieee80211_queue_skb(struct ieee80211_local *local,
				struct ieee80211_sub_if_data *sdata,
				struct sta_info *sta,
				struct sk_buff *skb)
{
	struct ieee80211_vif *vif;
	struct txq_info *txqi;

	if (!local->ops->wake_tx_queue ||
	    sdata->vif.type == NL80211_IFTYPE_MONITOR)
		return false;

	/* AP_VLANs share the owning AP's queues */
	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);

	vif = &sdata->vif;
	txqi = ieee80211_get_txq(local, vif, sta, skb);

	if (!txqi)
		return false;

	ieee80211_txq_enqueue(local, txqi, skb);

	schedule_and_wake_txq(local, txqi);

	return true;
}
|
|
|
|
|
2011-11-16 23:02:47 +08:00
|
|
|
/*
 * Hand each frame on @skbs to the driver, honouring per-queue stop
 * state.
 *
 * Returns true when the list was fully consumed (transmitted, or purged
 * for dropped off-channel frames); false when the frames were parked on
 * local->pending[] for later retransmission because the hardware queue
 * is stopped.  In all cases @skbs is empty on return.
 */
static bool ieee80211_tx_frags(struct ieee80211_local *local,
			       struct ieee80211_vif *vif,
			       struct sta_info *sta,
			       struct sk_buff_head *skbs,
			       bool txpending)
{
	struct ieee80211_tx_control control = {};
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	skb_queue_walk_safe(skbs, skb, tmp) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		int q = info->hw_queue;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
		if (WARN_ON_ONCE(q >= local->hw.queues)) {
			__skb_unlink(skb, skbs);
			ieee80211_free_txskb(&local->hw, skb);
			continue;
		}
#endif

		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		if (local->queue_stop_reasons[q] ||
		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
			if (unlikely(info->flags &
				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
				if (local->queue_stop_reasons[q] &
				    ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
					/*
					 * Drop off-channel frames if queues
					 * are stopped for any reason other
					 * than off-channel operation. Never
					 * queue them.
					 */
					spin_unlock_irqrestore(
						&local->queue_stop_reason_lock,
						flags);
					ieee80211_purge_tx_queue(&local->hw,
								 skbs);
					return true;
				}
			} else {

				/*
				 * Since queue is stopped, queue up frames for
				 * later transmission from the tx-pending
				 * tasklet when the queue is woken again.
				 */
				if (txpending)
					skb_queue_splice_init(skbs,
							      &local->pending[q]);
				else
					skb_queue_splice_tail_init(skbs,
								   &local->pending[q]);

				spin_unlock_irqrestore(&local->queue_stop_reason_lock,
						       flags);
				return false;
			}
		}
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

		info->control.vif = vif;
		control.sta = sta ? &sta->sta : NULL;

		__skb_unlink(skb, skbs);
		drv_tx(local, &control, skb);
	}

	return true;
}
|
|
|
|
|
|
|
|
/*
 * Returns false if the frame couldn't be transmitted but was queued instead.
 */
static bool __ieee80211_tx(struct ieee80211_local *local,
			   struct sk_buff_head *skbs, struct sta_info *sta,
			   bool txpending)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_vif *vif;
	struct sk_buff *skb;
	bool result;

	if (WARN_ON(skb_queue_empty(skbs)))
		return true;

	/* all frames on the list share the same vif; look at the first */
	skb = skb_peek(skbs);
	info = IEEE80211_SKB_CB(skb);
	sdata = vif_to_sdata(info->control.vif);
	if (sta && !sta->uploaded)
		sta = NULL;

	/* resolve the vif to pass down to the driver */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_MONITOR:
		if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
			vif = &sdata->vif;
			break;
		}
		/* fall back to the shared monitor interface, if any */
		sdata = rcu_dereference(local->monitor_sdata);
		if (sdata) {
			vif = &sdata->vif;
			info->hw_queue =
				vif->hw_queue[skb_get_queue_mapping(skb)];
		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
			/* can't pick a queue without a vif — drop */
			ieee80211_purge_tx_queue(&local->hw, skbs);
			return true;
		} else
			vif = NULL;
		break;
	case NL80211_IFTYPE_AP_VLAN:
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);
		fallthrough;
	default:
		vif = &sdata->vif;
		break;
	}

	result = ieee80211_tx_frags(local, vif, sta, skbs, txpending);

	WARN_ON_ONCE(!skb_queue_empty(skbs));

	return result;
}
|
|
|
|
|
2008-06-20 07:22:30 +08:00
|
|
|
/*
 * Invoke TX handlers, return 0 on success and non-zero if the
 * frame was dropped or queued.
 *
 * The handlers are split into an early and late part. The latter is everything
 * that can be sensitive to reordering, and will be deferred to after packets
 * are dequeued from the intermediate queues (when they are enabled).
 */
static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
{
	ieee80211_tx_result res = TX_DROP;

/* run a handler; any result other than TX_CONTINUE ends the chain
 * (the macro stays defined for invoke_tx_handlers_late()) */
#define CALL_TXH(txh) \
	do {				\
		res = txh(tx);		\
		if (res != TX_CONTINUE)	\
			goto txh_done;	\
	} while (0)

	CALL_TXH(ieee80211_tx_h_dynamic_ps);
	CALL_TXH(ieee80211_tx_h_check_assoc);
	CALL_TXH(ieee80211_tx_h_ps_buf);
	CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
	CALL_TXH(ieee80211_tx_h_select_key);

 txh_done:
	if (unlikely(res == TX_DROP)) {
		I802_DEBUG_INC(tx->local->tx_handlers_drop);
		if (tx->skb)
			ieee80211_free_txskb(&tx->local->hw, tx->skb);
		else
			ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
		return -1;
	} else if (unlikely(res == TX_QUEUED)) {
		I802_DEBUG_INC(tx->local->tx_handlers_queued);
		return -1;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Late handlers can be called while the sta lock is held. Handlers that can
 * cause packets to be generated will cause deadlock!
 */
static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	ieee80211_tx_result res = TX_CONTINUE;

	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
		CALL_TXH(ieee80211_tx_h_rate_ctrl);

	/* retransmissions already went through the remaining handlers */
	if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
		__skb_queue_tail(&tx->skbs, tx->skb);
		tx->skb = NULL;
		goto txh_done;
	}

	CALL_TXH(ieee80211_tx_h_michael_mic_add);
	CALL_TXH(ieee80211_tx_h_sequence);
	CALL_TXH(ieee80211_tx_h_fragment);
	/* handlers after fragment must be aware of tx info fragmentation! */
	CALL_TXH(ieee80211_tx_h_stats);
	CALL_TXH(ieee80211_tx_h_encrypt);
	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
		CALL_TXH(ieee80211_tx_h_calculate_duration);
#undef CALL_TXH

 txh_done:
	if (unlikely(res == TX_DROP)) {
		I802_DEBUG_INC(tx->local->tx_handlers_drop);
		if (tx->skb)
			ieee80211_free_txskb(&tx->local->hw, tx->skb);
		else
			ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
		return -1;
	} else if (unlikely(res == TX_QUEUED)) {
		I802_DEBUG_INC(tx->local->tx_handlers_queued);
		return -1;
	}

	return 0;
}
|
|
|
|
|
2016-09-23 01:04:20 +08:00
|
|
|
/* Run the full TX handler chain: early handlers, then — only if those
 * succeeded — the late ones.  Non-zero means dropped or queued. */
static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
{
	int r = invoke_tx_handlers_early(tx);

	return r ? r : invoke_tx_handlers_late(tx);
}
|
|
|
|
|
2013-10-15 00:01:00 +08:00
|
|
|
/*
 * Prepare a single (unfragmented) frame for transmission on behalf of a
 * driver: run tx-prepare and the full handler chain, and optionally
 * report the station the frame resolved to via @sta.
 *
 * Returns true on success with @skb ready to hand to the hardware;
 * false if the frame was dropped (it has been freed in that case).
 */
bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, struct sk_buff *skb,
			      int band, struct ieee80211_sta **sta)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_data tx;
	struct sk_buff *skb2;

	if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP)
		return false;

	info->band = band;
	info->control.vif = vif;
	info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)];

	if (invoke_tx_handlers(&tx))
		return false;

	if (sta) {
		if (tx.sta)
			*sta = &tx.sta->sta;
		else
			*sta = NULL;
	}

	/* this function isn't suitable for fragmented data frames */
	skb2 = __skb_dequeue(&tx.skbs);
	if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) {
		ieee80211_free_txskb(hw, skb2);
		ieee80211_purge_tx_queue(hw, &tx.skbs);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
|
|
|
|
|
2011-02-24 21:42:06 +08:00
|
|
|
/*
 * Returns false if the frame couldn't be transmitted but was queued instead.
 */
static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
			 struct sta_info *sta, struct sk_buff *skb,
			 bool txpending)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_data tx;
	ieee80211_tx_result res_prepare;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool result = true;

	/* too short even for a frame control + addr1 header */
	if (unlikely(skb->len < 10)) {
		dev_kfree_skb(skb);
		return true;
	}

	/* initialises tx */
	res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);

	if (unlikely(res_prepare == TX_DROP)) {
		ieee80211_free_txskb(&local->hw, skb);
		return true;
	} else if (unlikely(res_prepare == TX_QUEUED)) {
		return true;
	}

	/* set up hw_queue value early */
	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
	    !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
		info->hw_queue =
			sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

	if (invoke_tx_handlers_early(&tx))
		return true;

	/* if the frame lands on an intermediate TXQ, the late handlers
	 * run later, at dequeue time */
	if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
		return true;

	if (!invoke_tx_handlers_late(&tx))
		result = __ieee80211_tx(local, &tx.skbs, tx.sta, txpending);

	return result;
}
|
|
|
|
|
|
|
|
/* device xmit handlers */
|
|
|
|
|
2020-10-09 19:25:41 +08:00
|
|
|
/* Expected encryption treatment of an outgoing frame, used to size
 * crypto head/tailroom in ieee80211_skb_resize(). */
enum ieee80211_encrypt {
	ENCRYPT_NO,	/* frame will not be encrypted */
	ENCRYPT_MGMT,	/* management frame (always needs crypto tailroom) */
	ENCRYPT_DATA,	/* data frame (tailroom only if the vif needs it) */
};
|
|
|
|
|
2011-06-28 21:11:37 +08:00
|
|
|
/*
 * Make sure @skb has @head_need bytes of headroom plus whatever crypto
 * tailroom @encrypt implies, reallocating the skb head if necessary
 * (also when the skb is cloned and can't be written in place).
 *
 * Returns 0 on success, -ENOMEM if reallocation failed.
 */
static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
				struct sk_buff *skb,
				int head_need,
				enum ieee80211_encrypt encrypt)
{
	struct ieee80211_local *local = sdata->local;
	bool enc_tailroom;
	int tail_need = 0;

	/* mgmt frames always need crypto tailroom; data frames only when
	 * the interface currently has keys requiring it */
	enc_tailroom = encrypt == ENCRYPT_MGMT ||
		       (encrypt == ENCRYPT_DATA &&
			sdata->crypto_tx_tailroom_needed_cnt);

	if (enc_tailroom) {
		tail_need = IEEE80211_ENCRYPT_TAILROOM;
		tail_need -= skb_tailroom(skb);
		tail_need = max_t(int, tail_need, 0);
	}

	if (skb_cloned(skb) &&
	    (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
	     !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
		I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
	else if (head_need || tail_need)
		I802_DEBUG_INC(local->tx_expand_skb_head);
	else
		return 0; /* enough room already */

	if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
		wiphy_debug(local->hw.wiphy,
			    "failed to reallocate TX buffer\n");
		return -ENOMEM;
	}

	return 0;
}
|
|
|
|
|
2015-03-20 21:18:27 +08:00
|
|
|
/*
 * Final per-frame transmit preparation for an 802.11 frame: size the
 * skb for driver/crypto headroom, do mesh next-hop resolution and QoS
 * header setup, then hand off to ieee80211_tx().
 *
 * Consumes the skb in all cases (transmitted, queued, or freed).
 */
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
		    struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int headroom;
	enum ieee80211_encrypt encrypt;

	/* classify for head/tailroom sizing in ieee80211_skb_resize() */
	if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
		encrypt = ENCRYPT_NO;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		encrypt = ENCRYPT_MGMT;
	else
		encrypt = ENCRYPT_DATA;

	headroom = local->tx_headroom;
	if (encrypt != ENCRYPT_NO)
		headroom += sdata->encrypt_headroom;
	headroom -= skb_headroom(skb);
	headroom = max_t(int, 0, headroom);

	if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) {
		ieee80211_free_txskb(&local->hw, skb);
		return;
	}

	/* reload after potential resize */
	hdr = (struct ieee80211_hdr *) skb->data;
	info->control.vif = &sdata->vif;

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		if (ieee80211_is_data(hdr->frame_control) &&
		    is_unicast_ether_addr(hdr->addr1)) {
			if (mesh_nexthop_resolve(sdata, skb))
				return; /* skb queued: don't free */
		} else {
			ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
		}
	}

	ieee80211_set_qos_hdr(sdata, skb);
	ieee80211_tx(sdata, sta, skb, false);
}
|
|
|
|
|
2021-05-30 21:32:26 +08:00
|
|
|
static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct ieee80211_radiotap_header *rthdr =
|
|
|
|
(struct ieee80211_radiotap_header *)skb->data;
|
|
|
|
|
|
|
|
/* check for not even having the fixed radiotap header part */
|
|
|
|
if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
|
|
|
|
return false; /* too short to be possibly valid */
|
|
|
|
|
|
|
|
/* is it a header version we can trust to find length from? */
|
|
|
|
if (unlikely(rthdr->it_version))
|
|
|
|
return false; /* only version 0 is supported */
|
|
|
|
|
|
|
|
/* does the skb contain enough to deliver on the alleged length? */
|
|
|
|
if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
|
|
|
|
return false; /* skb too short for claimed rt header extent */
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-07-23 18:01:53 +08:00
|
|
|
/*
 * Parse the radiotap header prepended to an injected frame and translate
 * the fields we support (flags, TX flags, rate, retries, MCS, VHT) into
 * tx_info control flags and rate-control entries.  May trim a trailing
 * FCS from the skb.  Returns false if the radiotap data is malformed.
 */
bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_radiotap_iterator iterator;
	struct ieee80211_radiotap_header *rthdr =
		(struct ieee80211_radiotap_header *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
						   NULL);
	u16 txflags;
	u16 rate = 0;
	bool rate_found = false;
	u8 rate_retries = 0;
	u16 rate_flags = 0;
	u8 mcs_known, mcs_flags, mcs_bw;
	u16 vht_known;
	u8 vht_mcs = 0, vht_nss = 0;
	int i;

	if (!ieee80211_validate_radiotap_len(skb))
		return false;

	/* defaults for injection; individual radiotap flags may clear these */
	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
		       IEEE80211_TX_CTL_DONTFRAG;

	/*
	 * for every radiotap entry that is present
	 * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
	 * entries present, or -EINVAL on error)
	 */
	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&iterator);

		if (ret)
			continue;

		/* see if this argument is something we can use */
		switch (iterator.this_arg_index) {
		/*
		 * You must take care when dereferencing iterator.this_arg
		 * for multibyte types... the pointer is not aligned.  Use
		 * get_unaligned((type *)iterator.this_arg) to dereference
		 * iterator.this_arg for type "type" safely on all arches.
		 */
		case IEEE80211_RADIOTAP_FLAGS:
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
				/*
				 * this indicates that the skb we have been
				 * handed has the 32-bit FCS CRC at the end...
				 * we should react to that by snipping it off
				 * because it will be recomputed and added
				 * on transmission
				 */
				if (skb->len < (iterator._max_length + FCS_LEN))
					return false;

				skb_trim(skb, skb->len - FCS_LEN);
			}
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
				info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
			if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
				info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
			break;

		case IEEE80211_RADIOTAP_TX_FLAGS:
			txflags = get_unaligned_le16(iterator.this_arg);
			if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
				info->flags |= IEEE80211_TX_CTL_NO_ACK;
			if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO)
				info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO;
			if (txflags & IEEE80211_RADIOTAP_F_TX_ORDER)
				info->control.flags |=
					IEEE80211_TX_CTRL_DONT_REORDER;
			break;

		case IEEE80211_RADIOTAP_RATE:
			/* legacy rate; per radiotap spec in 500 kbps units */
			rate = *iterator.this_arg;
			rate_flags = 0;
			rate_found = true;
			break;

		case IEEE80211_RADIOTAP_DATA_RETRIES:
			rate_retries = *iterator.this_arg;
			break;

		case IEEE80211_RADIOTAP_MCS:
			mcs_known = iterator.this_arg[0];
			mcs_flags = iterator.this_arg[1];
			if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS))
				break;

			rate_found = true;
			rate = iterator.this_arg[2];
			rate_flags = IEEE80211_TX_RC_MCS;

			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI &&
			    mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
				rate_flags |= IEEE80211_TX_RC_SHORT_GI;

			mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK;
			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
			    mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40)
				rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;

			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_FEC &&
			    mcs_flags & IEEE80211_RADIOTAP_MCS_FEC_LDPC)
				info->flags |= IEEE80211_TX_CTL_LDPC;

			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_STBC) {
				u8 stbc = u8_get_bits(mcs_flags,
						      IEEE80211_RADIOTAP_MCS_STBC_MASK);

				info->flags |=
					u32_encode_bits(stbc,
							IEEE80211_TX_CTL_STBC);
			}
			break;

		case IEEE80211_RADIOTAP_VHT:
			vht_known = get_unaligned_le16(iterator.this_arg);
			rate_found = true;

			rate_flags = IEEE80211_TX_RC_VHT_MCS;
			if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) &&
			    (iterator.this_arg[2] &
			     IEEE80211_RADIOTAP_VHT_FLAG_SGI))
				rate_flags |= IEEE80211_TX_RC_SHORT_GI;
			if (vht_known &
			    IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
				/* bandwidth codes per the radiotap VHT field */
				if (iterator.this_arg[3] == 1)
					rate_flags |=
						IEEE80211_TX_RC_40_MHZ_WIDTH;
				else if (iterator.this_arg[3] == 4)
					rate_flags |=
						IEEE80211_TX_RC_80_MHZ_WIDTH;
				else if (iterator.this_arg[3] == 11)
					rate_flags |=
						IEEE80211_TX_RC_160_MHZ_WIDTH;
			}

			/* mcs_nss[0]: high nibble MCS, low nibble NSS;
			 * clamp out-of-range values to safe defaults
			 */
			vht_mcs = iterator.this_arg[4] >> 4;
			if (vht_mcs > 11)
				vht_mcs = 0;
			vht_nss = iterator.this_arg[4] & 0xF;
			if (!vht_nss || vht_nss > 8)
				vht_nss = 1;
			break;

		/*
		 * Please update the file
		 * Documentation/networking/mac80211-injection.rst
		 * when parsing new fields here.
		 */

		default:
			break;
		}
	}

	if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
		return false;

	if (rate_found) {
		struct ieee80211_supported_band *sband =
			local->hw.wiphy->bands[info->band];

		info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;

		for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
			info->control.rates[i].idx = -1;
			info->control.rates[i].flags = 0;
			info->control.rates[i].count = 0;
		}

		if (rate_flags & IEEE80211_TX_RC_MCS) {
			info->control.rates[0].idx = rate;
		} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
			ieee80211_rate_set_vht(info->control.rates, vht_mcs,
					       vht_nss);
		} else if (sband) {
			/* map the legacy 500 kbps rate to the band's bitrate
			 * table (entries presumably in 100 kbps units, hence
			 * the *5 — matches ieee80211_rate usage elsewhere)
			 */
			for (i = 0; i < sband->n_bitrates; i++) {
				if (rate * 5 != sband->bitrates[i].bitrate)
					continue;

				info->control.rates[0].idx = i;
				break;
			}
		}

		/* no usable rate resolved: drop the injection request */
		if (info->control.rates[0].idx < 0)
			info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT;

		info->control.rates[0].flags = rate_flags;
		info->control.rates[0].count = min_t(u8, rate_retries + 1,
						     local->hw.max_rate_tries);
	}

	return true;
}
|
|
|
|
|
2009-09-01 03:50:57 +08:00
|
|
|
/*
 * ndo_start_xmit handler for monitor interfaces: transmit an injected
 * frame (radiotap header + precooked 802.11 header).  Validates the
 * radiotap header, picks the sdata/channel context to transmit on,
 * applies the radiotap injection parameters, strips the radiotap
 * header and hands the frame to ieee80211_xmit().  Always returns
 * NETDEV_TX_OK; the skb is consumed on every path.
 */
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_chanctx_conf *chanctx_conf;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ieee80211_sub_if_data *tmp_sdata, *sdata;
	struct cfg80211_chan_def *chandef;
	u16 len_rthdr;
	int hdrlen;

	memset(info, 0, sizeof(*info));
	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
		      IEEE80211_TX_CTL_INJECTED;

	/* Sanity-check the length of the radiotap header */
	if (!ieee80211_validate_radiotap_len(skb))
		goto fail;

	/* we now know there is a radiotap header with a length we can use */
	len_rthdr = ieee80211_get_radiotap_len(skb->data);

	/*
	 * fix up the pointers accounting for the radiotap
	 * header still being in there.  We are being given
	 * a precooked IEEE80211 header so no need for
	 * normal processing
	 */
	skb_set_mac_header(skb, len_rthdr);
	/*
	 * these are just fixed to the end of the rt area since we
	 * don't have any better information and at this point, nobody cares
	 */
	skb_set_network_header(skb, len_rthdr);
	skb_set_transport_header(skb, len_rthdr);

	/* need at least frame_control (2 bytes) of the 802.11 header */
	if (skb->len < len_rthdr + 2)
		goto fail;

	hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	if (skb->len < len_rthdr + hdrlen)
		goto fail;

	/*
	 * Initialize skb->protocol if the injected frame is a data frame
	 * carrying a rfc1042 header
	 */
	if (ieee80211_is_data(hdr->frame_control) &&
	    skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
		u8 *payload = (u8 *)hdr + hdrlen;

		if (ether_addr_equal(payload, rfc1042_header))
			skb->protocol = cpu_to_be16((payload[6] << 8) |
						    payload[7]);
	}

	rcu_read_lock();

	/*
	 * We process outgoing injected frames that have a local address
	 * we handle as though they are non-injected frames.
	 * This code here isn't entirely correct, the local MAC address
	 * isn't always enough to find the interface to use; for proper
	 * VLAN support we have an nl80211-based mechanism.
	 *
	 * This is necessary, for example, for old hostapd versions that
	 * don't use nl80211-based management TX/RX.
	 */
	sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(tmp_sdata))
			continue;
		if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
		    tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			continue;
		if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
			sdata = tmp_sdata;
			break;
		}
	}

	/* fall back to the monitor sdata's channel context if needed */
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (!chanctx_conf) {
		tmp_sdata = rcu_dereference(local->monitor_sdata);
		if (tmp_sdata)
			chanctx_conf =
				rcu_dereference(tmp_sdata->vif.chanctx_conf);
	}

	if (chanctx_conf)
		chandef = &chanctx_conf->def;
	else if (!local->use_chanctx)
		chandef = &local->_oper_chandef;
	else
		goto fail_rcu;

	/*
	 * Frame injection is not allowed if beaconing is not allowed
	 * or if we need radar detection. Beaconing is usually not allowed when
	 * the mode or operation (Adhoc, AP, Mesh) does not support DFS.
	 * Passive scan is also used in world regulatory domains where
	 * your country is not known and as such it should be treated as
	 * NO TX unless the channel is explicitly allowed in which case
	 * your current regulatory domain would not have the passive scan
	 * flag.
	 *
	 * Since AP mode uses monitor interfaces to inject/TX management
	 * frames we can make AP mode the exception to this rule once it
	 * supports radar detection as its implementation can deal with
	 * radar detection by itself. We can do that later by adding a
	 * monitor flag interfaces used for AP support.
	 */
	if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef,
				     sdata->vif.type))
		goto fail_rcu;

	info->band = chandef->chan->band;

	/* Initialize skb->priority according to frame type and TID class,
	 * with respect to the sub interface that the frame will actually
	 * be transmitted on. If the DONT_REORDER flag is set, the original
	 * skb-priority is preserved to assure frames injected with this
	 * flag are not reordered relative to each other.
	 */
	ieee80211_select_queue_80211(sdata, skb, hdr);
	skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));

	/*
	 * Process the radiotap header. This will now take into account the
	 * selected chandef above to accurately set injection rates and
	 * retransmissions.
	 */
	if (!ieee80211_parse_tx_radiotap(skb, dev))
		goto fail_rcu;

	/* remove the injection radiotap header */
	skb_pull(skb, len_rthdr);

	ieee80211_xmit(sdata, NULL, skb);
	rcu_read_unlock();

	return NETDEV_TX_OK;

fail_rcu:
	rcu_read_unlock();
fail:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK; /* meaning, we dealt with the skb */
}
|
|
|
|
|
2015-03-21 16:13:45 +08:00
|
|
|
static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb)
|
2013-11-19 01:06:45 +08:00
|
|
|
{
|
2015-03-21 16:13:45 +08:00
|
|
|
u16 ethertype = (skb->data[12] << 8) | skb->data[13];
|
2013-11-19 01:06:45 +08:00
|
|
|
|
2015-03-21 16:13:45 +08:00
|
|
|
return ethertype == ETH_P_TDLS &&
|
|
|
|
skb->len > 14 &&
|
|
|
|
skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
|
|
|
|
}
|
|
|
|
|
2019-11-25 18:04:37 +08:00
|
|
|
/*
 * Look up the receiver station for an 802.3-framed skb (DA in the first
 * 6 bytes) on the given interface.  On success returns 0 and sets
 * *sta_out to the station, to ERR_PTR(-ENOENT) when no station entry
 * applies (e.g. multicast), or to NULL for mesh where resolution happens
 * later.  Returns a negative error when the frame must be dropped.
 * Caller must hold the RCU read lock.
 */
int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
			    struct sk_buff *skb,
			    struct sta_info **sta_out)
{
	struct sta_info *sta;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP_VLAN:
		/* 4-addr VLANs have a single fixed peer station */
		sta = rcu_dereference(sdata->u.vlan.sta);
		if (sta) {
			*sta_out = sta;
			return 0;
		} else if (sdata->wdev.use_4addr) {
			return -ENOLINK;
		}
		fallthrough;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_OCB:
	case NL80211_IFTYPE_ADHOC:
		if (is_multicast_ether_addr(skb->data)) {
			*sta_out = ERR_PTR(-ENOENT);
			return 0;
		}
		sta = sta_info_get_bss(sdata, skb->data);
		break;
#ifdef CONFIG_MAC80211_MESH
	case NL80211_IFTYPE_MESH_POINT:
		/* determined much later */
		*sta_out = NULL;
		return 0;
#endif
	case NL80211_IFTYPE_STATION:
		/* prefer a direct TDLS peer over the AP, if authorized */
		if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
			sta = sta_info_get(sdata, skb->data);
			if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
				if (test_sta_flag(sta,
						  WLAN_STA_TDLS_PEER_AUTH)) {
					*sta_out = sta;
					return 0;
				}

				/*
				 * TDLS link during setup - throw out frames to
				 * peer. Allow TDLS-setup frames to unauthorized
				 * peers for the special case of a link teardown
				 * after a TDLS sta is removed due to being
				 * unreachable.
				 */
				if (!ieee80211_is_tdls_setup(skb))
					return -EINVAL;
			}

		}

		/* otherwise send via the AP */
		sta = sta_info_get(sdata, sdata->u.mgd.bssid);
		if (!sta)
			return -ENOLINK;
		break;
	default:
		return -EINVAL;
	}

	*sta_out = sta ?: ERR_PTR(-ENOENT);
	return 0;
}
|
|
|
|
|
2020-05-28 00:03:34 +08:00
|
|
|
/*
 * Clone the skb and register the clone in the ack_status_frames IDR so
 * TX status can later be reported for it.  Sets REQ_TX_STATUS in
 * *info_flags and fills *cookie (if non-NULL) on success.  Returns the
 * allocated IDR id (>= 1) to be carried in the frame's tx_info, or 0 if
 * no status tracking could be set up (allocation/IDR failure).
 */
static u16 ieee80211_store_ack_skb(struct ieee80211_local *local,
				   struct sk_buff *skb,
				   u32 *info_flags,
				   u64 *cookie)
{
	struct sk_buff *ack_skb;
	u16 info_id = 0;

	/* keep the socket reference when one exists, for socket TX status */
	if (skb->sk)
		ack_skb = skb_clone_sk(skb);
	else
		ack_skb = skb_clone(skb, GFP_ATOMIC);

	if (ack_skb) {
		unsigned long flags;
		int id;

		spin_lock_irqsave(&local->ack_status_lock, flags);
		/* ids are 1..0x1fff so 0 can mean "no status requested" */
		id = idr_alloc(&local->ack_status_frames, ack_skb,
			       1, 0x2000, GFP_ATOMIC);
		spin_unlock_irqrestore(&local->ack_status_lock, flags);

		if (id >= 0) {
			info_id = id;
			*info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			if (cookie) {
				*cookie = ieee80211_mgmt_tx_cookie(local);
				IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie;
			}
		} else {
			/* no id available: drop the clone, no tracking */
			kfree_skb(ack_skb);
		}
	}

	return info_id;
}
|
|
|
|
|
2007-07-27 21:43:22 +08:00
|
|
|
/**
|
2014-11-10 00:50:10 +08:00
|
|
|
* ieee80211_build_hdr - build 802.11 header in the given frame
|
|
|
|
* @sdata: virtual interface to build the header for
|
|
|
|
* @skb: the skb to build the header in
|
2014-11-10 00:50:07 +08:00
|
|
|
* @info_flags: skb flags to set
|
2020-09-25 01:25:11 +08:00
|
|
|
* @sta: the station pointer
|
2019-04-12 04:47:25 +08:00
|
|
|
* @ctrl_flags: info control flags to set
|
2020-09-25 01:25:11 +08:00
|
|
|
* @cookie: cookie pointer to fill (if not %NULL)
|
2007-07-27 21:43:22 +08:00
|
|
|
*
|
2014-11-10 00:50:10 +08:00
|
|
|
* This function takes the skb with 802.3 header and reformats the header to
|
|
|
|
* the appropriate IEEE 802.11 header based on which interface the packet is
|
|
|
|
* being transmitted on.
|
|
|
|
*
|
|
|
|
* Note that this function also takes care of the TX status request and
|
|
|
|
* potential unsharing of the SKB - this needs to be interleaved with the
|
|
|
|
* header building.
|
2007-07-27 21:43:22 +08:00
|
|
|
*
|
2014-11-10 00:50:10 +08:00
|
|
|
* The function requires the read-side RCU lock held
|
|
|
|
*
|
|
|
|
* Returns: the (possibly reallocated) skb or an ERR_PTR() code
|
2007-07-27 21:43:22 +08:00
|
|
|
*/
|
2014-11-10 00:50:10 +08:00
|
|
|
static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
|
2015-03-20 21:18:27 +08:00
|
|
|
struct sk_buff *skb, u32 info_flags,
|
2020-05-28 00:03:34 +08:00
|
|
|
struct sta_info *sta, u32 ctrl_flags,
|
|
|
|
u64 *cookie)
|
2007-07-27 21:43:22 +08:00
|
|
|
{
|
2008-09-16 20:18:59 +08:00
|
|
|
struct ieee80211_local *local = sdata->local;
|
2010-12-19 02:30:48 +08:00
|
|
|
struct ieee80211_tx_info *info;
|
2012-07-30 21:11:56 +08:00
|
|
|
int head_need;
|
2008-06-23 07:45:27 +08:00
|
|
|
u16 ethertype, hdrlen, meshhdrlen = 0;
|
|
|
|
__le16 fc;
|
2007-07-27 21:43:22 +08:00
|
|
|
struct ieee80211_hdr hdr;
|
2010-08-21 22:23:29 +08:00
|
|
|
struct ieee80211s_hdr mesh_hdr __maybe_unused;
|
2012-05-30 09:30:41 +08:00
|
|
|
struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
|
2007-07-27 21:43:22 +08:00
|
|
|
const u8 *encaps_data;
|
|
|
|
int encaps_len, skip_header_bytes;
|
2015-03-21 16:13:45 +08:00
|
|
|
bool wme_sta = false, authorized = false;
|
|
|
|
bool tdls_peer;
|
2011-11-06 21:13:34 +08:00
|
|
|
bool multicast;
|
|
|
|
u16 info_id = 0;
|
2012-07-26 23:24:39 +08:00
|
|
|
struct ieee80211_chanctx_conf *chanctx_conf;
|
|
|
|
struct ieee80211_sub_if_data *ap_sdata;
|
2016-04-12 21:56:15 +08:00
|
|
|
enum nl80211_band band;
|
2014-11-10 00:50:10 +08:00
|
|
|
int ret;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2015-03-21 16:13:45 +08:00
|
|
|
if (IS_ERR(sta))
|
|
|
|
sta = NULL;
|
|
|
|
|
2019-03-29 04:01:06 +08:00
|
|
|
#ifdef CONFIG_MAC80211_DEBUGFS
|
|
|
|
if (local->force_tx_status)
|
|
|
|
info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
|
|
|
|
#endif
|
|
|
|
|
2007-07-27 21:43:22 +08:00
|
|
|
/* convert Ethernet header to proper 802.11 header (based on
|
|
|
|
* operation mode) */
|
|
|
|
ethertype = (skb->data[12] << 8) | skb->data[13];
|
2008-06-23 07:45:27 +08:00
|
|
|
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2007-12-19 08:31:27 +08:00
|
|
|
switch (sdata->vif.type) {
|
2008-09-11 06:01:58 +08:00
|
|
|
case NL80211_IFTYPE_AP_VLAN:
|
2015-03-21 16:13:45 +08:00
|
|
|
if (sdata->wdev.use_4addr) {
|
2009-11-11 03:10:05 +08:00
|
|
|
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
|
|
|
|
/* RA TA DA SA */
|
|
|
|
memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
|
2009-11-26 00:46:19 +08:00
|
|
|
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
|
2009-11-11 03:10:05 +08:00
|
|
|
memcpy(hdr.addr3, skb->data, ETH_ALEN);
|
|
|
|
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
|
|
|
|
hdrlen = 30;
|
2011-09-29 22:04:36 +08:00
|
|
|
authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
|
2014-07-22 20:50:47 +08:00
|
|
|
wme_sta = sta->sta.wme;
|
2009-11-11 03:10:05 +08:00
|
|
|
}
|
2012-07-26 23:24:39 +08:00
|
|
|
ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
|
|
|
|
u.ap);
|
|
|
|
chanctx_conf = rcu_dereference(ap_sdata->vif.chanctx_conf);
|
2014-11-10 00:50:10 +08:00
|
|
|
if (!chanctx_conf) {
|
|
|
|
ret = -ENOTCONN;
|
|
|
|
goto free;
|
|
|
|
}
|
2012-11-09 18:39:59 +08:00
|
|
|
band = chanctx_conf->def.chan->band;
|
2015-03-21 16:13:45 +08:00
|
|
|
if (sdata->wdev.use_4addr)
|
2009-11-11 03:10:05 +08:00
|
|
|
break;
|
2020-07-08 04:45:48 +08:00
|
|
|
fallthrough;
|
2009-11-11 03:10:05 +08:00
|
|
|
case NL80211_IFTYPE_AP:
|
2013-01-25 22:14:33 +08:00
|
|
|
if (sdata->vif.type == NL80211_IFTYPE_AP)
|
|
|
|
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
|
2014-11-10 00:50:10 +08:00
|
|
|
if (!chanctx_conf) {
|
|
|
|
ret = -ENOTCONN;
|
|
|
|
goto free;
|
|
|
|
}
|
2008-06-23 07:45:27 +08:00
|
|
|
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
|
2007-07-27 21:43:22 +08:00
|
|
|
/* DA BSSID SA */
|
|
|
|
memcpy(hdr.addr1, skb->data, ETH_ALEN);
|
2009-11-26 00:46:19 +08:00
|
|
|
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
|
2007-07-27 21:43:22 +08:00
|
|
|
memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
|
|
|
|
hdrlen = 24;
|
2012-11-09 18:39:59 +08:00
|
|
|
band = chanctx_conf->def.chan->band;
|
2007-08-29 05:01:54 +08:00
|
|
|
break;
|
2008-02-23 22:17:10 +08:00
|
|
|
#ifdef CONFIG_MAC80211_MESH
|
2008-09-11 06:01:58 +08:00
|
|
|
case NL80211_IFTYPE_MESH_POINT:
|
2012-05-30 09:30:41 +08:00
|
|
|
if (!is_multicast_ether_addr(skb->data)) {
|
2013-02-19 10:04:50 +08:00
|
|
|
struct sta_info *next_hop;
|
|
|
|
bool mpp_lookup = true;
|
|
|
|
|
2013-02-15 21:40:31 +08:00
|
|
|
mpath = mesh_path_lookup(sdata, skb->data);
|
2013-02-19 10:04:50 +08:00
|
|
|
if (mpath) {
|
|
|
|
mpp_lookup = false;
|
|
|
|
next_hop = rcu_dereference(mpath->next_hop);
|
|
|
|
if (!next_hop ||
|
|
|
|
!(mpath->flags & (MESH_PATH_ACTIVE |
|
|
|
|
MESH_PATH_RESOLVING)))
|
|
|
|
mpp_lookup = true;
|
|
|
|
}
|
|
|
|
|
2016-02-03 20:58:37 +08:00
|
|
|
if (mpp_lookup) {
|
2013-02-15 21:40:31 +08:00
|
|
|
mppath = mpp_path_lookup(sdata, skb->data);
|
2016-02-03 20:58:37 +08:00
|
|
|
if (mppath)
|
|
|
|
mppath->exp_time = jiffies;
|
|
|
|
}
|
2013-02-19 10:04:50 +08:00
|
|
|
|
|
|
|
if (mppath && mpath)
|
2016-02-29 09:03:56 +08:00
|
|
|
mesh_path_del(sdata, mpath->dst);
|
2012-05-30 09:30:41 +08:00
|
|
|
}
|
2008-09-22 13:30:32 +08:00
|
|
|
|
2010-12-29 09:28:11 +08:00
|
|
|
/*
|
2011-01-10 14:44:23 +08:00
|
|
|
* Use address extension if it is a packet from
|
|
|
|
* another interface or if we know the destination
|
|
|
|
* is being proxied by a portal (i.e. portal address
|
|
|
|
* differs from proxied address)
|
2010-12-29 09:28:11 +08:00
|
|
|
*/
|
mac80211: Convert compare_ether_addr to ether_addr_equal
Use the new bool function ether_addr_equal to add
some clarity and reduce the likelihood for misuse
of compare_ether_addr for sorting.
Done via cocci script:
$ cat compare_ether_addr.cocci
@@
expression a,b;
@@
- !compare_ether_addr(a, b)
+ ether_addr_equal(a, b)
@@
expression a,b;
@@
- compare_ether_addr(a, b)
+ !ether_addr_equal(a, b)
@@
expression a,b;
@@
- !ether_addr_equal(a, b) == 0
+ ether_addr_equal(a, b)
@@
expression a,b;
@@
- !ether_addr_equal(a, b) != 0
+ !ether_addr_equal(a, b)
@@
expression a,b;
@@
- ether_addr_equal(a, b) == 0
+ !ether_addr_equal(a, b)
@@
expression a,b;
@@
- ether_addr_equal(a, b) != 0
+ ether_addr_equal(a, b)
@@
expression a,b;
@@
- !!ether_addr_equal(a, b)
+ ether_addr_equal(a, b)
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-05-09 02:56:52 +08:00
|
|
|
if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
|
|
|
|
!(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
|
2009-08-11 03:15:48 +08:00
|
|
|
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
|
|
|
|
skb->data, skb->data + ETH_ALEN);
|
2013-02-15 21:40:31 +08:00
|
|
|
meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr,
|
|
|
|
NULL, NULL);
|
2008-09-22 13:30:32 +08:00
|
|
|
} else {
|
2012-08-21 02:28:25 +08:00
|
|
|
/* DS -> MBSS (802.11-2012 13.11.3.3).
|
|
|
|
* For unicast with unknown forwarding information,
|
|
|
|
* destination might be in the MBSS or if that fails
|
|
|
|
* forwarded to another mesh gate. In either case
|
|
|
|
* resolution will be handled in ieee80211_xmit(), so
|
|
|
|
* leave the original DA. This also works for mcast */
|
|
|
|
const u8 *mesh_da = skb->data;
|
|
|
|
|
|
|
|
if (mppath)
|
|
|
|
mesh_da = mppath->mpp;
|
|
|
|
else if (mpath)
|
|
|
|
mesh_da = mpath->dst;
|
2008-09-22 13:30:32 +08:00
|
|
|
|
2009-08-11 03:15:48 +08:00
|
|
|
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
|
2009-11-26 00:46:19 +08:00
|
|
|
mesh_da, sdata->vif.addr);
|
2012-08-21 02:28:25 +08:00
|
|
|
if (is_multicast_ether_addr(mesh_da))
|
|
|
|
/* DA TA mSA AE:SA */
|
2013-02-15 21:40:31 +08:00
|
|
|
meshhdrlen = ieee80211_new_mesh_header(
|
|
|
|
sdata, &mesh_hdr,
|
|
|
|
skb->data + ETH_ALEN, NULL);
|
2009-08-11 03:15:48 +08:00
|
|
|
else
|
2012-08-21 02:28:25 +08:00
|
|
|
/* RA TA mDA mSA AE:DA SA */
|
2013-02-15 21:40:31 +08:00
|
|
|
meshhdrlen = ieee80211_new_mesh_header(
|
|
|
|
sdata, &mesh_hdr, skb->data,
|
|
|
|
skb->data + ETH_ALEN);
|
2008-09-22 13:30:32 +08:00
|
|
|
|
|
|
|
}
|
2012-07-26 23:24:39 +08:00
|
|
|
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
|
2014-11-10 00:50:10 +08:00
|
|
|
if (!chanctx_conf) {
|
|
|
|
ret = -ENOTCONN;
|
|
|
|
goto free;
|
|
|
|
}
|
2012-11-09 18:39:59 +08:00
|
|
|
band = chanctx_conf->def.chan->band;
|
2019-04-12 04:47:26 +08:00
|
|
|
|
|
|
|
/* For injected frames, fill RA right away as nexthop lookup
|
|
|
|
* will be skipped.
|
|
|
|
*/
|
|
|
|
if ((ctrl_flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP) &&
|
|
|
|
is_zero_ether_addr(hdr.addr1))
|
|
|
|
memcpy(hdr.addr1, skb->data, ETH_ALEN);
|
2008-02-23 22:17:10 +08:00
|
|
|
break;
|
|
|
|
#endif
|
2008-09-11 06:01:58 +08:00
|
|
|
case NL80211_IFTYPE_STATION:
|
2015-03-21 16:13:45 +08:00
|
|
|
/* we already did checks when looking up the RA STA */
|
|
|
|
tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER);
|
2011-09-28 19:12:54 +08:00
|
|
|
|
2015-03-21 16:13:45 +08:00
|
|
|
if (tdls_peer) {
|
2011-09-28 19:12:54 +08:00
|
|
|
/* DA SA BSSID */
|
|
|
|
memcpy(hdr.addr1, skb->data, ETH_ALEN);
|
|
|
|
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
|
|
|
|
memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
|
|
|
|
hdrlen = 24;
|
|
|
|
} else if (sdata->u.mgd.use_4addr &&
|
|
|
|
cpu_to_be16(ethertype) != sdata->control_port_protocol) {
|
|
|
|
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
|
|
|
|
IEEE80211_FCTL_TODS);
|
2009-11-11 03:10:05 +08:00
|
|
|
/* RA TA DA SA */
|
2011-09-28 19:12:54 +08:00
|
|
|
memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
|
2009-11-26 00:46:19 +08:00
|
|
|
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
|
2009-11-11 03:10:05 +08:00
|
|
|
memcpy(hdr.addr3, skb->data, ETH_ALEN);
|
|
|
|
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
|
|
|
|
hdrlen = 30;
|
|
|
|
} else {
|
|
|
|
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
|
|
|
|
/* BSSID SA DA */
|
2011-09-28 19:12:54 +08:00
|
|
|
memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
|
2009-11-11 03:10:05 +08:00
|
|
|
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
|
|
|
|
memcpy(hdr.addr3, skb->data, ETH_ALEN);
|
|
|
|
hdrlen = 24;
|
|
|
|
}
|
2012-07-26 23:24:39 +08:00
|
|
|
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
|
2014-11-10 00:50:10 +08:00
|
|
|
if (!chanctx_conf) {
|
|
|
|
ret = -ENOTCONN;
|
|
|
|
goto free;
|
|
|
|
}
|
2012-11-09 18:39:59 +08:00
|
|
|
band = chanctx_conf->def.chan->band;
|
2007-08-29 05:01:54 +08:00
|
|
|
break;
|
2014-11-03 17:33:19 +08:00
|
|
|
case NL80211_IFTYPE_OCB:
|
|
|
|
/* DA SA BSSID */
|
|
|
|
memcpy(hdr.addr1, skb->data, ETH_ALEN);
|
|
|
|
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
|
|
|
|
eth_broadcast_addr(hdr.addr3);
|
|
|
|
hdrlen = 24;
|
|
|
|
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
|
2014-11-10 00:50:10 +08:00
|
|
|
if (!chanctx_conf) {
|
|
|
|
ret = -ENOTCONN;
|
|
|
|
goto free;
|
|
|
|
}
|
2014-11-03 17:33:19 +08:00
|
|
|
band = chanctx_conf->def.chan->band;
|
|
|
|
break;
|
2008-09-11 06:01:58 +08:00
|
|
|
case NL80211_IFTYPE_ADHOC:
|
2007-07-27 21:43:22 +08:00
|
|
|
/* DA SA BSSID */
|
|
|
|
memcpy(hdr.addr1, skb->data, ETH_ALEN);
|
|
|
|
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
|
2009-02-15 19:44:28 +08:00
|
|
|
memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN);
|
2007-07-27 21:43:22 +08:00
|
|
|
hdrlen = 24;
|
2012-07-26 23:24:39 +08:00
|
|
|
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
|
2014-11-10 00:50:10 +08:00
|
|
|
if (!chanctx_conf) {
|
|
|
|
ret = -ENOTCONN;
|
|
|
|
goto free;
|
|
|
|
}
|
2012-11-09 18:39:59 +08:00
|
|
|
band = chanctx_conf->def.chan->band;
|
2007-08-29 05:01:54 +08:00
|
|
|
break;
|
|
|
|
default:
|
2014-11-10 00:50:10 +08:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto free;
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2011-11-06 21:13:34 +08:00
|
|
|
multicast = is_multicast_ether_addr(hdr.addr1);
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2015-03-21 16:13:45 +08:00
|
|
|
/* sta is always NULL for mesh */
|
|
|
|
if (sta) {
|
|
|
|
authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
|
|
|
|
wme_sta = sta->sta.wme;
|
|
|
|
} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
|
|
|
|
/* For mesh, the use of the QoS header is mandatory */
|
2011-09-29 22:04:36 +08:00
|
|
|
wme_sta = true;
|
2015-03-21 16:13:45 +08:00
|
|
|
}
|
2011-09-08 08:49:52 +08:00
|
|
|
|
2015-03-21 15:09:55 +08:00
|
|
|
/* receiver does QoS (which also means we do) use it */
|
|
|
|
if (wme_sta) {
|
2008-06-23 07:45:27 +08:00
|
|
|
fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
|
2007-12-19 08:31:22 +08:00
|
|
|
hdrlen += 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2008-01-29 00:19:37 +08:00
|
|
|
* Drop unicast frames to unauthorised stations unless they are
|
|
|
|
* EAPOL frames from the local station.
|
2007-12-19 08:31:22 +08:00
|
|
|
*/
|
2011-10-12 23:28:21 +08:00
|
|
|
if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
|
2014-11-03 17:33:19 +08:00
|
|
|
(sdata->vif.type != NL80211_IFTYPE_OCB) &&
|
2013-08-30 05:35:09 +08:00
|
|
|
!multicast && !authorized &&
|
2011-10-12 23:28:21 +08:00
|
|
|
(cpu_to_be16(ethertype) != sdata->control_port_protocol ||
|
mac80211: Convert compare_ether_addr to ether_addr_equal
Use the new bool function ether_addr_equal to add
some clarity and reduce the likelihood for misuse
of compare_ether_addr for sorting.
Done via cocci script:
$ cat compare_ether_addr.cocci
@@
expression a,b;
@@
- !compare_ether_addr(a, b)
+ ether_addr_equal(a, b)
@@
expression a,b;
@@
- compare_ether_addr(a, b)
+ !ether_addr_equal(a, b)
@@
expression a,b;
@@
- !ether_addr_equal(a, b) == 0
+ ether_addr_equal(a, b)
@@
expression a,b;
@@
- !ether_addr_equal(a, b) != 0
+ !ether_addr_equal(a, b)
@@
expression a,b;
@@
- ether_addr_equal(a, b) == 0
+ !ether_addr_equal(a, b)
@@
expression a,b;
@@
- ether_addr_equal(a, b) != 0
+ ether_addr_equal(a, b)
@@
expression a,b;
@@
- !!ether_addr_equal(a, b)
+ ether_addr_equal(a, b)
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-05-09 02:56:52 +08:00
|
|
|
!ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
|
2007-12-19 08:31:22 +08:00
|
|
|
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
|
2012-06-22 17:29:50 +08:00
|
|
|
net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
|
2014-11-10 00:50:10 +08:00
|
|
|
sdata->name, hdr.addr1);
|
2007-12-19 08:31:22 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
|
|
|
|
|
2014-11-10 00:50:10 +08:00
|
|
|
ret = -EPERM;
|
|
|
|
goto free;
|
2007-12-19 08:31:22 +08:00
|
|
|
}
|
|
|
|
|
2020-05-28 00:03:34 +08:00
|
|
|
if (unlikely(!multicast && ((skb->sk &&
|
|
|
|
skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) ||
|
|
|
|
ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS)))
|
|
|
|
info_id = ieee80211_store_ack_skb(local, skb, &info_flags,
|
|
|
|
cookie);
|
2011-11-06 21:13:34 +08:00
|
|
|
|
2010-12-03 01:44:09 +08:00
|
|
|
/*
|
|
|
|
* If the skb is shared we need to obtain our own copy.
|
|
|
|
*/
|
|
|
|
if (skb_shared(skb)) {
|
2011-11-06 21:13:34 +08:00
|
|
|
struct sk_buff *tmp_skb = skb;
|
|
|
|
|
|
|
|
/* can't happen -- skb is a clone if info_id != 0 */
|
|
|
|
WARN_ON(info_id);
|
|
|
|
|
2010-12-19 02:30:50 +08:00
|
|
|
skb = skb_clone(skb, GFP_ATOMIC);
|
2010-12-03 01:44:09 +08:00
|
|
|
kfree_skb(tmp_skb);
|
|
|
|
|
2014-11-10 00:50:10 +08:00
|
|
|
if (!skb) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto free;
|
|
|
|
}
|
2010-12-03 01:44:09 +08:00
|
|
|
}
|
|
|
|
|
2008-06-23 07:45:27 +08:00
|
|
|
hdr.frame_control = fc;
|
2007-07-27 21:43:22 +08:00
|
|
|
hdr.duration_id = 0;
|
|
|
|
hdr.seq_ctrl = 0;
|
|
|
|
|
|
|
|
skip_header_bytes = ETH_HLEN;
|
|
|
|
if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
|
|
|
|
encaps_data = bridge_tunnel_header;
|
|
|
|
encaps_len = sizeof(bridge_tunnel_header);
|
|
|
|
skip_header_bytes -= 2;
|
2013-03-28 12:38:25 +08:00
|
|
|
} else if (ethertype >= ETH_P_802_3_MIN) {
|
2007-07-27 21:43:22 +08:00
|
|
|
encaps_data = rfc1042_header;
|
|
|
|
encaps_len = sizeof(rfc1042_header);
|
|
|
|
skip_header_bytes -= 2;
|
|
|
|
} else {
|
|
|
|
encaps_data = NULL;
|
|
|
|
encaps_len = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
skb_pull(skb, skip_header_bytes);
|
2008-05-29 16:38:53 +08:00
|
|
|
head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2008-05-29 16:38:53 +08:00
|
|
|
/*
|
|
|
|
* So we need to modify the skb header and hence need a copy of
|
|
|
|
* that. The head_need variable above doesn't, so far, include
|
|
|
|
* the needed header space that we don't need right away. If we
|
|
|
|
* can, then we don't reallocate right now but only after the
|
|
|
|
* frame arrives at the master device (if it does...)
|
|
|
|
*
|
|
|
|
* If we cannot, however, then we will reallocate to include all
|
|
|
|
* the ever needed space. Also, if we need to reallocate it anyway,
|
|
|
|
* make it big enough for everything we may ever need.
|
|
|
|
*/
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2008-06-18 16:19:51 +08:00
|
|
|
if (head_need > 0 || skb_cloned(skb)) {
|
2013-03-24 20:23:27 +08:00
|
|
|
head_need += sdata->encrypt_headroom;
|
2008-05-29 16:38:53 +08:00
|
|
|
head_need += local->tx_headroom;
|
|
|
|
head_need = max_t(int, 0, head_need);
|
2020-10-09 19:25:41 +08:00
|
|
|
if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) {
|
2012-10-08 20:39:33 +08:00
|
|
|
ieee80211_free_txskb(&local->hw, skb);
|
2012-11-07 21:02:30 +08:00
|
|
|
skb = NULL;
|
2014-11-10 00:50:10 +08:00
|
|
|
return ERR_PTR(-ENOMEM);
|
2012-10-08 20:39:33 +08:00
|
|
|
}
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2016-07-13 17:00:02 +08:00
|
|
|
if (encaps_data)
|
2007-07-27 21:43:22 +08:00
|
|
|
memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
|
2007-09-14 23:10:24 +08:00
|
|
|
|
2010-06-29 19:08:06 +08:00
|
|
|
#ifdef CONFIG_MAC80211_MESH
|
2016-07-13 17:00:02 +08:00
|
|
|
if (meshhdrlen > 0)
|
2008-02-23 22:17:10 +08:00
|
|
|
memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
|
2010-06-29 19:08:06 +08:00
|
|
|
#endif
|
2008-02-23 22:17:10 +08:00
|
|
|
|
2008-06-23 07:45:27 +08:00
|
|
|
if (ieee80211_is_data_qos(fc)) {
|
2007-09-14 23:10:24 +08:00
|
|
|
__le16 *qos_control;
|
|
|
|
|
networking: make skb_push & __skb_push return void pointers
It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.
Make these functions return void * and remove all the casts across
the tree, adding a (u8 *) cast only where the unsigned char pointer
was used directly, all done with the following spatch:
@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)
@@
expression E, SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)
@@
expression SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- fn(SKB, LEN)[0]
+ *(u8 *)fn(SKB, LEN)
Note that the last part there converts from push(...)[0] to the
more idiomatic *(u8 *)push(...).
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 20:29:23 +08:00
|
|
|
qos_control = skb_push(skb, 2);
|
2007-09-14 23:10:24 +08:00
|
|
|
memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2);
|
|
|
|
/*
|
|
|
|
* Maybe we could actually set some fields here, for now just
|
|
|
|
* initialise to zero to indicate no special operation.
|
|
|
|
*/
|
|
|
|
*qos_control = 0;
|
|
|
|
} else
|
|
|
|
memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
|
|
|
|
|
2016-03-03 09:16:56 +08:00
|
|
|
skb_reset_mac_header(skb);
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2010-12-19 02:30:48 +08:00
|
|
|
info = IEEE80211_SKB_CB(skb);
|
2009-06-17 23:43:56 +08:00
|
|
|
memset(info, 0, sizeof(*info));
|
|
|
|
|
2011-11-06 21:13:34 +08:00
|
|
|
info->flags = info_flags;
|
|
|
|
info->ack_frame_id = info_id;
|
2014-11-10 00:50:09 +08:00
|
|
|
info->band = band;
|
2019-04-12 04:47:25 +08:00
|
|
|
info->control.flags = ctrl_flags;
|
2011-11-06 21:13:34 +08:00
|
|
|
|
2014-11-10 00:50:10 +08:00
|
|
|
return skb;
|
|
|
|
free:
|
|
|
|
kfree_skb(skb);
|
|
|
|
return ERR_PTR(ret);
|
|
|
|
}
|
|
|
|
|
2015-03-21 22:25:43 +08:00
|
|
|
/*
|
|
|
|
* fast-xmit overview
|
|
|
|
*
|
|
|
|
* The core idea of this fast-xmit is to remove per-packet checks by checking
|
|
|
|
* them out of band. ieee80211_check_fast_xmit() implements the out-of-band
|
|
|
|
* checks that are needed to get the sta->fast_tx pointer assigned, after which
|
|
|
|
* much less work can be done per packet. For example, fragmentation must be
|
|
|
|
* disabled or the fast_tx pointer will not be set. All the conditions are seen
|
|
|
|
* in the code here.
|
|
|
|
*
|
|
|
|
* Once assigned, the fast_tx data structure also caches the per-packet 802.11
|
|
|
|
* header and other data to aid packet processing in ieee80211_xmit_fast().
|
|
|
|
*
|
|
|
|
* The most difficult part of this is that when any of these assumptions
|
|
|
|
* change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(),
|
|
|
|
* ieee80211_check_fast_xmit() or friends) is required to reset the data,
|
|
|
|
* since the per-packet code no longer checks the conditions. This is reflected
|
|
|
|
* by the calls to these functions throughout the rest of the code, and must be
|
|
|
|
* maintained if any of the TX path checks change.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * Re-evaluate and (re)build the fast-xmit cache for @sta.
 *
 * Checks all out-of-band conditions required for the fast TX path (hardware
 * support, station authorization, no power-save, no fragmentation, known
 * interface type, hardware-offloaded key) and, if they all hold, builds a
 * cached 802.11 header plus offsets in a struct ieee80211_fast_tx and
 * publishes it via RCU on sta->fast_tx. If any condition fails, the cache
 * is cleared (fast_tx stays NULL), disabling the fast path for this station.
 */
void ieee80211_check_fast_xmit(struct sta_info *sta)
{
	struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_hdr *hdr = (void *)build.hdr;
	struct ieee80211_chanctx_conf *chanctx_conf;
	__le16 fc;

	if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT))
		return;

	/* Locking here protects both the pointer itself, and against concurrent
	 * invocations winning data access races to, e.g., the key pointer that
	 * is used.
	 * Without it, the invocation of this function right after the key
	 * pointer changes wouldn't be sufficient, as another CPU could access
	 * the pointer, then stall, and then do the cache update after the CPU
	 * that invalidated the key.
	 * With the locking, such scenarios cannot happen as the check for the
	 * key and the fast-tx assignment are done atomically, so the CPU that
	 * modifies the key will either wait or other one will see the key
	 * cleared/changed already.
	 */
	spin_lock_bh(&sta->lock);
	/* non-dynamic PS requires per-frame handling, incompatible with the
	 * cached fast path on a station interface
	 */
	if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
	    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
	    sdata->vif.type == NL80211_IFTYPE_STATION)
		goto out;

	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
		goto out;

	/* any power-save related state needs the slow path */
	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
	    test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
	    test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
	    test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
		goto out;

	if (sdata->noack_map)
		goto out;

	/* fast-xmit doesn't handle fragmentation at all */
	if (local->hw.wiphy->frag_threshold != (u32)-1 &&
	    !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG))
		goto out;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (!chanctx_conf) {
		rcu_read_unlock();
		goto out;
	}
	build.band = chanctx_conf->def.chan->band;
	rcu_read_unlock();

	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);

	/* build the cached 802.11 header and record where DA/SA from the
	 * ethernet frame will be copied in at transmit time
	 */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_ADHOC:
		/* DA SA BSSID */
		build.da_offs = offsetof(struct ieee80211_hdr, addr1);
		build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
		memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN);
		build.hdr_len = 24;
		break;
	case NL80211_IFTYPE_STATION:
		if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
			/* DA SA BSSID */
			build.da_offs = offsetof(struct ieee80211_hdr, addr1);
			build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
			memcpy(hdr->addr3, sdata->u.mgd.bssid, ETH_ALEN);
			build.hdr_len = 24;
			break;
		}

		if (sdata->u.mgd.use_4addr) {
			/* non-regular ethertype cannot use the fastpath */
			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
					  IEEE80211_FCTL_TODS);
			/* RA TA DA SA */
			memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
			memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
			build.da_offs = offsetof(struct ieee80211_hdr, addr3);
			build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
			build.hdr_len = 30;
			break;
		}
		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
		/* BSSID SA DA */
		memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
		build.da_offs = offsetof(struct ieee80211_hdr, addr3);
		build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
		build.hdr_len = 24;
		break;
	case NL80211_IFTYPE_AP_VLAN:
		if (sdata->wdev.use_4addr) {
			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
					  IEEE80211_FCTL_TODS);
			/* RA TA DA SA */
			memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
			memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
			build.da_offs = offsetof(struct ieee80211_hdr, addr3);
			build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
			build.hdr_len = 30;
			break;
		}
		fallthrough;
	case NL80211_IFTYPE_AP:
		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
		/* DA BSSID SA */
		build.da_offs = offsetof(struct ieee80211_hdr, addr1);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		build.sa_offs = offsetof(struct ieee80211_hdr, addr3);
		build.hdr_len = 24;
		break;
	default:
		/* not handled on fast-xmit */
		goto out;
	}

	if (sta->sta.wme) {
		build.hdr_len += 2;
		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
	}

	/* We store the key here so there's no point in using rcu_dereference()
	 * but that's fine because the code that changes the pointers will call
	 * this function after doing so. For a single CPU that would be enough,
	 * for multiple see the comment above.
	 */
	build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
	if (!build.key)
		build.key = rcu_access_pointer(sdata->default_unicast_key);
	if (build.key) {
		bool gen_iv, iv_spc, mmic;

		gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
		iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		mmic = build.key->conf.flags &
			(IEEE80211_KEY_FLAG_GENERATE_MMIC |
			 IEEE80211_KEY_FLAG_PUT_MIC_SPACE);

		/* don't handle software crypto */
		if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
			goto out;

		/* Key is being removed */
		if (build.key->flags & KEY_FLAG_TAINTED)
			goto out;

		/* account for the IV/PN space the cipher needs in the cached
		 * header, and record where the PN goes if we generate it
		 */
		switch (build.key->conf.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
			if (gen_iv)
				build.pn_offs = build.hdr_len;
			if (gen_iv || iv_spc)
				build.hdr_len += IEEE80211_CCMP_HDR_LEN;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			if (gen_iv)
				build.pn_offs = build.hdr_len;
			if (gen_iv || iv_spc)
				build.hdr_len += IEEE80211_GCMP_HDR_LEN;
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			/* cannot handle MMIC or IV generation in xmit-fast */
			if (mmic || gen_iv)
				goto out;
			if (iv_spc)
				build.hdr_len += IEEE80211_TKIP_IV_LEN;
			break;
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			/* cannot handle IV generation in fast-xmit */
			if (gen_iv)
				goto out;
			if (iv_spc)
				build.hdr_len += IEEE80211_WEP_IV_LEN;
			break;
		case WLAN_CIPHER_SUITE_AES_CMAC:
		case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			WARN(1,
			     "management cipher suite 0x%x enabled for data\n",
			     build.key->conf.cipher);
			goto out;
		default:
			/* we don't know how to generate IVs for this at all */
			if (WARN_ON(gen_iv))
				goto out;
			/* pure hardware keys are OK, of course */
			if (!(build.key->flags & KEY_FLAG_CIPHER_SCHEME))
				break;
			/* cipher scheme might require space allocation */
			if (iv_spc &&
			    build.key->conf.iv_len > IEEE80211_FAST_XMIT_MAX_IV)
				goto out;
			if (iv_spc)
				build.hdr_len += build.key->conf.iv_len;
		}

		fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}

	hdr->frame_control = fc;

	/* cache the SNAP header right after the 802.11 header as well */
	memcpy(build.hdr + build.hdr_len,
	       rfc1042_header, sizeof(rfc1042_header));
	build.hdr_len += sizeof(rfc1042_header);

	fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC);
	/* if the kmemdup fails, continue w/o fast_tx */
	if (!fast_tx)
		goto out;

 out:
	/* we might have raced against another call to this function */
	old = rcu_dereference_protected(sta->fast_tx,
					lockdep_is_held(&sta->lock));
	rcu_assign_pointer(sta->fast_tx, fast_tx);
	if (old)
		kfree_rcu(old, rcu_head);
	spin_unlock_bh(&sta->lock);
}
|
|
|
|
|
|
|
|
void ieee80211_check_fast_xmit_all(struct ieee80211_local *local)
|
|
|
|
{
|
|
|
|
struct sta_info *sta;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(sta, &local->sta_list, list)
|
|
|
|
ieee80211_check_fast_xmit(sta);
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata)
|
|
|
|
{
|
|
|
|
struct ieee80211_local *local = sdata->local;
|
|
|
|
struct sta_info *sta;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
|
|
|
list_for_each_entry_rcu(sta, &local->sta_list, list) {
|
|
|
|
if (sdata != sta->sdata &&
|
|
|
|
(!sta->sdata->bss || sta->sdata->bss != sdata->bss))
|
|
|
|
continue;
|
|
|
|
ieee80211_check_fast_xmit(sta);
|
|
|
|
}
|
|
|
|
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
void ieee80211_clear_fast_xmit(struct sta_info *sta)
|
|
|
|
{
|
|
|
|
struct ieee80211_fast_tx *fast_tx;
|
|
|
|
|
|
|
|
spin_lock_bh(&sta->lock);
|
|
|
|
fast_tx = rcu_dereference_protected(sta->fast_tx,
|
|
|
|
lockdep_is_held(&sta->lock));
|
|
|
|
RCU_INIT_POINTER(sta->fast_tx, NULL);
|
|
|
|
spin_unlock_bh(&sta->lock);
|
|
|
|
|
|
|
|
if (fast_tx)
|
|
|
|
kfree_rcu(fast_tx, rcu_head);
|
|
|
|
}
|
|
|
|
|
2016-03-04 05:59:00 +08:00
|
|
|
static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
|
2018-08-29 14:57:02 +08:00
|
|
|
struct sk_buff *skb, int headroom)
|
2016-03-04 05:59:00 +08:00
|
|
|
{
|
2018-08-29 14:57:02 +08:00
|
|
|
if (skb_headroom(skb) < headroom) {
|
2016-03-04 05:59:00 +08:00
|
|
|
I802_DEBUG_INC(local->tx_expand_skb_head);
|
|
|
|
|
2018-08-29 14:57:02 +08:00
|
|
|
if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
|
2016-03-04 05:59:00 +08:00
|
|
|
wiphy_debug(local->hw.wiphy,
|
|
|
|
"failed to reallocate TX buffer\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Convert the first queued subframe (@skb) into the head of an A-MSDU:
 * push an inner ethernet-style A-MSDU subframe header between the 802.11
 * header and the payload, rewrite the outer SA/DA to the BSSID where the
 * spec requires it, and set the A-MSDU-present bit in the QoS control
 * field. Returns false if the frame cannot be an A-MSDU head; returns
 * true immediately if it was already converted (IEEE80211_TX_CTRL_AMSDU).
 */
static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
					 struct ieee80211_fast_tx *fast_tx,
					 struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ethhdr *amsdu_hdr;
	/* 802.11 header length without the trailing SNAP header */
	int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header);
	int subframe_len = skb->len - hdr_len;
	void *data;
	u8 *qc, *h_80211_src, *h_80211_dst;
	const u8 *bssid;

	/* rate-control probe frames must go out individually */
	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		return false;

	/* already converted to an A-MSDU head earlier */
	if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
		return true;

	if (!ieee80211_amsdu_realloc_pad(local, skb,
					 sizeof(*amsdu_hdr) +
					 local->hw.extra_tx_headroom))
		return false;

	/* open a gap after the 802.11 header for the subframe header by
	 * pushing and sliding the 802.11 header forward
	 */
	data = skb_push(skb, sizeof(*amsdu_hdr));
	memmove(data, data + sizeof(*amsdu_hdr), hdr_len);
	hdr = data;
	amsdu_hdr = data + hdr_len;
	/* h_80211_src/dst is addr* field within hdr */
	h_80211_src = data + fast_tx->sa_offs;
	h_80211_dst = data + fast_tx->da_offs;

	/* A-MSDU subframe header: DA, SA, length (big-endian) */
	amsdu_hdr->h_proto = cpu_to_be16(subframe_len);
	ether_addr_copy(amsdu_hdr->h_source, h_80211_src);
	ether_addr_copy(amsdu_hdr->h_dest, h_80211_dst);

	/* according to IEEE 802.11-2012 8.3.2 table 8-19, the outer SA/DA
	 * fields needs to be changed to BSSID for A-MSDU frames depending
	 * on FromDS/ToDS values.
	 */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
		bssid = sdata->u.mgd.bssid;
		break;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_AP_VLAN:
		bssid = sdata->vif.addr;
		break;
	default:
		bssid = NULL;
	}

	if (bssid && ieee80211_has_fromds(hdr->frame_control))
		ether_addr_copy(h_80211_src, bssid);

	if (bssid && ieee80211_has_tods(hdr->frame_control))
		ether_addr_copy(h_80211_dst, bssid);

	/* mark the frame as carrying an A-MSDU in the QoS control field */
	qc = ieee80211_get_qos_ctl(hdr);
	*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	info->control.flags |= IEEE80211_TX_CTRL_AMSDU;

	return true;
}
|
|
|
|
|
|
|
|
/*
 * Try to aggregate @skb into the A-MSDU at the tail of the station's
 * per-TID fq flow. On success the skb is appended to the head frame's
 * frag_list (ownership moves to the head) and true is returned; the
 * caller must then not queue @skb separately. Returns false when
 * aggregation is not possible and the skb should be transmitted on its
 * own. Size/fragment limits (max_amsdu_len, max_amsdu_subframes,
 * max_tx_fragments) and the driver's drv_can_aggregate_in_amsdu() veto
 * are all honored.
 */
static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
				      struct sta_info *sta,
				      struct ieee80211_fast_tx *fast_tx,
				      struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct fq *fq = &local->fq;
	struct fq_tin *tin;
	struct fq_flow *flow;
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	struct ieee80211_txq *txq = sta->sta.txq[tid];
	struct txq_info *txqi;
	struct sk_buff **frag_tail, *head;
	/* payload length for the new subframe's length field: skb holds
	 * DA SA payload at this point, the length field covers SA+payload
	 */
	int subframe_len = skb->len - ETH_ALEN;
	u8 max_subframes = sta->sta.max_amsdu_subframes;
	int max_frags = local->hw.max_tx_fragments;
	int max_amsdu_len = sta->sta.max_amsdu_len;
	int orig_truesize;
	u32 flow_idx;
	__be16 len;
	void *data;
	bool ret = false;
	unsigned int orig_len;
	/* n starts at 2: the existing head plus the frame being added */
	int n = 2, nfrags, pad = 0;
	u16 hdrlen;

	if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
		return false;

	/* encap offload path builds frames differently; don't mix */
	if (sdata->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
		return false;

	if (skb_is_gso(skb))
		return false;

	if (!txq)
		return false;

	txqi = to_txq_info(txq);
	if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags))
		return false;

	/* clamp the A-MSDU size to rate-control and per-TID limits */
	if (sta->sta.max_rc_amsdu_len)
		max_amsdu_len = min_t(int, max_amsdu_len,
				      sta->sta.max_rc_amsdu_len);

	if (sta->sta.max_tid_amsdu_len[tid])
		max_amsdu_len = min_t(int, max_amsdu_len,
				      sta->sta.max_tid_amsdu_len[tid]);

	/* compute the flow index before taking fq->lock, as done here */
	flow_idx = fq_flow_idx(fq, skb);

	spin_lock_bh(&fq->lock);

	/* TODO: Ideally aggregation should be done on dequeue to remain
	 * responsive to environment changes.
	 */

	tin = &txqi->tin;
	flow = fq_flow_classify(fq, tin, flow_idx, skb);
	head = skb_peek_tail(&flow->queue);
	if (!head || skb_is_gso(head))
		goto out;

	/* remember sizes so backlog/memory accounting can be fixed up */
	orig_truesize = head->truesize;
	orig_len = head->len;

	if (skb->len + head->len > max_amsdu_len)
		goto out;

	/* count subframes (n) and total fragments (nfrags) of the
	 * would-be A-MSDU, walking the head's existing frag_list
	 */
	nfrags = 1 + skb_shinfo(skb)->nr_frags;
	nfrags += 1 + skb_shinfo(head)->nr_frags;
	frag_tail = &skb_shinfo(head)->frag_list;
	while (*frag_tail) {
		nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags;
		frag_tail = &(*frag_tail)->next;
		n++;
	}

	if (max_subframes && n > max_subframes)
		goto out;

	if (max_frags && nfrags > max_frags)
		goto out;

	if (!drv_can_aggregate_in_amsdu(local, head, skb))
		goto out;

	if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
		goto out;

	/* If n == 2, the "while (*frag_tail)" loop above didn't execute
	 * and frag_tail should be &skb_shinfo(head)->frag_list.
	 * However, ieee80211_amsdu_prepare_head() can reallocate it.
	 * Reload frag_tail to have it pointing to the correct place.
	 */
	if (n == 2)
		frag_tail = &skb_shinfo(head)->frag_list;

	/*
	 * Pad out the previous subframe to a multiple of 4 by adding the
	 * padding to the next one, that's being added. Note that head->len
	 * is the length of the full A-MSDU, but that works since each time
	 * we add a new subframe we pad out the previous one to a multiple
	 * of 4 and thus it no longer matters in the next round.
	 */
	hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
	if ((head->len - hdrlen) & 3)
		pad = 4 - ((head->len - hdrlen) & 3);

	/* room for the subframe length field, SNAP header and padding */
	if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
						     2 + pad))
		goto out_recalc;

	ret = true;
	/* build the subframe header in front of the payload:
	 * DA SA len SNAP -- DA/SA are already at the front, slide them up
	 */
	data = skb_push(skb, ETH_ALEN + 2);
	memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);

	data += 2 * ETH_ALEN;
	len = cpu_to_be16(subframe_len);
	memcpy(data, &len, 2);
	memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));

	/* prepend zero padding for the previous subframe's alignment */
	memset(skb_push(skb, pad), 0, pad);

	head->len += skb->len;
	head->data_len += skb->len;
	*frag_tail = skb;

out_recalc:
	/* fix up fq accounting for the grown (or reallocated) head */
	fq->memory_usage += head->truesize - orig_truesize;
	if (head->len != orig_len) {
		flow->backlog += head->len - orig_len;
		tin->backlog_bytes += head->len - orig_len;
	}
out:
	spin_unlock_bh(&fq->lock);

	return ret;
}
|
|
|
|
|
2016-09-23 01:04:20 +08:00
|
|
|
/*
 * Can be called while the sta lock is held. Anything that can cause packets to
 * be generated will cause deadlock!
 */
static ieee80211_tx_result
ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
			   struct sta_info *sta, u8 pn_offs,
			   struct ieee80211_key *key,
			   struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	/* default TID for non-QoS frames; tx_stats.msdu[] is sized to
	 * accommodate this extra slot
	 */
	u8 tid = IEEE80211_NUM_TIDS;

	/* run software rate control here if the hardware doesn't do it */
	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL) &&
	    ieee80211_tx_h_rate_ctrl(tx) != TX_CONTINUE)
		return TX_DROP;

	if (key)
		info->control.hw_key = &key->conf;

	dev_sw_netstats_tx_add(skb->dev, 1, skb->len);

	/* assign the sequence number: per-TID counter for QoS data frames,
	 * shared per-interface counter otherwise
	 */
	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
	} else {
		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
		hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
		/* sequence number lives in the upper 12 bits of seq_ctrl */
		sdata->sequence_number += 0x10;
	}

	/* a GSO skb represents multiple MSDUs; count each segment */
	if (skb_shinfo(skb)->gso_size)
		sta->tx_stats.msdu[tid] +=
			DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
	else
		sta->tx_stats.msdu[tid]++;

	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

	/* statistics normally done by ieee80211_tx_h_stats (but that
	 * has to consider fragmentation, so is more complex)
	 */
	sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
	sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;

	/* if the caller determined a PN offset, write the IV/PN into the
	 * already-reserved space in the header
	 */
	if (pn_offs) {
		u64 pn;
		u8 *crypto_hdr = skb->data + pn_offs;

		switch (key->conf.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			pn = atomic64_inc_return(&key->conf.tx_pn);
			crypto_hdr[0] = pn;
			crypto_hdr[1] = pn >> 8;
			/* byte 2 is reserved; byte 3 carries ExtIV (0x20)
			 * and the key index
			 */
			crypto_hdr[3] = 0x20 | (key->conf.keyidx << 6);
			crypto_hdr[4] = pn >> 16;
			crypto_hdr[5] = pn >> 24;
			crypto_hdr[6] = pn >> 32;
			crypto_hdr[7] = pn >> 40;
			break;
		}
	}

	return TX_CONTINUE;
}
|
|
|
|
|
2015-03-21 22:25:43 +08:00
|
|
|
/*
 * Fast-xmit path: transmit an 802.3-framed skb using the precomputed
 * fast_tx template, bypassing the full TX handler chain.
 *
 * Returns false only while the skb is still untouched (caller falls back
 * to the slow path); once the skb has been modified or consumed this
 * always returns true, even on failure (the skb is freed here).
 */
static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
				struct sta_info *sta,
				struct ieee80211_fast_tx *fast_tx,
				struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	/* ethertype straight from the 802.3 header (network byte order) */
	u16 ethertype = (skb->data[12] << 8) | skb->data[13];
	int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
	int hw_headroom = sdata->local->hw.extra_tx_headroom;
	struct ethhdr eth;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
	struct ieee80211_tx_data tx;
	ieee80211_tx_result r;
	struct tid_ampdu_tx *tid_tx = NULL;
	u8 tid = IEEE80211_NUM_TIDS;

	/* control port protocol needs a lot of special handling */
	if (cpu_to_be16(ethertype) == sdata->control_port_protocol)
		return false;

	/* only RFC 1042 SNAP */
	if (ethertype < ETH_P_802_3_MIN)
		return false;

	/* don't handle TX status request here either */
	if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
		return false;

	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
		if (tid_tx) {
			/* aggregation session still being set up or torn
			 * down -- take the slow path
			 */
			if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
				return false;
			if (tid_tx->timeout)
				tid_tx->last_tx = jiffies;
		}
	}

	/* after this point (skb is modified) we cannot return false */

	if (skb_shared(skb)) {
		struct sk_buff *tmp_skb = skb;

		skb = skb_clone(skb, GFP_ATOMIC);
		kfree_skb(tmp_skb);

		if (!skb)
			return true;
	}

	/* try to merge this frame into a pending A-MSDU; on success it has
	 * been consumed by the aggregate
	 */
	if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
	    ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
		return true;

	/* will not be crypto-handled beyond what we do here, so use false
	 * as the may-encrypt argument for the resize to not account for
	 * more room than we already have in 'extra_head'
	 */
	if (unlikely(ieee80211_skb_resize(sdata, skb,
					  max_t(int, extra_head + hw_headroom -
						     skb_headroom(skb), 0),
					  ENCRYPT_NO))) {
		kfree_skb(skb);
		return true;
	}

	/* save the ethernet addresses, then overwrite the 802.3 header
	 * with the precomputed 802.11 header and patch DA/SA back in
	 */
	memcpy(&eth, skb->data, ETH_HLEN - 2);
	hdr = skb_push(skb, extra_head);
	memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len);
	memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
	memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);

	info = IEEE80211_SKB_CB(skb);
	memset(info, 0, sizeof(*info));
	info->band = fast_tx->band;
	info->control.vif = &sdata->vif;
	info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
		      IEEE80211_TX_CTL_DONTFRAG |
		      (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
	info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;

#ifdef CONFIG_MAC80211_DEBUGFS
	if (local->force_tx_status)
		info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
#endif

	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		*ieee80211_get_qos_ctl(hdr) = tid;
	}

	__skb_queue_head_init(&tx.skbs);

	tx.flags = IEEE80211_TX_UNICAST;
	tx.local = local;
	tx.sdata = sdata;
	tx.sta = sta;
	tx.key = fast_tx->key;

	/* hand off to the intermediate TXQ if one is in use; the frame
	 * will be finished in ieee80211_tx_dequeue() later
	 */
	if (ieee80211_queue_skb(local, sdata, sta, skb))
		return true;

	tx.skb = skb;
	r = ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
				       fast_tx->key, &tx);
	tx.skb = NULL;
	if (r == TX_DROP) {
		kfree_skb(skb);
		return true;
	}

	/* AP_VLAN interfaces transmit through the owning AP interface */
	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);

	__skb_queue_tail(&tx.skbs, skb);
	ieee80211_tx_frags(local, &sdata->vif, sta, &tx.skbs, false);
	return true;
}
|
|
|
|
|
2016-09-23 01:04:19 +08:00
|
|
|
/*
 * Driver-facing entry point to pull the next frame off a software TXQ.
 *
 * Takes fq->lock only around queue manipulation; the per-frame processing
 * (key selection, rate control, fast-finish or the late TX handlers) runs
 * with the lock dropped, and frames that fail any step are freed and the
 * loop restarts at 'begin' to try the next queued frame.
 */
struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
				     struct ieee80211_txq *txq)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = container_of(txq, struct txq_info, txq);
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb = NULL;
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;
	struct ieee80211_tx_info *info;
	struct ieee80211_tx_data tx;
	ieee80211_tx_result r;
	struct ieee80211_vif *vif = txq->vif;

	/* must be called from BH/softirq context */
	WARN_ON_ONCE(softirq_count() == 0);

	/* AQL: don't release more frames if the airtime budget is spent */
	if (!ieee80211_txq_airtime_check(hw, txq))
		return NULL;

begin:
	spin_lock_bh(&fq->lock);

	if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
	    test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
		goto out;

	/* propagate a per-interface AC stop onto this TXQ */
	if (vif->txqs_stopped[txq->ac]) {
		set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
		goto out;
	}

	/* Make sure fragments stay together. */
	skb = __skb_dequeue(&txqi->frags);
	if (unlikely(skb)) {
		if (!(IEEE80211_SKB_CB(skb)->control.flags &
		      IEEE80211_TX_INTCFL_NEED_TXPROCESSING))
			goto out;
		IEEE80211_SKB_CB(skb)->control.flags &=
			~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
	} else {
		skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
	}

	if (!skb)
		goto out;

	/* lock is dropped for the (potentially slow) per-frame processing;
	 * any path that gives up on this frame must 'goto begin' to retake
	 * the lock and fetch the next one
	 */
	spin_unlock_bh(&fq->lock);

	hdr = (struct ieee80211_hdr *)skb->data;
	info = IEEE80211_SKB_CB(skb);

	memset(&tx, 0, sizeof(tx));
	__skb_queue_head_init(&tx.skbs);
	tx.local = local;
	tx.skb = skb;
	tx.sdata = vif_to_sdata(info->control.vif);

	if (txq->sta) {
		tx.sta = container_of(txq->sta, struct sta_info, sta);
		/*
		 * Drop unicast frames to unauthorised stations unless they are
		 * injected frames or EAPOL frames from the local station.
		 */
		if (unlikely(!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
			     ieee80211_is_data(hdr->frame_control) &&
			     !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
			     tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
			     !is_multicast_ether_addr(hdr->addr1) &&
			     !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
			     (!(info->control.flags &
				IEEE80211_TX_CTRL_PORT_CTRL_PROTO) ||
			      !ether_addr_equal(tx.sdata->vif.addr,
						hdr->addr2)))) {
			I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		}
	}

	/*
	 * The key can be removed while the packet was queued, so need to call
	 * this here to get the current key.
	 */
	r = ieee80211_tx_h_select_key(&tx);
	if (r != TX_CONTINUE) {
		ieee80211_free_txskb(&local->hw, skb);
		goto begin;
	}

	if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
		info->flags |= IEEE80211_TX_CTL_AMPDU;
	else
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;

	/* 802.11 encapsulation offloaded to hardware: only rate control
	 * (if done in software) is needed before handing the frame out
	 */
	if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
		if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
			r = ieee80211_tx_h_rate_ctrl(&tx);
			if (r != TX_CONTINUE) {
				ieee80211_free_txskb(&local->hw, skb);
				goto begin;
			}
		}
		goto encap_out;
	}

	if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
		struct sta_info *sta = container_of(txq->sta, struct sta_info,
						    sta);
		u8 pn_offs = 0;

		if (tx.key &&
		    (tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
			pn_offs = ieee80211_hdrlen(hdr->frame_control);

		r = ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
					       tx.key, &tx);
		if (r != TX_CONTINUE) {
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		}
	} else {
		if (invoke_tx_handlers_late(&tx))
			goto begin;

		/* return the first resulting frame; stash any remaining
		 * fragments so the next dequeue keeps them together
		 */
		skb = __skb_dequeue(&tx.skbs);

		if (!skb_queue_empty(&tx.skbs)) {
			spin_lock_bh(&fq->lock);
			skb_queue_splice_tail(&tx.skbs, &txqi->frags);
			spin_unlock_bh(&fq->lock);
		}
	}

	/* linearize frag-list skbs for hardware that can't handle them */
	if (skb_has_frag_list(skb) &&
	    !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
		if (skb_linearize(skb)) {
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		}
	}

	/* resolve the vif the frame is actually transmitted on */
	switch (tx.sdata->vif.type) {
	case NL80211_IFTYPE_MONITOR:
		if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
			vif = &tx.sdata->vif;
			break;
		}
		tx.sdata = rcu_dereference(local->monitor_sdata);
		if (tx.sdata) {
			vif = &tx.sdata->vif;
			info->hw_queue =
				vif->hw_queue[skb_get_queue_mapping(skb)];
		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
			ieee80211_free_txskb(&local->hw, skb);
			goto begin;
		} else {
			vif = NULL;
		}
		break;
	case NL80211_IFTYPE_AP_VLAN:
		tx.sdata = container_of(tx.sdata->bss,
					struct ieee80211_sub_if_data, u.ap);
		fallthrough;
	default:
		vif = &tx.sdata->vif;
		break;
	}

encap_out:
	IEEE80211_SKB_CB(skb)->control.vif = vif;

	/* AQL: record the estimated airtime of this frame as pending */
	if (vif &&
	    wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
		bool ampdu = txq->ac != IEEE80211_AC_VO;
		u32 airtime;

		airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
							     skb->len, ampdu);
		if (airtime) {
			airtime = ieee80211_info_set_tx_time_est(info, airtime);
			ieee80211_sta_update_pending_airtime(local, tx.sta,
							     txq->ac,
							     airtime,
							     false);
		}
	}

	return skb;

out:
	spin_unlock_bh(&fq->lock);

	return skb;
}
EXPORT_SYMBOL(ieee80211_tx_dequeue);
|
|
|
|
|
2018-12-19 09:02:06 +08:00
|
|
|
/*
 * Return the next TXQ the driver should service for access category @ac,
 * or NULL when no eligible queue remains in this scheduling round.
 *
 * Walks the per-AC rb-tree of active TXQs (ordered by virtual airtime)
 * from the saved schedule position, under air_sched->lock.
 */
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct airtime_sched_info *air_sched;
	u64 now = ktime_get_coarse_boottime_ns();
	struct ieee80211_txq *ret = NULL;
	struct airtime_info *air_info;
	struct txq_info *txqi = NULL;
	struct rb_node *node;
	bool first = false;

	air_sched = &local->airtime[ac];
	spin_lock_bh(&air_sched->lock);

	/* resume from where the previous call left off */
	node = air_sched->schedule_pos;

begin:
	if (!node) {
		node = rb_first_cached(&air_sched->active_txqs);
		first = true;
	} else {
		node = rb_next(node);
	}

	if (!node)
		goto out;

	txqi = container_of(node, struct txq_info, schedule_order);
	air_info = to_airtime_info(&txqi->txq);

	/* stop once the queue's virtual time is ahead of the scheduler's;
	 * at the head of the tree, try advancing the scheduler clock first
	 */
	if (air_info->v_t > air_sched->v_t &&
	    (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now)))
		goto out;

	/* skip TXQs that are over their AQL airtime budget */
	if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) {
		first = false;
		goto begin;
	}

	air_sched->schedule_pos = node;
	air_sched->last_schedule_activity = now;
	ret = &txqi->txq;
out:
	spin_unlock_bh(&air_sched->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_next_txq);
|
|
|
|
|
|
|
|
static void __ieee80211_insert_txq(struct rb_root_cached *root,
|
|
|
|
struct txq_info *txqi)
|
|
|
|
{
|
|
|
|
struct rb_node **new = &root->rb_root.rb_node;
|
|
|
|
struct airtime_info *old_air, *new_air;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct txq_info *__txqi;
|
|
|
|
bool leftmost = true;
|
|
|
|
|
|
|
|
while (*new) {
|
|
|
|
parent = *new;
|
|
|
|
__txqi = rb_entry(parent, struct txq_info, schedule_order);
|
|
|
|
old_air = to_airtime_info(&__txqi->txq);
|
|
|
|
new_air = to_airtime_info(&txqi->txq);
|
|
|
|
|
|
|
|
if (new_air->v_t <= old_air->v_t) {
|
|
|
|
new = &parent->rb_left;
|
|
|
|
} else {
|
|
|
|
new = &parent->rb_right;
|
|
|
|
leftmost = false;
|
|
|
|
}
|
|
|
|
}
|
mac80211: Implement Airtime-based Queue Limit (AQL)
In order for the Fq_CoDel algorithm integrated in mac80211 layer to operate
effectively to control excessive queueing latency, the CoDel algorithm
requires an accurate measure of how long packets stays in the queue, AKA
sojourn time. The sojourn time measured at the mac80211 layer doesn't
include queueing latency in the lower layer (firmware/hardware) and CoDel
expects lower layer to have a short queue. However, most 802.11ac chipsets
offload tasks such TX aggregation to firmware or hardware, thus have a deep
lower layer queue.
Without a mechanism to control the lower layer queue size, packets only
stay in mac80211 layer transiently before being sent to firmware queue.
As a result, the sojourn time measured by CoDel in the mac80211 layer is
almost always lower than the CoDel latency target, hence CoDel does little
to control the latency, even when the lower layer queue causes excessive
latency.
The Byte Queue Limits (BQL) mechanism is commonly used to address the
similar issue with wired network interface. However, this method cannot be
applied directly to the wireless network interface. "Bytes" is not a
suitable measure of queue depth in the wireless network, as the data rate
can vary dramatically from station to station in the same network, from a
few Mbps to over Gbps.
This patch implements an Airtime-based Queue Limit (AQL) to make CoDel work
effectively with wireless drivers that utilized firmware/hardware
offloading. AQL allows each txq to release just enough packets to the lower
layer to form 1-2 large aggregations to keep hardware fully utilized and
retains the rest of the frames in mac80211 layer to be controlled by the
CoDel algorithm.
Signed-off-by: Kan Yan <kyan@google.com>
[ Toke: Keep API to set pending airtime internal, fix nits in commit msg ]
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/r/20191119060610.76681-4-kyan@google.com
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
2019-11-19 14:06:09 +08:00
|
|
|
|
2021-06-23 21:47:55 +08:00
|
|
|
rb_link_node(&txqi->schedule_order, parent, new);
|
|
|
|
rb_insert_color_cached(&txqi->schedule_order, root, leftmost);
|
|
|
|
}
|
2018-12-19 09:02:08 +08:00
|
|
|
|
2021-06-23 21:47:55 +08:00
|
|
|
/* Re-sort @txq within the per-AC airtime scheduler rb-tree after its
 * virtual time (v_t) has changed, so the tree stays ordered by v_t.
 *
 * Caller must hold the AC's airtime scheduler lock (asserted below).
 * Does nothing when the txq is not currently in the tree.
 */
void ieee80211_resort_txq(struct ieee80211_hw *hw,
			  struct ieee80211_txq *txq)
{
	struct airtime_info *air_info = to_airtime_info(txq);
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = to_txq_info(txq);
	struct airtime_sched_info *air_sched;

	air_sched = &local->airtime[txq->ac];

	lockdep_assert_held(&air_sched->lock);

	/* RB_EMPTY_NODE means the txq is not scheduled; nothing to resort */
	if (!RB_EMPTY_NODE(&txqi->schedule_order)) {
		struct airtime_info *a_prev = NULL, *a_next = NULL;
		struct txq_info *t_prev, *t_next;
		struct rb_node *n_prev, *n_next;

		/* Erasing a node can cause an expensive rebalancing operation,
		 * so we check the previous and next nodes first and only remove
		 * and re-insert if the current node is not already in the
		 * correct position.
		 */
		if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) {
			t_prev = container_of(n_prev, struct txq_info,
					      schedule_order);
			a_prev = to_airtime_info(&t_prev->txq);
		}

		if ((n_next = rb_next(&txqi->schedule_order)) != NULL) {
			t_next = container_of(n_next, struct txq_info,
					      schedule_order);
			a_next = to_airtime_info(&t_next->txq);
		}

		/* Already ordered w.r.t. both neighbours: prev.v_t <= v_t < next.v_t */
		if ((!a_prev || a_prev->v_t <= air_info->v_t) &&
		    (!a_next || a_next->v_t > air_info->v_t))
			return;

		/* Keep the scheduler's iteration cursor valid across the
		 * erase: back it up to the predecessor node.
		 */
		if (air_sched->schedule_pos == &txqi->schedule_order)
			air_sched->schedule_pos = n_prev;

		rb_erase_cached(&txqi->schedule_order,
				&air_sched->active_txqs);
		RB_CLEAR_NODE(&txqi->schedule_order);
		__ieee80211_insert_txq(&air_sched->active_txqs, txqi);
	}
}
|
|
|
|
|
|
|
|
/* Recompute the sum of airtime weights over all currently-active stations
 * on this AC, pruning stations that are no longer active from the list.
 *
 * @now:   timestamp (boottime ns); 0 means "fetch the current time here".
 * @force: recompute unconditionally, ignoring the rate-limit check.
 *
 * Caller must hold air_sched->lock (asserted below).
 */
void ieee80211_update_airtime_weight(struct ieee80211_local *local,
				     struct airtime_sched_info *air_sched,
				     u64 now, bool force)
{
	struct airtime_info *air_info, *tmp;
	u64 weight_sum = 0;

	if (unlikely(!now))
		now = ktime_get_coarse_boottime_ns();

	lockdep_assert_held(&air_sched->lock);

	/* NOTE(review): this skips the recalculation when the last update is
	 * OLDER than AIRTIME_ACTIVE_DURATION, which reads inverted for a
	 * rate-limit; confirm the intended direction against the scheduler
	 * design before changing it.
	 */
	if (!force && (air_sched->last_weight_update <
		       now - AIRTIME_ACTIVE_DURATION))
		return;

	/* Sum weights of still-active entries; drop inactive ones from the
	 * active list (safe iteration because of list_del_init below).
	 */
	list_for_each_entry_safe(air_info, tmp,
				 &air_sched->active_list, list) {
		if (airtime_is_active(air_info, now))
			weight_sum += air_info->weight;
		else
			list_del_init(&air_info->list);
	}

	airtime_weight_sum_set(air_sched, weight_sum);
	air_sched->last_weight_update = now;
}
|
|
|
|
|
|
|
|
/* Add @txq to the per-AC airtime scheduler so it becomes eligible for
 * transmission. Marks the queue's airtime info active, catches up its
 * virtual time if it fell too far behind, refreshes the weight sum and
 * inserts the txq into the scheduling rb-tree.
 *
 * Takes and releases the AC's scheduler lock internally.
 */
void ieee80211_schedule_txq(struct ieee80211_hw *hw,
			    struct ieee80211_txq *txq)
	__acquires(txq_lock) __releases(txq_lock)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = to_txq_info(txq);
	struct airtime_sched_info *air_sched;
	u64 now = ktime_get_coarse_boottime_ns();
	struct airtime_info *air_info;
	u8 ac = txq->ac;
	bool was_active;

	air_sched = &local->airtime[ac];
	air_info = to_airtime_info(txq);

	spin_lock_bh(&air_sched->lock);
	was_active = airtime_is_active(air_info, now);
	airtime_set_active(air_sched, air_info, now);

	/* Already in the rb-tree: nothing more to do */
	if (!RB_EMPTY_NODE(&txqi->schedule_order))
		goto out;

	/* If the station has been inactive for a while, catch up its v_t so it
	 * doesn't get indefinite priority; see comment above the definition of
	 * AIRTIME_MAX_BEHIND.
	 */
	if ((!was_active && air_info->v_t < air_sched->v_t) ||
	    air_info->v_t < air_sched->v_t - AIRTIME_MAX_BEHIND)
		air_info->v_t = air_sched->v_t;

	/* Force a weight recalculation when this station just became active */
	ieee80211_update_airtime_weight(local, air_sched, now, !was_active);
	__ieee80211_insert_txq(&air_sched->active_txqs, txqi);

out:
	spin_unlock_bh(&air_sched->lock);
}
EXPORT_SYMBOL(ieee80211_schedule_txq);
|
2018-12-19 09:02:06 +08:00
|
|
|
|
2021-06-23 21:47:55 +08:00
|
|
|
/* Remove @txq from the per-AC airtime scheduler rb-tree.
 *
 * @purge: true when the txq is going away entirely (e.g. station removal);
 *         also drops it from the active list and forces a weight-sum
 *         recalculation. When false, the txq is merely descheduled and is
 *         kept marked active so it can be rescheduled cheaply.
 *
 * Caller must hold the AC's scheduler lock (asserted below).
 */
static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq,
				       bool purge)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = to_txq_info(txq);
	struct airtime_sched_info *air_sched;
	struct airtime_info *air_info;

	air_sched = &local->airtime[txq->ac];
	air_info = to_airtime_info(&txqi->txq);

	lockdep_assert_held(&air_sched->lock);

	if (purge) {
		list_del_init(&air_info->list);
		/* now == 0 lets the helper fetch the current time; force the
		 * recalculation since the weight sum just lost an entry.
		 */
		ieee80211_update_airtime_weight(local, air_sched, 0, true);
	}

	/* Not in the rb-tree: nothing left to remove */
	if (RB_EMPTY_NODE(&txqi->schedule_order))
		return;

	/* Keep the scheduler's iteration cursor valid across the erase */
	if (air_sched->schedule_pos == &txqi->schedule_order)
		air_sched->schedule_pos = rb_prev(&txqi->schedule_order);

	if (!purge)
		airtime_set_active(air_sched, air_info,
				   ktime_get_coarse_boottime_ns());

	rb_erase_cached(&txqi->schedule_order,
			&air_sched->active_txqs);
	RB_CLEAR_NODE(&txqi->schedule_order);
}
|
2021-06-23 21:47:55 +08:00
|
|
|
|
|
|
|
void ieee80211_unschedule_txq(struct ieee80211_hw *hw,
|
|
|
|
struct ieee80211_txq *txq,
|
|
|
|
bool purge)
|
|
|
|
__acquires(txq_lock) __releases(txq_lock)
|
|
|
|
{
|
|
|
|
struct ieee80211_local *local = hw_to_local(hw);
|
|
|
|
|
|
|
|
spin_lock_bh(&local->airtime[txq->ac].lock);
|
|
|
|
__ieee80211_unschedule_txq(hw, txq, purge);
|
|
|
|
spin_unlock_bh(&local->airtime[txq->ac].lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return a txq to the scheduler after the driver is done pulling from it.
 * If the queue is currently scheduled, has no frames left, and the driver
 * did not request to keep it scheduled (@force), it is descheduled
 * (non-purging, so it stays marked active for cheap rescheduling).
 */
void ieee80211_return_txq(struct ieee80211_hw *hw,
			  struct ieee80211_txq *txq, bool force)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct txq_info *txqi = to_txq_info(txq);

	spin_lock_bh(&local->airtime[txq->ac].lock);

	if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force &&
	    !txq_has_queue(txq))
		__ieee80211_unschedule_txq(hw, txq, false);

	spin_unlock_bh(&local->airtime[txq->ac].lock);
}
EXPORT_SYMBOL(ieee80211_return_txq);
|
2019-01-22 22:20:16 +08:00
|
|
|
|
2021-01-10 01:57:51 +08:00
|
|
|
/* Static key that, when enabled (toggled elsewhere — presumably via
 * debugfs; confirm), makes ieee80211_txq_airtime_check() bypass all AQL
 * limits. Defaults to off.
 */
DEFINE_STATIC_KEY_FALSE(aql_disable);
|
|
|
|
|
mac80211: Implement Airtime-based Queue Limit (AQL)
In order for the Fq_CoDel algorithm integrated in mac80211 layer to operate
effectively to control excessive queueing latency, the CoDel algorithm
requires an accurate measure of how long packets stays in the queue, AKA
sojourn time. The sojourn time measured at the mac80211 layer doesn't
include queueing latency in the lower layer (firmware/hardware) and CoDel
expects lower layer to have a short queue. However, most 802.11ac chipsets
offload tasks such TX aggregation to firmware or hardware, thus have a deep
lower layer queue.
Without a mechanism to control the lower layer queue size, packets only
stay in mac80211 layer transiently before being sent to firmware queue.
As a result, the sojourn time measured by CoDel in the mac80211 layer is
almost always lower than the CoDel latency target, hence CoDel does little
to control the latency, even when the lower layer queue causes excessive
latency.
The Byte Queue Limits (BQL) mechanism is commonly used to address the
similar issue with wired network interface. However, this method cannot be
applied directly to the wireless network interface. "Bytes" is not a
suitable measure of queue depth in the wireless network, as the data rate
can vary dramatically from station to station in the same network, from a
few Mbps to over Gbps.
This patch implements an Airtime-based Queue Limit (AQL) to make CoDel work
effectively with wireless drivers that utilized firmware/hardware
offloading. AQL allows each txq to release just enough packets to the lower
layer to form 1-2 large aggregations to keep hardware fully utilized and
retains the rest of the frames in mac80211 layer to be controlled by the
CoDel algorithm.
Signed-off-by: Kan Yan <kyan@google.com>
[ Toke: Keep API to set pending airtime internal, fix nits in commit msg ]
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/r/20191119060610.76681-4-kyan@google.com
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
2019-11-19 14:06:09 +08:00
|
|
|
/* Airtime Queue Limit (AQL) admission check: may the driver pull more
 * frames from @txq, given the airtime already pending in the lower layer?
 *
 * Returns true (no limiting) when AQL is unsupported or disabled, for
 * non-station or management (TID == IEEE80211_NUM_TIDS) queues, or while
 * the pending airtime is under the per-txq low limit. Between the low and
 * high per-txq limits, transmission is allowed only while the device-wide
 * pending airtime is below the global threshold.
 */
bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw,
				 struct ieee80211_txq *txq)
{
	struct airtime_info *air_info = to_airtime_info(txq);
	struct ieee80211_local *local = hw_to_local(hw);

	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
		return true;

	if (static_branch_unlikely(&aql_disable))
		return true;

	if (!txq->sta)
		return true;

	if (unlikely(txq->tid == IEEE80211_NUM_TIDS))
		return true;

	/* Under the per-txq low-water mark: always allowed */
	if (atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_low)
		return true;

	/* Between low and high marks: allowed only while the global pending
	 * airtime stays below the device-wide threshold.
	 */
	if (atomic_read(&local->aql_total_pending_airtime) <
	    local->aql_threshold &&
	    atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_high)
		return true;

	return false;
}
EXPORT_SYMBOL(ieee80211_txq_airtime_check);
|
|
|
|
|
2018-12-19 09:02:08 +08:00
|
|
|
/* Decide whether the driver may transmit from @txq right now under the
 * virtual-time airtime scheduler. Returns false when the AQL check fails
 * or the txq is not scheduled; otherwise returns true iff the txq's
 * virtual time is not ahead of the scheduler's.
 *
 * Takes and releases the AC's scheduler lock internally.
 */
bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
				struct ieee80211_txq *txq)
{
	struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq);
	struct ieee80211_local *local = hw_to_local(hw);
	struct airtime_sched_info *air_sched;
	struct airtime_info *air_info;
	struct rb_node *node = NULL;
	bool ret = false;
	u64 now;

	/* AQL gate first: no point checking schedule order if the lower
	 * layer already has too much pending airtime for this txq.
	 */
	if (!ieee80211_txq_airtime_check(hw, txq))
		return false;

	air_sched = &local->airtime[txq->ac];
	spin_lock_bh(&air_sched->lock);

	/* Not scheduled at all: not eligible */
	if (RB_EMPTY_NODE(&txqi->schedule_order))
		goto out;

	now = ktime_get_coarse_boottime_ns();

	/* Like in ieee80211_next_txq(), make sure the first station in the
	 * scheduling order is eligible for transmission to avoid starvation.
	 */
	node = rb_first_cached(&air_sched->active_txqs);
	if (node) {
		first_txqi = container_of(node, struct txq_info,
					  schedule_order);
		air_info = to_airtime_info(&first_txqi->txq);

		if (air_sched->v_t < air_info->v_t)
			airtime_catchup_v_t(air_sched, air_info->v_t, now);
	}

	air_info = to_airtime_info(&txqi->txq);
	if (air_info->v_t <= air_sched->v_t) {
		air_sched->last_schedule_activity = now;
		ret = true;
	}

out:
	spin_unlock_bh(&air_sched->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_txq_may_transmit);
|
|
|
|
|
2018-12-19 09:02:06 +08:00
|
|
|
/* Begin a new scheduling round for access category @ac by resetting the
 * scheduler's iteration cursor under the AC's lock.
 */
void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_local *local = hw_to_local(hw);

	spin_lock_bh(&local->airtime[ac].lock);
	local->airtime[ac].schedule_pos = NULL;
	spin_unlock_bh(&local->airtime[ac].lock);
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);
|
2018-12-19 09:02:06 +08:00
|
|
|
|
2014-11-10 00:50:10 +08:00
|
|
|
/* Core data-frame transmit path for an 802.3 frame arriving from the
 * network stack on @dev: looks up the destination station, tries the
 * fast-xmit path, otherwise segments/linearizes and checksums the skb as
 * needed, builds the 802.11 header for each resulting segment and hands
 * it to ieee80211_xmit().
 *
 * Consumes @skb on all paths (transmitted or freed). Runs under RCU for
 * the station lookup.
 */
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
				  struct net_device *dev,
				  u32 info_flags,
				  u32 ctrl_flags,
				  u64 *cookie)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct sk_buff *next;
	/* remember the original length for the throughput LED trigger;
	 * zeroed on the error path so failures don't count
	 */
	int len = skb->len;

	/* Too short to even contain an Ethernet header: drop */
	if (unlikely(skb->len < ETH_HLEN)) {
		kfree_skb(skb);
		return;
	}

	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
		goto out_free;

	/* IS_ERR(sta) means "no station, but not a drop": continue without */
	if (IS_ERR(sta))
		sta = NULL;

	if (local->ops->wake_tx_queue) {
		u16 queue = __ieee80211_select_queue(sdata, sta, skb);
		skb_set_queue_mapping(skb, queue);
		skb_get_hash(skb);
	}

	ieee80211_aggr_check(sdata, sta, skb);

	sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);

	/* Fast-xmit path: if it accepts the frame we are done */
	if (sta) {
		struct ieee80211_fast_tx *fast_tx;

		fast_tx = rcu_dereference(sta->fast_tx);

		if (fast_tx &&
		    ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
			goto out;
	}

	if (skb_is_gso(skb)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, 0);
		if (IS_ERR(segs)) {
			goto out_free;
		} else if (segs) {
			/* segmentation produced a new list; drop the original */
			consume_skb(skb);
			skb = segs;
		}
	} else {
		/* we cannot process non-linear frames on this path */
		if (skb_linearize(skb))
			goto out_free;

		/* the frame could be fragmented, software-encrypted, and other
		 * things so we cannot really handle checksum offload with it -
		 * fix it up in software before we handle anything else.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_set_transport_header(skb,
						 skb_checksum_start_offset(skb));
			if (skb_checksum_help(skb))
				goto out_free;
		}
	}

	/* Transmit every segment; @next tracks the rest of the list so it
	 * can be freed if header construction fails mid-list.
	 */
	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);

		if (skb->protocol == sdata->control_port_protocol)
			ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;

		skb = ieee80211_build_hdr(sdata, skb, info_flags,
					  sta, ctrl_flags, cookie);
		if (IS_ERR(skb)) {
			kfree_skb_list(next);
			goto out;
		}

		dev_sw_netstats_tx_add(dev, 1, skb->len);

		ieee80211_xmit(sdata, sta, skb);
	}
	goto out;
 out_free:
	kfree_skb(skb);
	len = 0;
 out:
	if (len)
		ieee80211_tpt_led_trig_tx(local, len);
	rcu_read_unlock();
}
|
|
|
|
|
2016-11-22 18:52:18 +08:00
|
|
|
static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta)
|
|
|
|
{
|
|
|
|
struct ethhdr *eth;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = skb_ensure_writable(skb, ETH_HLEN);
|
|
|
|
if (unlikely(err))
|
|
|
|
return err;
|
|
|
|
|
|
|
|
eth = (void *)skb->data;
|
|
|
|
ether_addr_copy(eth->h_dest, sta->sta.addr);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Decide whether a multicast frame on @dev should be converted into
 * per-station unicast copies. Only done on AP / AP_VLAN interfaces with
 * the per-BSS multicast_to_unicast toggle enabled, and only for ARP,
 * IPv4 and IPv6 payloads (looking through one 802.1Q VLAN tag).
 */
static bool ieee80211_multicast_to_unicast(struct sk_buff *skb,
					   struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	const struct ethhdr *eth = (void *)skb->data;
	const struct vlan_ethhdr *ethvlan = (void *)skb->data;
	__be16 ethertype;

	if (likely(!is_multicast_ether_addr(eth->h_dest)))
		return false;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP_VLAN:
		/* 4-addr / station VLANs carry a single peer; no conversion */
		if (sdata->u.vlan.sta)
			return false;
		if (sdata->wdev.use_4addr)
			return false;
		fallthrough;
	case NL80211_IFTYPE_AP:
		/* check runtime toggle for this bss */
		if (!sdata->bss->multicast_to_unicast)
			return false;
		break;
	default:
		return false;
	}

	/* multicast to unicast conversion only for some payload */
	ethertype = eth->h_proto;
	if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
		ethertype = ethvlan->h_vlan_encapsulated_proto;
	switch (ethertype) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		break;
	default:
		return false;
	}

	return true;
}
|
|
|
|
|
|
|
|
/*
 * Fan a multicast @skb out into unicast copies, one per associated station
 * on @sdata (excluding the source station), queued on @queue.
 *
 * The first matching station reuses the original skb; every further station
 * gets an skb_clone(). If cloning or address rewriting fails at any point,
 * the partially-built queue is purged and the untouched multicast skb is
 * queued alone as a fallback. If no station matches, the skb is dropped.
 * Ownership of @skb always passes to this function.
 */
static void
ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev,
			     struct sk_buff_head *queue)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	const struct ethhdr *eth = (struct ethhdr *)skb->data;
	struct sta_info *sta, *first = NULL;
	struct sk_buff *cloned_skb;

	rcu_read_lock();

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata)
			/* AP-VLAN mismatch */
			continue;
		if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr)))
			/* do not send back to source */
			continue;
		if (!first) {
			/* defer the first station: it will take the original
			 * skb after the loop, avoiding one clone */
			first = sta;
			continue;
		}
		cloned_skb = skb_clone(skb, GFP_ATOMIC);
		if (!cloned_skb)
			goto multicast;
		if (unlikely(ieee80211_change_da(cloned_skb, sta))) {
			dev_kfree_skb(cloned_skb);
			goto multicast;
		}
		__skb_queue_tail(queue, cloned_skb);
	}

	if (likely(first)) {
		if (unlikely(ieee80211_change_da(skb, first)))
			goto multicast;
		__skb_queue_tail(queue, skb);
	} else {
		/* no STA connected, drop */
		kfree_skb(skb);
		skb = NULL;
	}

	goto out;
multicast:
	/* conversion failed: drop the partial copies, send the original
	 * multicast frame as-is */
	__skb_queue_purge(queue);
	__skb_queue_tail(queue, skb);
out:
	rcu_read_unlock();
}
|
|
|
|
|
2014-11-10 00:50:10 +08:00
|
|
|
/**
|
|
|
|
* ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs
|
|
|
|
* @skb: packet to be sent
|
|
|
|
* @dev: incoming interface
|
|
|
|
*
|
|
|
|
* On failure skb will be freed.
|
|
|
|
*/
|
2014-11-10 00:50:07 +08:00
|
|
|
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
|
|
|
|
struct net_device *dev)
|
|
|
|
{
|
2016-11-22 18:52:18 +08:00
|
|
|
if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) {
|
|
|
|
struct sk_buff_head queue;
|
|
|
|
|
|
|
|
__skb_queue_head_init(&queue);
|
|
|
|
ieee80211_convert_to_unicast(skb, dev, &queue);
|
|
|
|
while ((skb = __skb_dequeue(&queue)))
|
2020-05-28 00:03:34 +08:00
|
|
|
__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
|
2016-11-22 18:52:18 +08:00
|
|
|
} else {
|
2020-05-28 00:03:34 +08:00
|
|
|
__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
|
2016-11-22 18:52:18 +08:00
|
|
|
}
|
|
|
|
|
2014-11-10 00:50:07 +08:00
|
|
|
return NETDEV_TX_OK;
|
|
|
|
}
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2019-11-25 18:04:37 +08:00
|
|
|
/*
 * Hand an 802.3-offloaded frame to the driver, honoring stopped queues.
 *
 * Returns true if the frame left mac80211 (queued to a TXQ or given to
 * drv_tx()), false if it had to be parked on the local pending queue
 * because the hardware queue is stopped -- callers treating this as a
 * "stop sending" signal (see ieee80211_tx_pending_skb()).
 *
 * @txpending: true when called while draining the pending queue; in that
 * case a re-queued frame goes back to the head to preserve ordering.
 */
static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
			      struct sk_buff *skb, struct sta_info *sta,
			      bool txpending)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_control control = {};
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *pubsta = NULL;
	unsigned long flags;
	int q = info->hw_queue;

	if (sta)
		sk_pacing_shift_update(skb->sk, local->hw.tx_sk_pacing_shift);

	ieee80211_tpt_led_trig_tx(local, skb->len);

	/* if the driver uses TXQs, the frame is queued there instead */
	if (ieee80211_queue_skb(local, sdata, sta, skb))
		return true;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);

	/* queue stopped, or older pending frames must go out first */
	if (local->queue_stop_reasons[q] ||
	    (!txpending && !skb_queue_empty(&local->pending[q]))) {
		if (txpending)
			skb_queue_head(&local->pending[q], skb);
		else
			skb_queue_tail(&local->pending[q], skb);

		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

		return false;
	}

	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

	/* only pass the station to the driver once it's been uploaded */
	if (sta && sta->uploaded)
		pubsta = &sta->sta;

	control.sta = pubsta;

	drv_tx(local, &control, skb);

	return true;
}
|
|
|
|
|
|
|
|
/*
 * Transmit one 802.3 frame on the hardware-encapsulation (offload) path.
 *
 * Selects the queue, checks for off-channel scanning, sets up the tx_info
 * (A-MPDU flag, ack-status cookie, hw key, HW_80211_ENCAP) and accounts
 * device and per-station statistics before handing the frame to
 * ieee80211_tx_8023(). Falls back to the normal (non-offload) TX path if
 * an aggregation session exists but is not yet operational. Consumes @skb
 * on all paths.
 */
static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
				struct net_device *dev, struct sta_info *sta,
				struct ieee80211_key *key, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	u8 tid;

	if (local->ops->wake_tx_queue) {
		u16 queue = __ieee80211_select_queue(sdata, sta, skb);
		skb_set_queue_mapping(skb, queue);
		skb_get_hash(skb);
	}

	/* don't transmit data while off-channel for a software scan */
	if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
	    test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
		goto out_free;

	memset(info, 0, sizeof(*info));

	ieee80211_aggr_check(sdata, sta, skb);

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (tid_tx) {
		if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
			/* fall back to non-offload slow path */
			__ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
			return;
		}

		info->flags |= IEEE80211_TX_CTL_AMPDU;
		if (tid_tx->timeout)
			tid_tx->last_tx = jiffies;
	}

	/* socket asked for TX status reporting */
	if (unlikely(skb->sk &&
		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
		info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
							     &info->flags, NULL);

	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
	sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;

	/* for AP_VLAN, TX is done on the owning AP interface */
	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);

	info->flags |= IEEE80211_TX_CTL_HW_80211_ENCAP;
	info->control.vif = &sdata->vif;

	if (key)
		info->control.hw_key = &key->conf;

	ieee80211_tx_8023(sdata, skb, sta, false);

	return;

out_free:
	kfree_skb(skb);
}
|
|
|
|
|
|
|
|
/*
 * start_xmit for interfaces using 802.3 (ethernet) TX offload.
 *
 * Uses the fast offload path only when the destination station is known,
 * uploaded and authorized, the frame is not a control-port frame, and the
 * key (if any) is hardware-offloaded and not TKIP; otherwise falls back to
 * the regular ieee80211_subif_start_xmit() path. Always returns
 * NETDEV_TX_OK; undersized frames are dropped.
 */
netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
	struct ieee80211_key *key;
	struct sta_info *sta;

	if (unlikely(skb->len < ETH_HLEN)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
		kfree_skb(skb);
		goto out;
	}

	if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
	    !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
	    sdata->control_port_protocol == ehdr->h_proto))
		goto skip_offload;

	/* pairwise key first, fall back to the default unicast key */
	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
	if (!key)
		key = rcu_dereference(sdata->default_unicast_key);

	/* software crypto (or TKIP) can't be combined with 802.3 offload */
	if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
		    key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
		goto skip_offload;

	ieee80211_8023_xmit(sdata, dev, sta, key, skb);
	goto out;

skip_offload:
	ieee80211_subif_start_xmit(skb, dev);
out:
	rcu_read_unlock();

	return NETDEV_TX_OK;
}
|
|
|
|
|
2014-11-10 00:50:11 +08:00
|
|
|
/*
 * Build an 802.11 data frame template from an 802.3 frame: construct the
 * header via ieee80211_build_hdr() and run key selection so the template
 * carries valid key information.
 *
 * Consumes @skb; returns the templated skb, or an ERR_PTR(-EINVAL) on
 * station-lookup or key-selection failure (the error from
 * ieee80211_build_hdr() is propagated as-is).
 */
struct sk_buff *
ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
			      struct sk_buff *skb, u32 info_flags)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_data tx = {
		.local = sdata->local,
		.sdata = sdata,
	};
	struct sta_info *sta;

	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
		kfree_skb(skb);
		skb = ERR_PTR(-EINVAL);
		goto out;
	}

	skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, 0, NULL);
	if (IS_ERR(skb))
		goto out;

	hdr = (void *)skb->data;
	tx.sta = sta_info_get(sdata, hdr->addr1);
	tx.skb = skb;

	if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) {
		rcu_read_unlock();
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

out:
	rcu_read_unlock();
	return skb;
}
|
|
|
|
|
2008-05-17 06:57:14 +08:00
|
|
|
/*
|
|
|
|
* ieee80211_clear_tx_pending may not be called in a context where
|
|
|
|
* it is possible that it packets could come in again.
|
|
|
|
*/
|
2007-07-27 21:43:22 +08:00
|
|
|
void ieee80211_clear_tx_pending(struct ieee80211_local *local)
|
|
|
|
{
|
2012-11-10 10:44:14 +08:00
|
|
|
struct sk_buff *skb;
|
2009-03-24 00:28:35 +08:00
|
|
|
int i;
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2012-11-10 10:44:14 +08:00
|
|
|
for (i = 0; i < local->hw.queues; i++) {
|
|
|
|
while ((skb = skb_dequeue(&local->pending[i])) != NULL)
|
|
|
|
ieee80211_free_txskb(&local->hw, skb);
|
|
|
|
}
|
2007-07-27 21:43:22 +08:00
|
|
|
}
|
|
|
|
|
2011-02-24 21:42:06 +08:00
|
|
|
/*
 * Returns false if the frame couldn't be transmitted but was queued instead,
 * which in this case means re-queued -- take as an indication to stop sending
 * more pending frames.
 */
static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
				     struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;
	struct ieee80211_hdr *hdr;
	bool result;
	struct ieee80211_chanctx_conf *chanctx_conf;

	sdata = vif_to_sdata(info->control.vif);

	if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) {
		/* frame still needs full TX processing */
		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
		if (unlikely(!chanctx_conf)) {
			/* channel context went away; drop, but report
			 * success so the caller keeps draining */
			dev_kfree_skb(skb);
			return true;
		}
		info->band = chanctx_conf->def.chan->band;
		result = ieee80211_tx(sdata, NULL, skb, true);
	} else if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
		/* 802.3 hardware-encap path */
		if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
			dev_kfree_skb(skb);
			return true;
		}

		if (IS_ERR(sta) || (sta && !sta->uploaded))
			sta = NULL;

		result = ieee80211_tx_8023(sdata, skb, sta, true);
	} else {
		/* fully processed 802.11 frame: push straight to the
		 * driver via __ieee80211_tx() */
		struct sk_buff_head skbs;

		__skb_queue_head_init(&skbs);
		__skb_queue_tail(&skbs, skb);

		hdr = (struct ieee80211_hdr *)skb->data;
		sta = sta_info_get(sdata, hdr->addr1);

		result = __ieee80211_tx(local, &skbs, sta, true);
	}

	return result;
}
|
|
|
|
|
2008-05-17 06:57:14 +08:00
|
|
|
/*
 * Transmit all pending packets. Called from tasklet.
 */
void ieee80211_tx_pending(struct tasklet_struct *t)
{
	struct ieee80211_local *local = from_tasklet(local, t,
						     tx_pending_tasklet);
	unsigned long flags;
	int i;
	bool txok;

	rcu_read_lock();

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	for (i = 0; i < local->hw.queues; i++) {
		/*
		 * If queue is stopped by something other than due to pending
		 * frames, or we have no pending frames, proceed to next queue.
		 */
		if (local->queue_stop_reasons[i] ||
		    skb_queue_empty(&local->pending[i]))
			continue;

		while (!skb_queue_empty(&local->pending[i])) {
			struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

			if (WARN_ON(!info->control.vif)) {
				ieee80211_free_txskb(&local->hw, skb);
				continue;
			}

			/* drop the lock around the actual transmit: the TX
			 * path may need to re-take it to re-queue the frame */
			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
					       flags);

			txok = ieee80211_tx_pending_skb(local, skb);
			spin_lock_irqsave(&local->queue_stop_reason_lock,
					  flags);
			/* frame was re-queued: stop draining this queue */
			if (!txok)
				break;
		}

		if (skb_queue_empty(&local->pending[i]))
			ieee80211_propagate_queue_wake(local, i);
	}
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

	rcu_read_unlock();
}
|
|
|
|
|
|
|
|
/* functions for drivers to get certain frames */
|
|
|
|
|
2013-01-07 23:04:50 +08:00
|
|
|
/*
 * Append a TIM element to the beacon in @skb from the power-save state in
 * @ps, and (unless building a template) advance the DTIM count.
 *
 * When stations are asleep, the partial virtual bitmap is trimmed to the
 * byte range [n1, n2] that actually contains set bits (n1 rounded down to
 * an even offset per the 802.11 TIM encoding). Caller must hold the
 * appropriate TIM lock (see ieee80211_beacon_add_tim()).
 */
static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
				       struct ps_data *ps, struct sk_buff *skb,
				       bool is_template)
{
	u8 *pos, *tim;
	int aid0 = 0;
	int i, have_bits = 0, n1, n2;

	/* Generate bitmap for TIM only if there are any STAs in power save
	 * mode. */
	if (atomic_read(&ps->num_sta_ps) > 0)
		/* in the hope that this is faster than
		 * checking byte-for-byte */
		have_bits = !bitmap_empty((unsigned long *)ps->tim,
					  IEEE80211_MAX_AID+1);
	if (!is_template) {
		/* templates must not consume DTIM counts; only real
		 * beacons advance the countdown */
		if (ps->dtim_count == 0)
			ps->dtim_count = sdata->vif.bss_conf.dtim_period - 1;
		else
			ps->dtim_count--;
	}

	tim = pos = skb_put(skb, 6);
	*pos++ = WLAN_EID_TIM;
	*pos++ = 4;
	*pos++ = ps->dtim_count;
	*pos++ = sdata->vif.bss_conf.dtim_period;

	/* DTIM beacon with buffered broadcast/multicast traffic */
	if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
		aid0 = 1;

	ps->dtim_bc_mc = aid0 == 1;

	if (have_bits) {
		/* Find largest even number N1 so that bits numbered 1 through
		 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
		 * (N2 + 1) x 8 through 2007 are 0. */
		n1 = 0;
		for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
			if (ps->tim[i]) {
				n1 = i & 0xfe;
				break;
			}
		}
		n2 = n1;
		for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
			if (ps->tim[i]) {
				n2 = i;
				break;
			}
		}

		/* Bitmap control */
		*pos++ = n1 | aid0;
		/* Part Virt Bitmap */
		skb_put(skb, n2 - n1);
		memcpy(pos, ps->tim + n1, n2 - n1 + 1);

		tim[1] = n2 - n1 + 4;
	} else {
		*pos++ = aid0; /* Bitmap control */
		*pos++ = 0; /* Part Virt Bitmap */
	}
}
|
|
|
|
|
2013-01-07 23:04:50 +08:00
|
|
|
static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
|
2014-05-09 19:11:49 +08:00
|
|
|
struct ps_data *ps, struct sk_buff *skb,
|
|
|
|
bool is_template)
|
2013-01-07 23:04:50 +08:00
|
|
|
{
|
|
|
|
struct ieee80211_local *local = sdata->local;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Not very nice, but we want to allow the driver to call
|
|
|
|
* ieee80211_beacon_get() as a response to the set_tim()
|
|
|
|
* callback. That, however, is already invoked under the
|
|
|
|
* sta_lock to guarantee consistent and race-free update
|
|
|
|
* of the tim bitmap in mac80211 and the driver.
|
|
|
|
*/
|
|
|
|
if (local->tim_in_locked_section) {
|
2014-05-09 19:11:49 +08:00
|
|
|
__ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
|
2013-01-07 23:04:50 +08:00
|
|
|
} else {
|
2013-02-22 19:55:01 +08:00
|
|
|
spin_lock_bh(&local->tim_lock);
|
2014-05-09 19:11:49 +08:00
|
|
|
__ieee80211_beacon_add_tim(sdata, ps, skb, is_template);
|
2013-02-22 19:55:01 +08:00
|
|
|
spin_unlock_bh(&local->tim_lock);
|
2013-01-07 23:04:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-08-11 16:01:04 +08:00
|
|
|
/*
 * Patch the current countdown value (e.g. CSA counter) into the beacon
 * data at each recorded offset, and mirror it into the probe response for
 * AP interfaces. For AP the offsets are relative to the beacon tail; for
 * IBSS and mesh they are relative to the head. Multiple counters are only
 * written while a channel switch is active.
 */
static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata,
					struct beacon_data *beacon)
{
	u8 *beacon_data, count, max_count = 1;
	struct probe_resp *resp;
	size_t beacon_data_len;
	u16 *bcn_offsets;
	int i;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
		beacon_data = beacon->tail;
		beacon_data_len = beacon->tail_len;
		break;
	case NL80211_IFTYPE_ADHOC:
		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
		break;
	case NL80211_IFTYPE_MESH_POINT:
		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
		break;
	default:
		return;
	}

	rcu_read_lock();
	/* NOTE(review): u.ap.probe_resp is only meaningful for AP; the
	 * result is only used under the AP check below */
	resp = rcu_dereference(sdata->u.ap.probe_resp);

	bcn_offsets = beacon->cntdwn_counter_offsets;
	count = beacon->cntdwn_current_counter;
	if (sdata->vif.csa_active)
		max_count = IEEE80211_MAX_CNTDWN_COUNTERS_NUM;

	for (i = 0; i < max_count; ++i) {
		if (bcn_offsets[i]) {
			/* offset must lie inside the beacon data */
			if (WARN_ON_ONCE(bcn_offsets[i] >= beacon_data_len)) {
				rcu_read_unlock();
				return;
			}
			beacon_data[bcn_offsets[i]] = count;
		}

		if (sdata->vif.type == NL80211_IFTYPE_AP && resp) {
			u16 *resp_offsets = resp->cntdwn_counter_offsets;

			resp->data[resp_offsets[i]] = count;
		}
	}
	rcu_read_unlock();
}
|
|
|
|
|
2020-08-11 16:01:04 +08:00
|
|
|
static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
|
2015-06-10 19:06:53 +08:00
|
|
|
{
|
2020-08-11 16:01:04 +08:00
|
|
|
beacon->cntdwn_current_counter--;
|
2015-06-10 19:06:53 +08:00
|
|
|
|
|
|
|
/* the counter should never reach 0 */
|
2020-08-11 16:01:04 +08:00
|
|
|
WARN_ON_ONCE(!beacon->cntdwn_current_counter);
|
2015-06-10 19:06:53 +08:00
|
|
|
|
2020-08-11 16:01:04 +08:00
|
|
|
return beacon->cntdwn_current_counter;
|
2015-06-10 19:06:53 +08:00
|
|
|
}
|
|
|
|
|
2020-08-11 16:01:04 +08:00
|
|
|
u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif)
|
2014-05-09 19:11:50 +08:00
|
|
|
{
|
|
|
|
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
|
2014-06-05 20:21:36 +08:00
|
|
|
struct beacon_data *beacon = NULL;
|
|
|
|
u8 count = 0;
|
2014-05-09 19:11:47 +08:00
|
|
|
|
2014-06-05 20:21:36 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
|
|
|
|
if (sdata->vif.type == NL80211_IFTYPE_AP)
|
|
|
|
beacon = rcu_dereference(sdata->u.ap.beacon);
|
|
|
|
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
|
|
|
|
beacon = rcu_dereference(sdata->u.ibss.presp);
|
|
|
|
else if (ieee80211_vif_is_mesh(&sdata->vif))
|
|
|
|
beacon = rcu_dereference(sdata->u.mesh.beacon);
|
|
|
|
|
|
|
|
if (!beacon)
|
|
|
|
goto unlock;
|
|
|
|
|
2020-08-11 16:01:04 +08:00
|
|
|
count = __ieee80211_beacon_update_cntdwn(beacon);
|
2014-05-09 19:11:50 +08:00
|
|
|
|
2014-06-05 20:21:36 +08:00
|
|
|
unlock:
|
|
|
|
rcu_read_unlock();
|
|
|
|
return count;
|
2013-07-11 22:09:06 +08:00
|
|
|
}
|
2020-08-11 16:01:04 +08:00
|
|
|
EXPORT_SYMBOL(ieee80211_beacon_update_cntdwn);
|
2013-07-11 22:09:06 +08:00
|
|
|
|
2020-08-11 16:01:04 +08:00
|
|
|
/*
 * Force the beacon countdown counter of @vif down to @counter; the counter
 * is only ever lowered, never raised. No-op when no beacon is set.
 */
void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct beacon_data *beacon = NULL;

	rcu_read_lock();

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
		beacon = rcu_dereference(sdata->u.ap.beacon);
		break;
	case NL80211_IFTYPE_ADHOC:
		beacon = rcu_dereference(sdata->u.ibss.presp);
		break;
	default:
		if (ieee80211_vif_is_mesh(&sdata->vif))
			beacon = rcu_dereference(sdata->u.mesh.beacon);
		break;
	}

	if (beacon && counter < beacon->cntdwn_current_counter)
		beacon->cntdwn_current_counter = counter;

	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn);
|
2018-04-20 18:49:24 +08:00
|
|
|
|
2020-08-11 16:01:04 +08:00
|
|
|
/*
 * Check whether the beacon countdown has reached its final value: true
 * when the counter byte at the first recorded offset equals 1. Looks at
 * the beacon tail for AP and the head for IBSS/mesh; returns false for
 * other interface types, stopped interfaces, or missing/out-of-range
 * offsets.
 */
bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct beacon_data *beacon = NULL;
	u8 *beacon_data;
	size_t beacon_data_len;
	int ret = false;

	if (!ieee80211_sdata_running(sdata))
		return false;

	rcu_read_lock();
	if (vif->type == NL80211_IFTYPE_AP) {
		struct ieee80211_if_ap *ap = &sdata->u.ap;

		beacon = rcu_dereference(ap->beacon);
		if (WARN_ON(!beacon || !beacon->tail))
			goto out;
		beacon_data = beacon->tail;
		beacon_data_len = beacon->tail_len;
	} else if (vif->type == NL80211_IFTYPE_ADHOC) {
		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;

		beacon = rcu_dereference(ifibss->presp);
		if (!beacon)
			goto out;

		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
	} else if (vif->type == NL80211_IFTYPE_MESH_POINT) {
		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

		beacon = rcu_dereference(ifmsh->beacon);
		if (!beacon)
			goto out;

		beacon_data = beacon->head;
		beacon_data_len = beacon->head_len;
	} else {
		WARN_ON(1);
		goto out;
	}

	/* no countdown element in the beacon */
	if (!beacon->cntdwn_counter_offsets[0])
		goto out;

	if (WARN_ON_ONCE(beacon->cntdwn_counter_offsets[0] > beacon_data_len))
		goto out;

	if (beacon_data[beacon->cntdwn_counter_offsets[0]] == 1)
		ret = true;

 out:
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(ieee80211_beacon_cntdwn_is_complete);
|
2013-07-11 22:09:06 +08:00
|
|
|
|
2020-02-22 21:25:46 +08:00
|
|
|
/*
 * Apply beacon protection (encryption via the default beacon key) to a
 * freshly built beacon in @skb. Returns 0 when no beacon key is configured
 * or protection succeeded, -EINVAL if the crypto handler failed. The skb
 * is run through the normal TX encrypt handler via a one-entry queue.
 */
static int ieee80211_beacon_protect(struct sk_buff *skb,
				    struct ieee80211_local *local,
				    struct ieee80211_sub_if_data *sdata)
{
	ieee80211_tx_result res;
	struct ieee80211_tx_data tx;
	struct sk_buff *check_skb;

	memset(&tx, 0, sizeof(tx));
	tx.key = rcu_dereference(sdata->default_beacon_key);
	if (!tx.key)
		return 0;
	tx.local = local;
	tx.sdata = sdata;
	__skb_queue_head_init(&tx.skbs);
	__skb_queue_tail(&tx.skbs, skb);
	res = ieee80211_tx_h_encrypt(&tx);
	check_skb = __skb_dequeue(&tx.skbs);
	/* we may crash after this, but it'd be a bug in crypto */
	WARN_ON(check_skb != skb);
	if (WARN_ON_ONCE(res != TX_CONTINUE))
		return -EINVAL;

	return 0;
}
|
|
|
|
|
2021-10-06 12:09:36 +08:00
|
|
|
/*
 * Final common step of beacon building: record the countdown counter
 * offsets in @offs (shifted by @csa_off_base, which for AP points past the
 * head/TIM), select the beacon rate via rate control, and set up the
 * tx_info flags (no-ack, no-encrypt, clear-PS-filter, assign-seq,
 * first-fragment) on @skb.
 */
static void
ieee80211_beacon_get_finish(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_mutable_offsets *offs,
			    struct beacon_data *beacon,
			    struct sk_buff *skb,
			    struct ieee80211_chanctx_conf *chanctx_conf,
			    u16 csa_off_base)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_tx_info *info;
	enum nl80211_band band;
	struct ieee80211_tx_rate_control txrc;

	/* CSA offsets */
	if (offs && beacon) {
		u16 i;

		for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; i++) {
			u16 csa_off = beacon->cntdwn_counter_offsets[i];

			if (!csa_off)
				continue;

			offs->cntdwn_counter_offs[i] = csa_off_base + csa_off;
		}
	}

	band = chanctx_conf->def.chan->band;
	info = IEEE80211_SKB_CB(skb);
	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
	info->flags |= IEEE80211_TX_CTL_NO_ACK;
	info->band = band;

	memset(&txrc, 0, sizeof(txrc));
	txrc.hw = hw;
	txrc.sband = local->hw.wiphy->bands[band];
	txrc.bss_conf = &sdata->vif.bss_conf;
	txrc.skb = skb;
	txrc.reported_rate.idx = -1;
	/* prefer the dedicated beacon rate mask when one is configured */
	if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band])
		txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band];
	else
		txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
	txrc.bss = true;
	rate_control_get_rate(sdata, NULL, &txrc);

	info->control.vif = vif;
	info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT |
		       IEEE80211_TX_CTL_ASSIGN_SEQ |
		       IEEE80211_TX_CTL_FIRST_FRAGMENT;
}
|
|
|
|
|
2022-02-24 19:54:58 +08:00
|
|
|
static void
|
|
|
|
ieee80211_beacon_add_mbssid(struct sk_buff *skb, struct beacon_data *beacon)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!beacon->mbssid_ies)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (i = 0; i < beacon->mbssid_ies->cnt; i++)
|
|
|
|
skb_put_data(skb, beacon->mbssid_ies->elem[i].data,
|
|
|
|
beacon->mbssid_ies->elem[i].len);
|
|
|
|
}
|
|
|
|
|
2021-10-06 12:09:36 +08:00
|
|
|
/*
 * Build an AP-mode beacon frame from @beacon.
 *
 * Runs the CSA/color-change countdown (unless building a template),
 * allocates the skb, fills head + TIM + MBSSID + tail, records mutable
 * offsets in @offs when given, applies beacon protection and finishes
 * via ieee80211_beacon_get_finish().
 *
 * Returns the beacon skb, or NULL on allocation/protection failure.
 * Caller must hold the RCU read lock (beacon/chanctx_conf are
 * RCU-protected by the caller — see __ieee80211_beacon_get()).
 */
static struct sk_buff *
ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			struct ieee80211_mutable_offsets *offs,
			bool is_template,
			struct beacon_data *beacon,
			struct ieee80211_chanctx_conf *chanctx_conf)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_if_ap *ap = &sdata->u.ap;
	struct sk_buff *skb = NULL;
	u16 csa_off_base = 0;
	int mbssid_len;

	if (beacon->cntdwn_counter_offsets[0]) {
		/* templates keep the current counter value; only real
		 * beacons advance the countdown
		 */
		if (!is_template)
			ieee80211_beacon_update_cntdwn(vif);

		ieee80211_set_beacon_cntdwn(sdata, beacon);
	}

	/* headroom, head length,
	 * tail length, maximum TIM length and multiple BSSID length
	 */
	mbssid_len = ieee80211_get_mbssid_beacon_len(beacon->mbssid_ies);
	skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
			    beacon->tail_len + 256 +
			    local->hw.extra_beacon_tailroom + mbssid_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, local->tx_headroom);
	skb_put_data(skb, beacon->head, beacon->head_len);

	ieee80211_beacon_add_tim(sdata, &ap->ps, skb, is_template);

	if (offs) {
		offs->tim_offset = beacon->head_len;
		offs->tim_length = skb->len - beacon->head_len;
		offs->cntdwn_counter_offs[0] = beacon->cntdwn_counter_offsets[0];

		if (mbssid_len) {
			ieee80211_beacon_add_mbssid(skb, beacon);
			offs->mbssid_off = skb->len - mbssid_len;
		}

		/* for AP the csa offsets are from tail */
		csa_off_base = skb->len;
	}

	if (beacon->tail)
		skb_put_data(skb, beacon->tail, beacon->tail_len);

	/* NOTE(review): on failure the skb appears to be consumed by
	 * ieee80211_beacon_protect() itself — confirm against its
	 * definition before changing this early return
	 */
	if (ieee80211_beacon_protect(skb, local, sdata) < 0)
		return NULL;

	ieee80211_beacon_get_finish(hw, vif, offs, beacon, skb, chanctx_conf,
				    csa_off_base);
	return skb;
}
|
|
|
|
|
2014-05-09 19:11:49 +08:00
|
|
|
/*
 * Common beacon construction for AP, IBSS and mesh interfaces.
 *
 * @offs: optional out-parameter for mutable offsets (TIM, countdown);
 *        zeroed here before being filled by the per-mode path.
 * @is_template: when true, build a template without advancing the
 *        CSA/countdown counters.
 *
 * Returns a freshly allocated beacon skb, or NULL if the interface is
 * not running, has no beacon configured, or allocation fails.
 * All beacon/chanctx pointers are dereferenced under the RCU read lock
 * taken below.
 */
static struct sk_buff *
__ieee80211_beacon_get(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif,
		       struct ieee80211_mutable_offsets *offs,
		       bool is_template)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct beacon_data *beacon = NULL;
	struct sk_buff *skb = NULL;
	struct ieee80211_sub_if_data *sdata = NULL;
	struct ieee80211_chanctx_conf *chanctx_conf;

	rcu_read_lock();

	sdata = vif_to_sdata(vif);
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);

	if (!ieee80211_sdata_running(sdata) || !chanctx_conf)
		goto out;

	if (offs)
		memset(offs, 0, sizeof(*offs));

	if (sdata->vif.type == NL80211_IFTYPE_AP) {
		struct ieee80211_if_ap *ap = &sdata->u.ap;

		beacon = rcu_dereference(ap->beacon);
		if (!beacon)
			goto out;

		skb = ieee80211_beacon_get_ap(hw, vif, offs, is_template,
					      beacon, chanctx_conf);
	} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
		struct ieee80211_hdr *hdr;

		beacon = rcu_dereference(ifibss->presp);
		if (!beacon)
			goto out;

		if (beacon->cntdwn_counter_offsets[0]) {
			if (!is_template)
				__ieee80211_beacon_update_cntdwn(beacon);

			ieee80211_set_beacon_cntdwn(sdata, beacon);
		}

		skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
				    local->hw.extra_beacon_tailroom);
		if (!skb)
			goto out;
		skb_reserve(skb, local->tx_headroom);
		skb_put_data(skb, beacon->head, beacon->head_len);

		/* IBSS presp data is reused as the beacon body; force the
		 * frame type to beacon
		 */
		hdr = (struct ieee80211_hdr *) skb->data;
		hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
						 IEEE80211_STYPE_BEACON);

		ieee80211_beacon_get_finish(hw, vif, offs, beacon, skb,
					    chanctx_conf, 0);
	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

		beacon = rcu_dereference(ifmsh->beacon);
		if (!beacon)
			goto out;

		if (beacon->cntdwn_counter_offsets[0]) {
			if (!is_template)
				/* TODO: For mesh csa_counter is in TU, so
				 * decrementing it by one isn't correct, but
				 * for now we leave it consistent with overall
				 * mac80211's behavior.
				 */
				__ieee80211_beacon_update_cntdwn(beacon);

			ieee80211_set_beacon_cntdwn(sdata, beacon);
		}

		if (ifmsh->sync_ops)
			ifmsh->sync_ops->adjust_tsf(sdata, beacon);

		skb = dev_alloc_skb(local->tx_headroom +
				    beacon->head_len +
				    256 + /* TIM IE */
				    beacon->tail_len +
				    local->hw.extra_beacon_tailroom);
		if (!skb)
			goto out;
		skb_reserve(skb, local->tx_headroom);
		skb_put_data(skb, beacon->head, beacon->head_len);
		ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);

		if (offs) {
			offs->tim_offset = beacon->head_len;
			offs->tim_length = skb->len - beacon->head_len;
		}

		skb_put_data(skb, beacon->tail, beacon->tail_len);
		ieee80211_beacon_get_finish(hw, vif, offs, beacon, skb,
					    chanctx_conf, 0);
	} else {
		/* no other interface types beacon */
		WARN_ON(1);
		goto out;
	}

 out:
	rcu_read_unlock();
	return skb;
}
|
|
|
|
|
|
|
|
/*
 * Exported wrapper: build a beacon *template* — offsets are reported in
 * @offs and the CSA/countdown counters are not advanced
 * (is_template = true).
 */
struct sk_buff *
ieee80211_beacon_get_template(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_mutable_offsets *offs)
{
	return __ieee80211_beacon_get(hw, vif, offs, true);
}
EXPORT_SYMBOL(ieee80211_beacon_get_template);
|
|
|
|
|
|
|
|
/*
 * Build a live beacon (counters advanced) and report the TIM element
 * position via @tim_offset/@tim_length (either may be NULL).
 *
 * If the hardware does not report beacon TX status and monitor
 * interfaces are active, a copy of the beacon is also delivered to the
 * monitors.
 *
 * Fix: the monitor copy allocated with skb_copy() was leaked when
 * ieee80211_get_sband() returned NULL — free it on that early return.
 */
struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 u16 *tim_offset, u16 *tim_length)
{
	struct ieee80211_mutable_offsets offs = {};
	struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
	struct sk_buff *copy;
	struct ieee80211_supported_band *sband;
	int shift;

	if (!bcn)
		return bcn;

	if (tim_offset)
		*tim_offset = offs.tim_offset;

	if (tim_length)
		*tim_length = offs.tim_length;

	if (ieee80211_hw_check(hw, BEACON_TX_STATUS) ||
	    !hw_to_local(hw)->monitors)
		return bcn;

	/* send a copy to monitor interfaces */
	copy = skb_copy(bcn, GFP_ATOMIC);
	if (!copy)
		return bcn;

	shift = ieee80211_vif_get_shift(vif);
	sband = ieee80211_get_sband(vif_to_sdata(vif));
	if (!sband) {
		/* don't leak the monitor copy */
		kfree_skb(copy);
		return bcn;
	}

	ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false,
			     NULL);

	return bcn;
}
EXPORT_SYMBOL(ieee80211_beacon_get_tim);
|
2007-07-27 21:43:22 +08:00
|
|
|
|
2011-11-10 17:28:57 +08:00
|
|
|
/*
 * Return a copy of the AP's configured probe response template, or NULL
 * for non-AP interfaces, when no template is set, or on allocation
 * failure. The destination address (addr1) is zeroed so the driver can
 * fill in the requesting station's address.
 */
struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif)
{
	struct ieee80211_if_ap *ap = NULL;
	struct sk_buff *skb = NULL;
	struct probe_resp *presp = NULL;
	struct ieee80211_hdr *hdr;
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);

	if (sdata->vif.type != NL80211_IFTYPE_AP)
		return NULL;

	rcu_read_lock();

	ap = &sdata->u.ap;
	presp = rcu_dereference(ap->probe_resp);
	if (!presp)
		goto out;

	/* note: no extra headroom is reserved here, unlike the other
	 * template getters in this file
	 */
	skb = dev_alloc_skb(presp->len);
	if (!skb)
		goto out;

	skb_put_data(skb, presp->data, presp->len);

	hdr = (struct ieee80211_hdr *) skb->data;
	memset(hdr->addr1, 0, sizeof(hdr->addr1));

 out:
	rcu_read_unlock();
	return skb;
}
EXPORT_SYMBOL(ieee80211_proberesp_get);
|
|
|
|
|
2020-09-11 08:05:31 +08:00
|
|
|
struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
|
|
|
|
struct ieee80211_vif *vif)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb = NULL;
|
|
|
|
struct fils_discovery_data *tmpl = NULL;
|
|
|
|
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
|
|
|
|
|
|
|
|
if (sdata->vif.type != NL80211_IFTYPE_AP)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
tmpl = rcu_dereference(sdata->u.ap.fils_discovery);
|
|
|
|
if (!tmpl) {
|
|
|
|
rcu_read_unlock();
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len);
|
|
|
|
if (skb) {
|
|
|
|
skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
|
|
|
|
skb_put_data(skb, tmpl->data, tmpl->len);
|
|
|
|
}
|
|
|
|
|
|
|
|
rcu_read_unlock();
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ieee80211_get_fils_discovery_tmpl);
|
|
|
|
|
2020-09-11 08:33:01 +08:00
|
|
|
struct sk_buff *
|
|
|
|
ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
|
|
|
|
struct ieee80211_vif *vif)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb = NULL;
|
|
|
|
struct unsol_bcast_probe_resp_data *tmpl = NULL;
|
|
|
|
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
|
|
|
|
|
|
|
|
if (sdata->vif.type != NL80211_IFTYPE_AP)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
tmpl = rcu_dereference(sdata->u.ap.unsol_bcast_probe_resp);
|
|
|
|
if (!tmpl) {
|
|
|
|
rcu_read_unlock();
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len);
|
|
|
|
if (skb) {
|
|
|
|
skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
|
|
|
|
skb_put_data(skb, tmpl->data, tmpl->len);
|
|
|
|
}
|
|
|
|
|
|
|
|
rcu_read_unlock();
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ieee80211_get_unsol_bcast_probe_resp_tmpl);
|
|
|
|
|
2010-01-06 02:16:19 +08:00
|
|
|
/*
 * Build a PS-Poll frame for a station interface, addressed to the
 * current BSSID with the association ID from bss_conf. Returns NULL
 * for non-station interfaces or on allocation failure.
 */
struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_if_managed *ifmgd;
	struct ieee80211_pspoll *pspoll;
	struct ieee80211_local *local;
	struct sk_buff *skb;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
		return NULL;

	sdata = vif_to_sdata(vif);
	ifmgd = &sdata->u.mgd;
	local = sdata->local;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
	if (!skb)
		return NULL;

	skb_reserve(skb, local->hw.extra_tx_headroom);

	pspoll = skb_put_zero(skb, sizeof(*pspoll));
	pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					    IEEE80211_STYPE_PSPOLL);
	pspoll->aid = cpu_to_le16(sdata->vif.bss_conf.aid);

	/* aid in PS-Poll has its two MSBs each set to 1 */
	pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);

	memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
	memcpy(pspoll->ta, vif->addr, ETH_ALEN);

	return skb;
}
EXPORT_SYMBOL(ieee80211_pspoll_get);
|
|
|
|
|
|
|
|
/*
 * Build a (QoS) nullfunc data frame for a station interface.
 *
 * @qos_ok: when true and the AP's station entry indicates WME support,
 *          a QoS nullfunc with TID 7 (AC_VO) is built instead of a
 *          plain nullfunc.
 *
 * Returns NULL for non-station interfaces or on allocation failure.
 */
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       bool qos_ok)
{
	struct ieee80211_hdr_3addr *nullfunc;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_if_managed *ifmgd;
	struct ieee80211_local *local;
	struct sk_buff *skb;
	bool qos = false;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
		return NULL;

	sdata = vif_to_sdata(vif);
	ifmgd = &sdata->u.mgd;
	local = sdata->local;

	if (qos_ok) {
		struct sta_info *sta;

		/* peek at the AP's station entry to see if it does WME */
		rcu_read_lock();
		sta = sta_info_get(sdata, ifmgd->bssid);
		qos = sta && sta->sta.wme;
		rcu_read_unlock();
	}

	/* +2 leaves room for the QoS control field appended below */
	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
			    sizeof(*nullfunc) + 2);
	if (!skb)
		return NULL;

	skb_reserve(skb, local->hw.extra_tx_headroom);

	nullfunc = skb_put_zero(skb, sizeof(*nullfunc));
	nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
					      IEEE80211_STYPE_NULLFUNC |
					      IEEE80211_FCTL_TODS);
	if (qos) {
		__le16 qoshdr = cpu_to_le16(7);

		/* OR-ing in QOS_NULLFUNC must yield exactly the QoS
		 * nullfunc subtype
		 */
		BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
			      IEEE80211_STYPE_NULLFUNC) !=
			     IEEE80211_STYPE_QOS_NULLFUNC);
		nullfunc->frame_control |=
			cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
		skb->priority = 7;
		skb_set_queue_mapping(skb, IEEE80211_AC_VO);
		skb_put_data(skb, &qoshdr, sizeof(qoshdr));
	}

	memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
	memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
	memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);

	return skb;
}
EXPORT_SYMBOL(ieee80211_nullfunc_get);
|
|
|
|
|
2010-01-06 02:16:38 +08:00
|
|
|
/*
 * Build a broadcast probe request containing only the SSID element.
 *
 * @src_addr: transmitter address placed in addr2.
 * @ssid/@ssid_len: SSID element body (ssid_len 0 builds a wildcard
 *                  SSID element).
 * @tailroom: extra space reserved after the SSID element for the
 *            caller to append more elements.
 *
 * Returns NULL on allocation failure.
 */
struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
				       const u8 *src_addr,
				       const u8 *ssid, size_t ssid_len,
				       size_t tailroom)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_hdr_3addr *hdr;
	struct sk_buff *skb;
	size_t ie_ssid_len;
	u8 *pos;

	/* element ID + length byte + SSID body */
	ie_ssid_len = 2 + ssid_len;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
			    ie_ssid_len + tailroom);
	if (!skb)
		return NULL;

	skb_reserve(skb, local->hw.extra_tx_headroom);

	hdr = skb_put_zero(skb, sizeof(*hdr));
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(hdr->addr1);
	memcpy(hdr->addr2, src_addr, ETH_ALEN);
	eth_broadcast_addr(hdr->addr3);

	pos = skb_put(skb, ie_ssid_len);
	*pos++ = WLAN_EID_SSID;
	*pos++ = ssid_len;
	if (ssid_len)
		memcpy(pos, ssid, ssid_len);
	pos += ssid_len;

	return skb;
}
EXPORT_SYMBOL(ieee80211_probereq_get);
|
|
|
|
|
2007-12-19 08:31:26 +08:00
|
|
|
/*
 * Fill in an RTS frame protecting @frame: frame control, duration
 * (computed from the protected frame's length and TX control), and
 * RA/TA copied from the protected frame's header.
 */
void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		       const void *frame, size_t frame_len,
		       const struct ieee80211_tx_info *frame_txctl,
		       struct ieee80211_rts *rts)
{
	const struct ieee80211_hdr *protected_hdr = frame;

	memcpy(rts->ra, protected_hdr->addr1, sizeof(rts->ra));
	memcpy(rts->ta, protected_hdr->addr2, sizeof(rts->ta));
	rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_RTS);
	rts->duration = ieee80211_rts_duration(hw, vif, frame_len,
					       frame_txctl);
}
EXPORT_SYMBOL(ieee80211_rts_get);
|
|
|
|
|
2007-12-19 08:31:26 +08:00
|
|
|
/*
 * Fill in a CTS-to-self frame protecting @frame: frame control,
 * duration (computed from the protected frame's length and TX control),
 * and RA copied from the protected frame's header.
 */
void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     const void *frame, size_t frame_len,
			     const struct ieee80211_tx_info *frame_txctl,
			     struct ieee80211_cts *cts)
{
	const struct ieee80211_hdr *protected_hdr = frame;

	memcpy(cts->ra, protected_hdr->addr1, sizeof(cts->ra));
	cts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
					 IEEE80211_STYPE_CTS);
	cts->duration = ieee80211_ctstoself_duration(hw, vif,
						     frame_len, frame_txctl);
}
EXPORT_SYMBOL(ieee80211_ctstoself_get);
|
|
|
|
|
|
|
|
/*
 * Dequeue the next power-save-buffered broadcast/multicast frame for
 * transmission after a DTIM beacon (AP or mesh only).
 *
 * Frames that fail ieee80211_tx_prepare() are dropped and the next one
 * is tried; returns NULL when the buffer is empty, it is not yet DTIM
 * time, or the TX handlers reject the frame.
 */
struct sk_buff *
ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sk_buff *skb = NULL;
	struct ieee80211_tx_data tx;
	struct ieee80211_sub_if_data *sdata;
	struct ps_data *ps;
	struct ieee80211_tx_info *info;
	struct ieee80211_chanctx_conf *chanctx_conf;

	sdata = vif_to_sdata(vif);

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);

	if (!chanctx_conf)
		goto out;

	if (sdata->vif.type == NL80211_IFTYPE_AP) {
		struct beacon_data *beacon =
				rcu_dereference(sdata->u.ap.beacon);

		if (!beacon || !beacon->head)
			goto out;

		ps = &sdata->u.ap.ps;
	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
		ps = &sdata->u.mesh.ps;
	} else {
		goto out;
	}

	if (ps->dtim_count != 0 || !ps->dtim_bc_mc)
		goto out; /* send buffered bc/mc only after DTIM beacon */

	while (1) {
		skb = skb_dequeue(&ps->bc_buf);
		if (!skb)
			goto out;
		local->total_ps_buffered--;

		if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) {
			struct ieee80211_hdr *hdr =
				(struct ieee80211_hdr *) skb->data;
			/* more buffered multicast/broadcast frames ==> set
			 * MoreData flag in IEEE 802.11 header to inform PS
			 * STAs */
			hdr->frame_control |=
				cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		}

		/* for AP, resolve the sdata the frame was actually
		 * queued on (may be an AP_VLAN of this AP)
		 */
		if (sdata->vif.type == NL80211_IFTYPE_AP)
			sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
		if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
			break;
		/* prepare failed — drop this frame and try the next */
		ieee80211_free_txskb(hw, skb);
	}

	info = IEEE80211_SKB_CB(skb);

	tx.flags |= IEEE80211_TX_PS_BUFFERED;
	info->band = chanctx_conf->def.chan->band;

	if (invoke_tx_handlers(&tx))
		skb = NULL;
 out:
	rcu_read_unlock();

	return skb;
}
EXPORT_SYMBOL(ieee80211_get_buffered_bc);
|
2009-06-17 23:43:56 +08:00
|
|
|
|
2014-11-19 19:47:38 +08:00
|
|
|
/*
 * Reserve @tid on @pubsta for exclusive driver use.
 *
 * Stops the vif's queues, tears down any aggregation session on the
 * TID, flushes the corresponding hardware queue and records the
 * reservation, then re-wakes the queues.
 *
 * Returns 0 on success (including when @tid is already the reserved
 * one), -EINVAL for unsupported interface types or an out-of-range
 * TID, -EALREADY if a different TID is already reserved.
 * Caller must hold local->sta_mtx.
 */
int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	int ret;
	u32 queues;

	lockdep_assert_held(&local->sta_mtx);

	/* only some cases are supported right now */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_AP_VLAN:
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	if (WARN_ON(tid >= IEEE80211_NUM_UPS))
		return -EINVAL;

	/* reserving the same TID twice is a no-op */
	if (sta->reserved_tid == tid) {
		ret = 0;
		goto out;
	}

	if (sta->reserved_tid != IEEE80211_TID_UNRESERVED) {
		sdata_err(sdata, "TID reservation already active\n");
		ret = -EALREADY;
		goto out;
	}

	ieee80211_stop_vif_queues(sdata->local, sdata,
				  IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);

	/* wait for in-flight TX paths to observe the stopped queues */
	synchronize_net();

	/* Tear down BA sessions so we stop aggregating on this TID */
	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) {
		set_sta_flag(sta, WLAN_STA_BLOCK_BA);
		__ieee80211_stop_tx_ba_session(sta, tid,
					       AGG_STOP_LOCAL_REQUEST);
	}

	queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]);
	__ieee80211_flush_queues(local, sdata, queues, false);

	sta->reserved_tid = tid;

	ieee80211_wake_vif_queues(local, sdata,
				  IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);

	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION))
		clear_sta_flag(sta, WLAN_STA_BLOCK_BA);

	ret = 0;
 out:
	return ret;
}
EXPORT_SYMBOL(ieee80211_reserve_tid);
|
|
|
|
|
|
|
|
/*
 * Release a TID reservation previously taken with ieee80211_reserve_tid().
 * Must be called with local->sta_mtx held; warns and bails out if the
 * interface type is unsupported or @tid is not the reserved one.
 */
void ieee80211_unreserve_tid(struct ieee80211_sta *pubsta, u8 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	enum nl80211_iftype type = sdata->vif.type;

	lockdep_assert_held(&sdata->local->sta_mtx);

	/* TID reservation is only supported on these interface types */
	if (type != NL80211_IFTYPE_STATION &&
	    type != NL80211_IFTYPE_AP &&
	    type != NL80211_IFTYPE_AP_VLAN) {
		WARN_ON(1);
		return;
	}

	if (sta->reserved_tid != tid) {
		sdata_err(sdata, "TID to unreserve (%d) isn't reserved\n", tid);
		return;
	}

	/* drop the reservation recorded by ieee80211_reserve_tid() */
	sta->reserved_tid = IEEE80211_TID_UNRESERVED;
}
EXPORT_SYMBOL(ieee80211_unreserve_tid);
|
|
|
|
|
2012-07-26 23:24:39 +08:00
|
|
|
void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sk_buff *skb, int tid,
|
2020-07-23 18:01:52 +08:00
|
|
|
enum nl80211_band band)
|
2009-06-17 23:43:56 +08:00
|
|
|
{
|
2017-01-24 23:42:10 +08:00
|
|
|
int ac = ieee80211_ac_from_tid(tid);
|
2012-04-03 22:28:50 +08:00
|
|
|
|
2016-03-03 09:16:56 +08:00
|
|
|
skb_reset_mac_header(skb);
|
2012-04-03 22:28:50 +08:00
|
|
|
skb_set_queue_mapping(skb, ac);
|
2011-12-15 17:18:34 +08:00
|
|
|
skb->priority = tid;
|
2010-01-06 01:00:58 +08:00
|
|
|
|
2013-02-13 22:39:57 +08:00
|
|
|
skb->dev = sdata->dev;
|
|
|
|
|
2009-06-18 23:25:11 +08:00
|
|
|
/*
|
|
|
|
* The other path calling ieee80211_xmit is from the tasklet,
|
|
|
|
* and while we can handle concurrent transmissions locking
|
|
|
|
* requirements are that we do not come into tx with bhs on.
|
|
|
|
*/
|
|
|
|
local_bh_disable();
|
2014-11-10 00:50:09 +08:00
|
|
|
IEEE80211_SKB_CB(skb)->band = band;
|
2020-07-23 18:01:52 +08:00
|
|
|
ieee80211_xmit(sdata, NULL, skb);
|
2009-06-18 23:25:11 +08:00
|
|
|
local_bh_enable();
|
2009-06-17 23:43:56 +08:00
|
|
|
}
|
2018-03-27 01:52:50 +08:00
|
|
|
|
|
|
|
/*
 * Transmit a control port frame (the configured control port protocol,
 * e.g. EAPOL, or pre-authentication) on behalf of userspace/cfg80211.
 *
 * Prepends an Ethernet header (dest/@dev's vif address/@proto) to @buf
 * and pushes the frame through the normal subif TX path.
 *
 * Returns 0 on success, -EINVAL for a protocol that is neither the
 * configured control port protocol nor ETH_P_PREAUTH, or -ENOMEM if the
 * skb allocation fails.
 */
int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
			      const u8 *buf, size_t len,
			      const u8 *dest, __be16 proto, bool unencrypted,
			      u64 *cookie)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct sk_buff *skb;
	struct ethhdr *ehdr;
	u32 ctrl_flags = 0;
	u32 flags = 0;

	/* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
	 * or Pre-Authentication
	 */
	if (proto != sdata->control_port_protocol &&
	    proto != cpu_to_be16(ETH_P_PREAUTH))
		return -EINVAL;

	/* mark control-port-protocol frames; they also bypass the mesh
	 * path lookup
	 */
	if (proto == sdata->control_port_protocol)
		ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
			      IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;

	if (unencrypted)
		flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;

	/* a cookie implies the caller wants TX status reporting */
	if (cookie)
		ctrl_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;

	flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX;

	/* room for driver headroom plus the Ethernet header we prepend */
	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
			    sizeof(struct ethhdr) + len);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, local->hw.extra_tx_headroom + sizeof(struct ethhdr));

	skb_put_data(skb, buf, len);

	/* build the Ethernet header in front of the payload */
	ehdr = skb_push(skb, sizeof(struct ethhdr));
	memcpy(ehdr->h_dest, dest, ETH_ALEN);
	memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN);
	ehdr->h_proto = proto;

	skb->dev = dev;
	skb->protocol = proto;
	skb_reset_network_header(skb);
	skb_reset_mac_header(skb);

	/* update QoS header to prioritize control port frames if possible,
	 * prioritization also happens for control port frames sent over
	 * AF_PACKET
	 */
	rcu_read_lock();

	if (ieee80211_lookup_ra_sta(sdata, skb, &sta) == 0 && !IS_ERR(sta)) {
		u16 queue = __ieee80211_select_queue(sdata, sta, skb);

		skb_set_queue_mapping(skb, queue);
		skb_get_hash(skb);
	}

	rcu_read_unlock();

	/* mutex lock is only needed for incrementing the cookie counter */
	mutex_lock(&local->mtx);

	local_bh_disable();
	__ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags, cookie);
	local_bh_enable();

	mutex_unlock(&local->mtx);

	return 0;
}
|
2019-04-12 04:47:26 +08:00
|
|
|
|
|
|
|
int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev,
|
|
|
|
const u8 *buf, size_t len)
|
|
|
|
{
|
|
|
|
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
|
|
|
|
struct ieee80211_local *local = sdata->local;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
skb = dev_alloc_skb(local->hw.extra_tx_headroom + len +
|
|
|
|
30 + /* header size */
|
|
|
|
18); /* 11s header size */
|
|
|
|
if (!skb)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
skb_reserve(skb, local->hw.extra_tx_headroom);
|
|
|
|
skb_put_data(skb, buf, len);
|
|
|
|
|
|
|
|
skb->dev = dev;
|
|
|
|
skb->protocol = htons(ETH_P_802_3);
|
|
|
|
skb_reset_network_header(skb);
|
|
|
|
skb_reset_mac_header(skb);
|
|
|
|
|
|
|
|
local_bh_disable();
|
|
|
|
__ieee80211_subif_start_xmit(skb, skb->dev, 0,
|
2020-05-28 00:03:34 +08:00
|
|
|
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP,
|
|
|
|
NULL);
|
2018-06-19 23:39:50 +08:00
|
|
|
local_bh_enable();
|
2018-03-27 01:52:50 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|