mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-24 13:13:57 +08:00
wl12xx: Change TX queue to be per AC
With the current single-queue implementation traffic prioritization is not working correctly - when using multiple BE streams and one, say VI stream, the VI stream will share bandwidth almost equally with the BE streams. To fix the issue, implement per AC queues, which are emptied in priority order to the firmware. To keep it relatively simple, maintain a global buffer count and global queue stop/wake instead of per-AC. With these changes, prioritization appears to work just fine. Signed-off-by: Juuso Oikarinen <juuso.oikarinen@nokia.com> Signed-off-by: Luciano Coelho <luciano.coelho@nokia.com>
This commit is contained in:
parent
17c1755c24
commit
6742f554db
@ -225,7 +225,7 @@ static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
|
||||
char buf[20];
|
||||
int res;
|
||||
|
||||
queue_len = skb_queue_len(&wl->tx_queue);
|
||||
queue_len = wl->tx_queue_count;
|
||||
|
||||
res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
|
||||
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
|
||||
|
@ -570,7 +570,7 @@ static void wl1271_irq_work(struct work_struct *work)
|
||||
|
||||
/* Check if any tx blocks were freed */
|
||||
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
|
||||
!skb_queue_empty(&wl->tx_queue)) {
|
||||
wl->tx_queue_count) {
|
||||
/*
|
||||
* In order to avoid starvation of the TX path,
|
||||
* call the work function directly.
|
||||
@ -891,6 +891,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
|
||||
struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_sta *sta = txinfo->control.sta;
|
||||
unsigned long flags;
|
||||
int q;
|
||||
|
||||
/*
|
||||
* peek into the rates configured in the STA entry.
|
||||
@ -918,10 +919,12 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
|
||||
set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
|
||||
}
|
||||
#endif
|
||||
wl->tx_queue_count++;
|
||||
spin_unlock_irqrestore(&wl->wl_lock, flags);
|
||||
|
||||
/* queue the packet */
|
||||
skb_queue_tail(&wl->tx_queue, skb);
|
||||
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
|
||||
skb_queue_tail(&wl->tx_queue[q], skb);
|
||||
|
||||
/*
|
||||
* The chip specific setup must run before the first TX packet -
|
||||
@ -935,7 +938,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
|
||||
* The workqueue is slow to process the tx_queue and we need stop
|
||||
* the queue here, otherwise the queue will get too long.
|
||||
*/
|
||||
if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
|
||||
if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
|
||||
wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
|
||||
|
||||
spin_lock_irqsave(&wl->wl_lock, flags);
|
||||
@ -2719,7 +2722,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
|
||||
wl->hw = hw;
|
||||
wl->plat_dev = plat_dev;
|
||||
|
||||
skb_queue_head_init(&wl->tx_queue);
|
||||
for (i = 0; i < NUM_TX_QUEUES; i++)
|
||||
skb_queue_head_init(&wl->tx_queue[i]);
|
||||
|
||||
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
|
||||
INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
|
||||
|
@ -125,7 +125,6 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
|
||||
/* queue (we use same identifiers for tid's and ac's */
|
||||
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
|
||||
desc->tid = ac;
|
||||
|
||||
desc->aid = TX_HW_DEFAULT_AID;
|
||||
desc->reserved = 0;
|
||||
|
||||
@ -228,7 +227,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
|
||||
unsigned long flags;
|
||||
|
||||
if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
|
||||
skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
|
||||
wl->tx_queue_count <= WL1271_TX_QUEUE_LOW_WATERMARK) {
|
||||
/* firmware buffer has space, restart queues */
|
||||
spin_lock_irqsave(&wl->wl_lock, flags);
|
||||
ieee80211_wake_queues(wl->hw);
|
||||
@ -237,6 +236,43 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
|
||||
}
|
||||
}
|
||||
|
||||
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
|
||||
{
|
||||
struct sk_buff *skb = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VO]);
|
||||
if (skb)
|
||||
goto out;
|
||||
skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VI]);
|
||||
if (skb)
|
||||
goto out;
|
||||
skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BE]);
|
||||
if (skb)
|
||||
goto out;
|
||||
skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BK]);
|
||||
|
||||
out:
|
||||
if (skb) {
|
||||
spin_lock_irqsave(&wl->wl_lock, flags);
|
||||
wl->tx_queue_count--;
|
||||
spin_unlock_irqrestore(&wl->wl_lock, flags);
|
||||
}
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
|
||||
{
|
||||
unsigned long flags;
|
||||
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
|
||||
|
||||
skb_queue_head(&wl->tx_queue[q], skb);
|
||||
spin_lock_irqsave(&wl->wl_lock, flags);
|
||||
wl->tx_queue_count++;
|
||||
spin_unlock_irqrestore(&wl->wl_lock, flags);
|
||||
}
|
||||
|
||||
void wl1271_tx_work_locked(struct wl1271 *wl)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
@ -270,7 +306,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
|
||||
wl1271_acx_rate_policies(wl);
|
||||
}
|
||||
|
||||
while ((skb = skb_dequeue(&wl->tx_queue))) {
|
||||
while ((skb = wl1271_skb_dequeue(wl))) {
|
||||
if (!woken_up) {
|
||||
ret = wl1271_ps_elp_wakeup(wl, false);
|
||||
if (ret < 0)
|
||||
@ -284,9 +320,9 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
|
||||
* Aggregation buffer is full.
|
||||
* Flush buffer and try again.
|
||||
*/
|
||||
skb_queue_head(&wl->tx_queue, skb);
|
||||
wl1271_skb_queue_head(wl, skb);
|
||||
wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
|
||||
buf_offset, true);
|
||||
buf_offset, true);
|
||||
sent_packets = true;
|
||||
buf_offset = 0;
|
||||
continue;
|
||||
@ -295,7 +331,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
|
||||
* Firmware buffer is full.
|
||||
* Queue back last skb, and stop aggregating.
|
||||
*/
|
||||
skb_queue_head(&wl->tx_queue, skb);
|
||||
wl1271_skb_queue_head(wl, skb);
|
||||
/* No work left, avoid scheduling redundant tx work */
|
||||
set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
|
||||
goto out_ack;
|
||||
@ -440,10 +476,13 @@ void wl1271_tx_reset(struct wl1271 *wl)
|
||||
struct sk_buff *skb;
|
||||
|
||||
/* TX failure */
|
||||
while ((skb = skb_dequeue(&wl->tx_queue))) {
|
||||
wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
|
||||
ieee80211_tx_status(wl->hw, skb);
|
||||
for (i = 0; i < NUM_TX_QUEUES; i++) {
|
||||
while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
|
||||
wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
|
||||
ieee80211_tx_status(wl->hw, skb);
|
||||
}
|
||||
}
|
||||
wl->tx_queue_count = 0;
|
||||
|
||||
/*
|
||||
* Make sure the driver is at a consistent state, in case this
|
||||
@ -472,8 +511,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
|
||||
mutex_lock(&wl->mutex);
|
||||
wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
|
||||
wl->tx_frames_cnt);
|
||||
if ((wl->tx_frames_cnt == 0) &&
|
||||
skb_queue_empty(&wl->tx_queue)) {
|
||||
if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
|
||||
mutex_unlock(&wl->mutex);
|
||||
return;
|
||||
}
|
||||
|
@ -292,7 +292,8 @@ struct wl1271 {
|
||||
int session_counter;
|
||||
|
||||
/* Frames scheduled for transmission, not handled yet */
|
||||
struct sk_buff_head tx_queue;
|
||||
struct sk_buff_head tx_queue[NUM_TX_QUEUES];
|
||||
int tx_queue_count;
|
||||
|
||||
struct work_struct tx_work;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user