mt76: reduce q->lock hold time
Instead of holding it for the duration of an entire station schedule run,
which can block out competing tasks for a significant amount of time,
only hold it for scheduling one batch of packets for one station.
Improves responsiveness under load.

Signed-off-by: Felix Fietkau <[email protected]>
nbd168 committed Jan 24, 2021
1 parent 8c796a3 commit 4eb5caa
Showing 1 changed file with 7 additions and 8 deletions.
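
The locking change is easiest to see in isolation. Below is a minimal, self-contained userspace sketch of the same pattern, with a pthread mutex standing in for the kernel spinlock and hypothetical helpers (schedule_run_old(), schedule_run_new(), send_one_batch()) standing in for the mt76 scheduling code; it illustrates the idea only and is not driver code.

#include <pthread.h>
#include <stdio.h>

#define NUM_STATIONS 4

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for queueing one batch of frames for one station. */
static int send_one_batch(int sta)
{
        printf("queued a batch for station %d\n", sta);
        return 1;
}

/* Old pattern: the lock is held across the entire scheduling run,
 * blocking competing users of the queue lock for the whole loop. */
static int schedule_run_old(void)
{
        int sta, ret = 0;

        pthread_mutex_lock(&q_lock);
        for (sta = 0; sta < NUM_STATIONS; sta++)
                ret += send_one_batch(sta);
        pthread_mutex_unlock(&q_lock);

        return ret;
}

/* New pattern: the lock is taken and dropped around each per-station
 * batch, so competing tasks can run in between stations. */
static int schedule_run_new(void)
{
        int sta, ret = 0;

        for (sta = 0; sta < NUM_STATIONS; sta++) {
                pthread_mutex_lock(&q_lock);
                ret += send_one_batch(sta);
                pthread_mutex_unlock(&q_lock);
        }

        return ret;
}

int main(void)
{
        printf("old: %d batches\n", schedule_run_old());
        printf("new: %d batches\n", schedule_run_new());
        return 0;
}

Both variants queue the same batches; the difference is only how long the lock is held at a stretch, which is what the diff below changes for q->lock.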
diff --git a/tx.c b/tx.c
--- a/tx.c
+++ b/tx.c
@@ -460,7 +460,6 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
         struct mt76_wcid *wcid;
         int ret = 0;
 
-        spin_lock_bh(&q->lock);
         while (1) {
                 if (test_bit(MT76_STATE_PM, &phy->state) ||
                     test_bit(MT76_RESET, &phy->state)) {
@@ -470,14 +469,9 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 
                 if (dev->queue_ops->tx_cleanup &&
                     q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
-                        spin_unlock_bh(&q->lock);
                         dev->queue_ops->tx_cleanup(dev, q, false);
-                        spin_lock_bh(&q->lock);
                 }
 
-                if (mt76_txq_stopped(q))
-                        break;
-
                 txq = ieee80211_next_txq(phy->hw, qid);
                 if (!txq)
                         break;
@@ -487,6 +481,8 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
                 if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                         continue;
 
+                spin_lock_bh(&q->lock);
+
                 if (mtxq->send_bar && mtxq->aggr) {
                         struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
                         struct ieee80211_sta *sta = txq->sta;
@@ -500,10 +496,13 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
                         spin_lock_bh(&q->lock);
                 }
 
-                ret += mt76_txq_send_burst(phy, q, mtxq);
+                if (!mt76_txq_stopped(q))
+                        ret += mt76_txq_send_burst(phy, q, mtxq);
+
+                spin_unlock_bh(&q->lock);
+
                 ieee80211_return_txq(phy->hw, txq, false);
         }
-        spin_unlock_bh(&q->lock);
 
         return ret;
 }
