Lines matching references to wcid in the mt76 TX path (drivers/net/wireless/mediatek/mt76/tx.c)
65 struct mt76_wcid *wcid; in mt76_tx_status_unlock() local
67 wcid = rcu_dereference(dev->wcid[cb->wcid]); in mt76_tx_status_unlock()
68 if (wcid) { in mt76_tx_status_unlock()
69 status.sta = wcid_to_sta(wcid); in mt76_tx_status_unlock()
70 if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) { in mt76_tx_status_unlock()
71 rs.rate_idx = wcid->rate; in mt76_tx_status_unlock()
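
The mt76_tx_status_unlock() hits above show the status path looking the wcid back up from the per-device table by the index saved in the skb's control block, then copying the last remembered rate into the status report. A minimal, self-contained C sketch of that pattern follows; the stub types (wcid_stub, tx_cb_stub, status_stub) and the table size are illustrative assumptions, and the real code does the lookup under RCU and fills a struct ieee80211_tx_status.

#include <stddef.h>

#define N_WCID 288                      /* illustrative table size */

struct rate_info_stub { int flags; int legacy; };
struct wcid_stub      { int idx; struct rate_info_stub rate; void *sta; };
struct tx_cb_stub     { int wcid; int pktid; };
struct status_stub    { void *sta; struct rate_info_stub *rates; int n_rates; };

static struct wcid_stub *wcid_table[N_WCID];

static void fill_tx_status(struct status_stub *status,
                           struct rate_info_stub *rs,
                           const struct tx_cb_stub *cb)
{
    struct wcid_stub *wcid = NULL;

    if (cb->wcid < N_WCID)
        wcid = wcid_table[cb->wcid];    /* rcu_dereference() in the kernel */
    if (!wcid)
        return;

    status->sta = wcid->sta;
    if (status->sta && (wcid->rate.flags || wcid->rate.legacy)) {
        *rs = wcid->rate;               /* last rate remembered for this wcid */
        status->rates = rs;
        status->n_rates = 1;
    }
}
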
121 mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, in mt76_tx_status_skb_add() argument
131 if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx])) in mt76_tx_status_skb_add()
149 pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST, in mt76_tx_status_skb_add()
156 cb->wcid = wcid->idx; in mt76_tx_status_skb_add()
159 if (list_empty(&wcid->list)) in mt76_tx_status_skb_add()
160 list_add_tail(&wcid->list, &dev->wcid_list); in mt76_tx_status_skb_add()
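
The mt76_tx_status_skb_add() hits show how a frame gets enrolled for status tracking: the wcid must still be registered in dev->wcid[], a packet id is allocated for the skb, the wcid index is written into the skb control block, and the wcid is put on the device-wide tracking list if it is not there yet. The sketch below is a simplified stand-in under stated assumptions: a fixed slot array replaces the kernel's idr, a flag plus a singly linked list replaces list_head, and the constants are illustrative.

#include <stddef.h>

#define PKTID_FIRST 4                   /* low ids reserved in this sketch */
#define PKTID_MAX   128

struct skb_stub  { int wcid_idx; int pktid; };
struct wcid_stub {
    int idx;
    int on_list;                        /* stands in for list_empty(&wcid->list) */
    struct skb_stub *pktid[PKTID_MAX];  /* stands in for the idr */
    struct wcid_stub *next;
};
struct dev_stub  { struct wcid_stub *wcid_list; };

static int tx_status_skb_add(struct dev_stub *dev, struct wcid_stub *wcid,
                             struct skb_stub *skb)
{
    int pid;

    if (!wcid)
        return -1;                      /* not registered: caller skips tracking */

    for (pid = PKTID_FIRST; pid < PKTID_MAX; pid++)
        if (!wcid->pktid[pid])
            break;
    if (pid == PKTID_MAX)
        return -1;                      /* table full: no id available */

    wcid->pktid[pid] = skb;             /* remember the skb under its packet id */
    skb->wcid_idx = wcid->idx;
    skb->pktid = pid;

    if (!wcid->on_list) {               /* first tracked frame for this wcid */
        wcid->next = dev->wcid_list;
        dev->wcid_list = wcid;
        wcid->on_list = 1;
    }
    return pid;
}
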
170 mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid, in mt76_tx_status_skb_get() argument
178 skb = idr_remove(&wcid->pktid, pktid); in mt76_tx_status_skb_get()
183 idr_for_each_entry(&wcid->pktid, skb, id) { in mt76_tx_status_skb_get()
198 idr_remove(&wcid->pktid, cb->pktid); in mt76_tx_status_skb_get()
204 if (idr_is_empty(&wcid->pktid)) in mt76_tx_status_skb_get()
205 list_del_init(&wcid->list); in mt76_tx_status_skb_get()
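
mt76_tx_status_skb_get() is the other half: it pulls the skb stored under a given packet id out of the wcid's table (or sweeps everything when called with pktid -1 for a flush) and drops the wcid off the tracking list once the table runs empty. A sketch with the same kind of stub types; in the kernel, idr_remove(), idr_for_each_entry(), idr_is_empty() and the timeout handling do this work.

#include <stddef.h>

#define PKTID_MAX 128                   /* illustrative table size */

struct skb_stub  { int pktid; struct skb_stub *next; };
struct wcid_stub { struct skb_stub *pktid[PKTID_MAX]; int on_list; };

/* Collect matching entries onto *out; pktid < 0 means "take everything". */
static void tx_status_skb_get(struct wcid_stub *wcid, int pktid,
                              struct skb_stub **out)
{
    int id;

    for (id = 0; id < PKTID_MAX; id++) {
        struct skb_stub *skb = wcid->pktid[id];

        if (!skb)
            continue;
        if (pktid >= 0 && id != pktid)
            continue;                   /* the kernel also reaps timed-out ids here */

        wcid->pktid[id] = NULL;
        skb->next = *out;               /* splice onto the caller's completion list */
        *out = skb;
    }

    for (id = 0; id < PKTID_MAX; id++)
        if (wcid->pktid[id])
            return;
    wcid->on_list = 0;                  /* table empty: drop off the tracking list */
}
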
214 struct mt76_wcid *wcid, *tmp; in mt76_tx_status_check() local
218 list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list) in mt76_tx_status_check()
219 mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list); in mt76_tx_status_check()
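
mt76_tx_status_check() periodically walks the tracking list and lets every wcid reap its completed or timed-out entries; passing flush makes the reap unconditional. Sketch below, with a hypothetical reap_status() helper standing in for mt76_tx_status_skb_get(); the "safe" iteration mirrors list_for_each_entry_safe(), since an entry may delist itself during the reap.

#include <stddef.h>

struct wcid_stub { struct wcid_stub *next; };
struct dev_stub  { struct wcid_stub *wcid_list; };

/* Hypothetical per-wcid reap helper in the spirit of the sketch above. */
void reap_status(struct wcid_stub *wcid, int pktid);

static void tx_status_check(struct dev_stub *dev, int flush)
{
    struct wcid_stub *wcid, *tmp;

    /* grab the next pointer first, since reap_status() may unlink the entry */
    for (wcid = dev->wcid_list; wcid; wcid = tmp) {
        tmp = wcid->next;
        reap_status(wcid, flush ? -1 : 0);
    }
}
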
225 mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid, in mt76_tx_check_non_aql() argument
231 if (!wcid || info->tx_time_est) in mt76_tx_check_non_aql()
234 pending = atomic_dec_return(&wcid->non_aql_packets); in mt76_tx_check_non_aql()
236 atomic_cmpxchg(&wcid->non_aql_packets, pending, 0); in mt76_tx_check_non_aql()
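
mt76_tx_check_non_aql() accounts for frames that carry no airtime (AQL) estimate: the per-wcid counter is decremented on completion and, if a race pushed it negative, it is swapped back to zero. A small C11-atomics sketch of that decrement-and-clamp; the kernel uses atomic_dec_return() and atomic_cmpxchg().

#include <stdatomic.h>

struct wcid_stub { atomic_int non_aql_packets; };

static void tx_check_non_aql(struct wcid_stub *wcid, int has_tx_time_est)
{
    int pending;

    if (has_tx_time_est)                /* AQL-tracked frame: nothing to do */
        return;

    /* decrement and read the new value, like atomic_dec_return() */
    pending = atomic_fetch_sub(&wcid->non_aql_packets, 1) - 1;
    if (pending < 0)                    /* clamp a racy underflow back to zero */
        atomic_compare_exchange_strong(&wcid->non_aql_packets,
                                       &pending, 0);
}
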
247 struct mt76_wcid *wcid = NULL; in __mt76_tx_complete_skb() local
253 if (wcid_idx < ARRAY_SIZE(dev->wcid)) in __mt76_tx_complete_skb()
254 wcid = rcu_dereference(dev->wcid[wcid_idx]); in __mt76_tx_complete_skb()
256 mt76_tx_check_non_aql(dev, wcid, skb); in __mt76_tx_complete_skb()
276 status.sta = wcid_to_sta(wcid); in __mt76_tx_complete_skb()
277 if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) { in __mt76_tx_complete_skb()
278 rs.rate_idx = wcid->rate; in __mt76_tx_complete_skb()
300 struct mt76_wcid *wcid, struct ieee80211_sta *sta, in __mt76_tx_queue_skb() argument
311 idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta); in __mt76_tx_queue_skb()
315 wcid = (struct mt76_wcid *)sta->drv_priv; in __mt76_tx_queue_skb()
316 q->entry[idx].wcid = wcid->idx; in __mt76_tx_queue_skb()
321 pending = atomic_inc_return(&wcid->non_aql_packets); in __mt76_tx_queue_skb()
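
In __mt76_tx_queue_skb() the hits show the bookkeeping after the frame is accepted by the hardware queue: the slot remembers the station's wcid index for completion time, and frames without an airtime estimate bump the non-AQL counter so the burst loop can throttle. Sketch with stub types, a hypothetical hw_queue_push() standing in for dev->queue_ops->tx_queue_skb(), and an illustrative limit in place of MT_MAX_NON_AQL_PKT.

#include <stdatomic.h>
#include <stdbool.h>

#define QUEUE_SLOTS     256             /* illustrative ring size */
#define MAX_NON_AQL_PKT 16              /* illustrative limit */

struct wcid_stub   { int idx; atomic_int non_aql_packets; };
struct queue_entry { int wcid; };
struct queue_stub  { struct queue_entry entry[QUEUE_SLOTS]; };

/* Hypothetical stand-in for the driver's tx_queue_skb hook. */
int hw_queue_push(struct queue_stub *q, void *skb);

static int tx_queue_skb(struct queue_stub *q, void *skb,
                        struct wcid_stub *wcid, bool has_tx_time_est,
                        bool *stop)
{
    int idx = hw_queue_push(q, skb);

    if (idx < 0 || !wcid)
        return idx;

    q->entry[idx].wcid = wcid->idx;     /* looked up again at completion time */

    if (!has_tx_time_est) {             /* no AQL estimate: count it here */
        int pending = atomic_fetch_add(&wcid->non_aql_packets, 1) + 1;

        if (stop && pending >= MAX_NON_AQL_PKT)
            *stop = true;               /* tell the burst loop to back off */
    }
    return idx;
}
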
330 struct mt76_wcid *wcid, struct sk_buff *skb) in mt76_tx() argument
342 if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET)) in mt76_tx()
348 spin_lock_bh(&wcid->tx_pending.lock); in mt76_tx()
349 __skb_queue_tail(&wcid->tx_pending, skb); in mt76_tx()
350 spin_unlock_bh(&wcid->tx_pending.lock); in mt76_tx()
353 if (list_empty(&wcid->tx_list)) in mt76_tx()
354 list_add_tail(&wcid->tx_list, &phy->tx_list); in mt76_tx()
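
mt76_tx() itself only hands the frame off: it is appended to the wcid's own pending queue under that queue's lock, and the wcid is linked onto the phy's tx_list (if not already queued) so the scheduler worker will service it. A sketch of that hand-off, using pthread mutexes and a hand-rolled list in place of the kernel's skb-queue spinlock and list_head.

#include <pthread.h>
#include <stddef.h>

struct skb_stub  { struct skb_stub *next; };
struct wcid_stub {
    pthread_mutex_t pending_lock;
    struct skb_stub *pending_head, *pending_tail;
    struct wcid_stub *tx_next;          /* link in the phy's tx_list */
    int on_tx_list;
};
struct phy_stub  {
    pthread_mutex_t tx_lock;
    struct wcid_stub *tx_list;
};

static void tx_enqueue(struct phy_stub *phy, struct wcid_stub *wcid,
                       struct skb_stub *skb)
{
    /* append to the per-wcid pending queue */
    pthread_mutex_lock(&wcid->pending_lock);
    skb->next = NULL;
    if (wcid->pending_tail)
        wcid->pending_tail->next = skb;
    else
        wcid->pending_head = skb;
    wcid->pending_tail = skb;
    pthread_mutex_unlock(&wcid->pending_lock);

    /* make sure the scheduler knows this wcid has work queued */
    pthread_mutex_lock(&phy->tx_lock);
    if (!wcid->on_tx_list) {
        wcid->tx_next = phy->tx_list;
        phy->tx_list = wcid;
        wcid->on_tx_list = 1;
    }
    pthread_mutex_unlock(&phy->tx_lock);
}
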
382 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; in mt76_queue_ps_skb() local
391 __mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL); in mt76_queue_ps_skb()
448 struct mt76_txq *mtxq, struct mt76_wcid *wcid) in mt76_txq_send_burst() argument
459 if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) in mt76_txq_send_burst()
462 if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT) in mt76_txq_send_burst()
470 if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) in mt76_txq_send_burst()
475 idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop); in mt76_txq_send_burst()
492 if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) in mt76_txq_send_burst()
497 idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop); in mt76_txq_send_burst()
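
mt76_txq_send_burst() pulls frames from one txq and queues them until the queue signals a stop, bailing out early when the station is in power save or its non-AQL budget is already spent. Sketch with hypothetical txq_dequeue() and queue_skb() helpers and an illustrative limit in place of MT_MAX_NON_AQL_PKT.

#include <stdatomic.h>
#include <stdbool.h>

#define MAX_NON_AQL_PKT 16              /* illustrative limit */

struct wcid_stub { bool ps; atomic_int non_aql_packets; };
struct txq_stub;                        /* opaque per-tid backlog */

/* Hypothetical helpers standing in for the dequeue and queueing calls
 * visible in the listing. */
void *txq_dequeue(struct txq_stub *txq);
int queue_skb(void *skb, struct wcid_stub *wcid, bool *stop);

static int send_burst(struct txq_stub *txq, struct wcid_stub *wcid, int limit)
{
    int n_frames = 0;
    bool stop = false;

    if (wcid->ps)                       /* MT_WCID_FLAG_PS: leave it alone */
        return 0;
    if (atomic_load(&wcid->non_aql_packets) >= MAX_NON_AQL_PKT)
        return 0;                       /* non-AQL budget already used up */

    while (!stop && n_frames < limit) {
        void *skb = txq_dequeue(txq);

        if (!skb)
            break;
        if (queue_skb(skb, wcid, &stop) < 0)
            break;
        n_frames++;
    }
    return n_frames;
}
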
519 struct mt76_wcid *wcid; in mt76_txq_schedule_list() local
538 wcid = rcu_dereference(dev->wcid[mtxq->wcid]); in mt76_txq_schedule_list()
539 if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags)) in mt76_txq_schedule_list()
554 n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid); in mt76_txq_schedule_list()
589 mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid) in mt76_txq_schedule_pending_wcid() argument
597 spin_lock(&wcid->tx_pending.lock); in mt76_txq_schedule_pending_wcid()
598 while ((skb = skb_peek(&wcid->tx_pending)) != NULL) { in mt76_txq_schedule_pending_wcid()
615 __skb_unlink(skb, &wcid->tx_pending); in mt76_txq_schedule_pending_wcid()
616 spin_unlock(&wcid->tx_pending.lock); in mt76_txq_schedule_pending_wcid()
618 sta = wcid_to_sta(wcid); in mt76_txq_schedule_pending_wcid()
620 __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL); in mt76_txq_schedule_pending_wcid()
624 spin_lock(&wcid->tx_pending.lock); in mt76_txq_schedule_pending_wcid()
626 spin_unlock(&wcid->tx_pending.lock); in mt76_txq_schedule_pending_wcid()
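
mt76_txq_schedule_pending_wcid() drains the per-wcid pending queue: each head entry is peeked under the queue lock, unlinked, and the lock is dropped while the frame is handed to the tx path, then retaken for the next round. That peek/unlink/unlock/queue/lock pattern is sketched below with stub types and a hypothetical queue_skb() in place of __mt76_tx_queue_skb().

#include <pthread.h>
#include <stddef.h>

struct skb_stub  { struct skb_stub *next; };
struct wcid_stub {
    pthread_mutex_t pending_lock;
    struct skb_stub *pending_head, *pending_tail;
};

/* Hypothetical transmit hand-off. */
void queue_skb(struct wcid_stub *wcid, struct skb_stub *skb);

static void schedule_pending_wcid(struct wcid_stub *wcid)
{
    struct skb_stub *skb;

    pthread_mutex_lock(&wcid->pending_lock);
    while ((skb = wcid->pending_head) != NULL) {
        /* unlink the head entry (__skb_unlink() in the kernel) */
        wcid->pending_head = skb->next;
        if (!wcid->pending_head)
            wcid->pending_tail = NULL;

        /* transmit without holding the pending-queue lock */
        pthread_mutex_unlock(&wcid->pending_lock);
        queue_skb(wcid, skb);
        pthread_mutex_lock(&wcid->pending_lock);
    }
    pthread_mutex_unlock(&wcid->pending_lock);
}
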
641 struct mt76_wcid *wcid = NULL; in mt76_txq_schedule_pending() local
644 wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list); in mt76_txq_schedule_pending()
645 list_del_init(&wcid->tx_list); in mt76_txq_schedule_pending()
648 ret = mt76_txq_schedule_pending_wcid(phy, wcid); in mt76_txq_schedule_pending()
652 if (list_empty(&wcid->tx_list)) in mt76_txq_schedule_pending()
653 list_add_tail(&wcid->tx_list, &phy->tx_list); in mt76_txq_schedule_pending()
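
Finally, mt76_txq_schedule_pending() round-robins over stations with pending frames: the first wcid is popped from the phy's tx_list, drained, and put back at the tail if it had to stop early, which is what the list_del_init()/list_empty()/list_add_tail() trio above does under phy->tx_lock. A single-threaded sketch of that rotation, with a hypothetical drain_pending() helper and no locking.

#include <stdbool.h>
#include <stddef.h>

struct wcid_stub { struct wcid_stub *tx_next; bool on_tx_list; };
struct phy_stub  { struct wcid_stub *tx_head, *tx_tail; };

/* Hypothetical: drains the wcid's pending queue, returns false if it had
 * to stop early and still has frames queued. */
bool drain_pending(struct wcid_stub *wcid);

static void schedule_pending(struct phy_stub *phy)
{
    while (phy->tx_head) {
        struct wcid_stub *wcid = phy->tx_head;

        /* pop the first entry (list_del_init() in the kernel) */
        phy->tx_head = wcid->tx_next;
        if (!phy->tx_head)
            phy->tx_tail = NULL;
        wcid->tx_next = NULL;
        wcid->on_tx_list = false;

        if (drain_pending(wcid))
            continue;                   /* fully drained: move to the next wcid */

        /* blocked mid-drain: re-queue at the tail (unless the tx path
         * already re-queued it) and stop for now */
        if (!wcid->on_tx_list) {
            if (phy->tx_tail)
                phy->tx_tail->tx_next = wcid;
            else
                phy->tx_head = wcid;
            phy->tx_tail = wcid;
            wcid->on_tx_list = true;
        }
        break;
    }
}
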