
Searched refs:fq (Results 1 – 25 of 69) sorted by relevance


/openbmc/linux/include/net/
fq_impl.h
16 __fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets, in __fq_adjust_removal() argument
25 fq->backlog -= packets; in __fq_adjust_removal()
26 fq->memory_usage -= truesize; in __fq_adjust_removal()
36 idx = flow - fq->flows; in __fq_adjust_removal()
37 __clear_bit(idx, fq->flows_bitmap); in __fq_adjust_removal()
40 static void fq_adjust_removal(struct fq *fq, in fq_adjust_removal() argument
44 __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize); in fq_adjust_removal()
47 static struct sk_buff *fq_flow_dequeue(struct fq *fq, in fq_flow_dequeue() argument
52 lockdep_assert_held(&fq->lock); in fq_flow_dequeue()
58 fq_adjust_removal(fq, flow, skb); in fq_flow_dequeue()
[all …]
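The fq_impl.h hits show the core accounting on dequeue: the queue-wide packet backlog and memory usage are decremented, and the flow's slot in flows_bitmap is cleared, with the flow index recovered by pointer arithmetic into the flows array. A minimal user-space sketch of that bookkeeping (simplified types and a single-word bitmap, not the kernel code itself):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define NUM_FLOWS 64

struct flow { unsigned int backlog; };

struct fq {
    struct flow flows[NUM_FLOWS];
    unsigned long flows_bitmap;   /* one bit per flow; the kernel uses a full bitmap */
    unsigned int backlog;         /* total queued packets */
    unsigned int memory_usage;    /* total truesize of queued packets */
};

/* Mirrors __fq_adjust_removal(): subtract the removed packets from the
 * global counters and clear the flow's bitmap bit once it is empty; the
 * flow index comes from pointer arithmetic, as in the kernel. */
static void fq_adjust_removal(struct fq *fq, struct flow *flow,
                              unsigned int packets, unsigned int truesize)
{
    size_t idx = flow - fq->flows;

    flow->backlog -= packets;
    fq->backlog -= packets;
    fq->memory_usage -= truesize;

    if (flow->backlog == 0)
        fq->flows_bitmap &= ~(1UL << idx);
}

int main(void)
{
    struct fq q;

    memset(&q, 0, sizeof(q));
    q.flows[3].backlog = 1;
    q.flows_bitmap = 1UL << 3;
    q.backlog = 1;
    q.memory_usage = 2048;

    fq_adjust_removal(&q, &q.flows[3], 1, 2048);
    printf("backlog=%u mem=%u bitmap=%#lx\n",
           q.backlog, q.memory_usage, q.flows_bitmap);
    return 0;
}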
ipv6_frag.h
34 struct frag_queue *fq = container_of(q, struct frag_queue, q); in ip6frag_init() local
38 fq->ecn = 0; in ip6frag_init()
49 const struct inet_frag_queue *fq = data; in ip6frag_obj_hashfn() local
51 return jhash2((const u32 *)&fq->key.v6, in ip6frag_obj_hashfn()
59 const struct inet_frag_queue *fq = ptr; in ip6frag_obj_cmpfn() local
61 return !!memcmp(&fq->key, key, sizeof(*key)); in ip6frag_obj_cmpfn()
65 ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) in ip6frag_expire_frag_queue() argument
72 if (READ_ONCE(fq->q.fqdir->dead)) in ip6frag_expire_frag_queue()
74 spin_lock(&fq->q.lock); in ip6frag_expire_frag_queue()
76 if (fq->q.flags & INET_FRAG_COMPLETE) in ip6frag_expire_frag_queue()
[all …]
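ip6frag_obj_hashfn() and ip6frag_obj_cmpfn() implement the rhashtable callback contract: the hash function digests the lookup key with jhash2(), and the compare callback must return 0 on a match, hence the !!memcmp() idiom. A hedged stand-alone illustration of that contract, with a trivial placeholder in place of jhash2():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct frag_key { uint32_t words[10]; };   /* stand-in for the v6 key */

/* Placeholder for jhash2(): any mix of the key words serves the
 * illustration; the kernel also folds in a per-table random seed. */
static uint32_t toy_hash(const uint32_t *w, size_t n, uint32_t seed)
{
    uint32_t h = seed;

    for (size_t i = 0; i < n; i++)
        h = h * 31 + w[i];
    return h;
}

/* Compare-callback contract: return 0 for "same key". !!memcmp()
 * collapses memcmp's arbitrary nonzero result to exactly 1. */
static int frag_cmpfn(const struct frag_key *a, const struct frag_key *b)
{
    return !!memcmp(a, b, sizeof(*a));
}

int main(void)
{
    struct frag_key k1 = { .words = { 1, 2, 3 } };
    struct frag_key k2 = k1;

    printf("hash=%u equal=%d\n",
           toy_hash(k1.words, 10, 0xdeadbeef), frag_cmpfn(&k1, &k2) == 0);
    return 0;
}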
fq.h
68 struct fq { struct
85 typedef struct sk_buff *fq_tin_dequeue_t(struct fq *, argument
89 typedef void fq_skb_free_t(struct fq *,
95 typedef bool fq_skb_filter_t(struct fq *,
/openbmc/linux/block/
blk-flush.c
95 struct blk_flush_queue *fq, blk_opf_t flags);
100 return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq; in blk_get_flush_queue()
164 struct blk_flush_queue *fq, in blk_flush_complete_seq() argument
168 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_flush_complete_seq()
185 fq->flush_pending_since = jiffies; in blk_flush_complete_seq()
190 fq->flush_data_in_flight++; in blk_flush_complete_seq()
213 blk_kick_flush(q, fq, cmd_flags); in blk_flush_complete_seq()
223 struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); in flush_end_io() local
226 spin_lock_irqsave(&fq->mq_flush_lock, flags); in flush_end_io()
229 fq->rq_status = error; in flush_end_io()
[all …]
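blk-flush.c keeps two pending lists and selects between them with flush_pending_idx, so one generation of flush requests can be issued while the next accumulates. A simplified sketch of that double-buffering scheme, using plain arrays instead of the kernel's list_head queues:

#include <stdio.h>

#define MAX_PENDING 16

struct flush_queue {
    int queue[2][MAX_PENDING];   /* two generations of pending requests */
    int count[2];
    unsigned int pending_idx;    /* which generation is accumulating */
};

/* New requests join the currently-pending generation (no overflow
 * check in this sketch). */
static void flush_add(struct flush_queue *fq, int req)
{
    fq->queue[fq->pending_idx][fq->count[fq->pending_idx]++] = req;
}

/* Issuing a flush flips the index: the old generation drains while
 * later arrivals accumulate in the other slot. */
static void flush_issue(struct flush_queue *fq)
{
    unsigned int idx = fq->pending_idx;

    fq->pending_idx ^= 1;
    for (int i = 0; i < fq->count[idx]; i++)
        printf("flush covers request %d\n", fq->queue[idx][i]);
    fq->count[idx] = 0;
}

int main(void)
{
    struct flush_queue fq = { 0 };

    flush_add(&fq, 1);
    flush_add(&fq, 2);
    flush_issue(&fq);
    return 0;
}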
/openbmc/linux/net/ipv6/netfilter/
nf_conntrack_reasm.c
126 static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
137 struct frag_queue *fq; in nf_ct_frag6_expire() local
139 fq = container_of(frag, struct frag_queue, q); in nf_ct_frag6_expire()
141 ip6frag_expire_frag_queue(fq->q.fqdir->net, fq); in nf_ct_frag6_expire()
170 static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, in nf_ct_frag6_queue() argument
179 if (fq->q.flags & INET_FRAG_COMPLETE) { in nf_ct_frag6_queue()
209 if (end < fq->q.len || in nf_ct_frag6_queue()
210 ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) { in nf_ct_frag6_queue()
214 fq->q.flags |= INET_FRAG_LAST_IN; in nf_ct_frag6_queue()
215 fq->q.len = end; in nf_ct_frag6_queue()
[all …]
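nf_ct_frag6_queue() rejects fragments that contradict what is already known about the datagram: once a last fragment has fixed the total length, no fragment may end beyond it, and a second last fragment must agree exactly. A condensed sketch of that consistency check (the middle-fragment branch is extrapolated from the surrounding kernel code, not shown above):

#include <stdbool.h>
#include <stdio.h>

#define FRAG_LAST_IN 0x1

struct frag_queue { unsigned int len; unsigned int flags; };

/* Returns false if a fragment ending at `end` is inconsistent with
 * the queue state, mirroring the checks in nf_ct_frag6_queue(). */
static bool frag_accept(struct frag_queue *q, unsigned int end, bool is_last)
{
    if (is_last) {
        /* A "last" fragment pins the total length; it may not shrink
         * the datagram or disagree with a previous last fragment. */
        if (end < q->len ||
            ((q->flags & FRAG_LAST_IN) && end != q->len))
            return false;
        q->flags |= FRAG_LAST_IN;
        q->len = end;
    } else {
        /* A middle fragment may extend the known length but must not
         * reach past a length fixed by the last fragment. */
        if (end > q->len) {
            if (q->flags & FRAG_LAST_IN)
                return false;
            q->len = end;
        }
    }
    return true;
}

int main(void)
{
    struct frag_queue q = { 0 };

    /* Middle fragment to 1500 is fine; a "last" at 1400 contradicts it. */
    printf("%d %d\n", frag_accept(&q, 1500, false), frag_accept(&q, 1400, true));
    return 0;
}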
/openbmc/linux/net/ieee802154/6lowpan/
reassembly.c
33 static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
47 struct frag_queue *fq; in lowpan_frag_expire() local
49 fq = container_of(frag, struct frag_queue, q); in lowpan_frag_expire()
51 spin_lock(&fq->q.lock); in lowpan_frag_expire()
53 if (fq->q.flags & INET_FRAG_COMPLETE) in lowpan_frag_expire()
56 inet_frag_kill(&fq->q); in lowpan_frag_expire()
58 spin_unlock(&fq->q.lock); in lowpan_frag_expire()
59 inet_frag_put(&fq->q); in lowpan_frag_expire()
84 static int lowpan_frag_queue(struct lowpan_frag_queue *fq, in lowpan_frag_queue() argument
97 if (fq->q.flags & INET_FRAG_COMPLETE) in lowpan_frag_queue()
[all …]
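lowpan_frag_expire() is the standard timer-expiry idiom for fragment queues: take the queue lock, do nothing if reassembly already completed, otherwise kill the queue, then drop the timer's reference outside the lock. A simplified user-space model using a pthread mutex and a C11 atomic refcount (compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

#define FRAG_COMPLETE 0x4

struct frag_queue {
    pthread_mutex_t lock;
    unsigned int flags;
    atomic_int refcnt;
};

/* In the kernel, inet_frag_kill() also unhashes the queue and drops
 * the table's reference; the flag is enough for this model. */
static void frag_kill(struct frag_queue *q) { q->flags |= FRAG_COMPLETE; }

static void frag_put(struct frag_queue *q)
{
    if (atomic_fetch_sub(&q->refcnt, 1) == 1)
        free(q);   /* kernel: inet_frag_destroy() */
}

/* Timer callback, mirroring lowpan_frag_expire(): the COMPLETE check
 * guards against racing with a reassembly that just finished. */
static void frag_expire(struct frag_queue *q)
{
    pthread_mutex_lock(&q->lock);
    if (!(q->flags & FRAG_COMPLETE))
        frag_kill(q);
    pthread_mutex_unlock(&q->lock);
    frag_put(q);   /* release the reference the timer held */
}

int main(void)
{
    struct frag_queue *q = calloc(1, sizeof(*q));

    pthread_mutex_init(&q->lock, NULL);
    atomic_store(&q->refcnt, 1);   /* the timer's reference */
    frag_expire(q);
    return 0;
}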
/openbmc/linux/net/ipv6/
reassembly.c
70 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
76 struct frag_queue *fq; in ip6_frag_expire() local
78 fq = container_of(frag, struct frag_queue, q); in ip6_frag_expire()
80 ip6frag_expire_frag_queue(fq->q.fqdir->net, fq); in ip6_frag_expire()
106 static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, in ip6_frag_queue() argument
119 if (fq->q.flags & INET_FRAG_COMPLETE) { in ip6_frag_queue()
151 if (end < fq->q.len || in ip6_frag_queue()
152 ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) in ip6_frag_queue()
154 fq->q.flags |= INET_FRAG_LAST_IN; in ip6_frag_queue()
155 fq->q.len = end; in ip6_frag_queue()
[all …]
/openbmc/linux/drivers/soc/fsl/qbman/
qman.c
268 struct qm_mcc_fq fq; member
957 static inline void fq_set(struct qman_fq *fq, u32 mask) in fq_set() argument
959 fq->flags |= mask; in fq_set()
962 static inline void fq_clear(struct qman_fq *fq, u32 mask) in fq_clear() argument
964 fq->flags &= ~mask; in fq_clear()
967 static inline int fq_isset(struct qman_fq *fq, u32 mask) in fq_isset() argument
969 return fq->flags & mask; in fq_isset()
972 static inline int fq_isclear(struct qman_fq *fq, u32 mask) in fq_isclear() argument
974 return !(fq->flags & mask); in fq_isclear()
1121 struct qman_fq *fq; in idx_to_fq() local
[all …]
qman_test_api.c
106 static int do_enqueues(struct qman_fq *fq) in do_enqueues() argument
112 if (qman_enqueue(fq, &fd)) { in do_enqueues()
126 struct qman_fq *fq = &fq_base; in qman_test_api() local
133 err = qman_create_fq(0, FQ_FLAGS, fq); in qman_test_api()
138 err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL); in qman_test_api()
144 err = do_enqueues(fq); in qman_test_api()
149 err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); in qman_test_api()
154 err = do_enqueues(fq); in qman_test_api()
159 err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); in qman_test_api()
167 err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); in qman_test_api()
[all …]
/openbmc/linux/drivers/crypto/caam/
qi.c
142 static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, in caam_fq_ern_cb() argument
225 static int empty_retired_fq(struct device *qidev, struct qman_fq *fq) in empty_retired_fq() argument
229 ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT | in empty_retired_fq()
234 dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid); in empty_retired_fq()
243 } while (fq->flags & QMAN_FQ_STATE_NE); in empty_retired_fq()
248 static int kill_fq(struct device *qidev, struct qman_fq *fq) in kill_fq() argument
253 ret = qman_retire_fq(fq, &flags); in kill_fq()
267 } while (fq->state != qman_fq_state_retired); in kill_fq()
269 WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS); in kill_fq()
270 WARN_ON(fq->flags & QMAN_FQ_STATE_ORL); in kill_fq()
[all …]
/openbmc/linux/drivers/net/ethernet/freescale/dpaa/
dpaa_eth_trace.h
32 struct qman_fq *fq,
36 TP_ARGS(netdev, fq, fd),
54 __entry->fqid = fq->fqid;
78 struct qman_fq *fq,
81 TP_ARGS(netdev, fq, fd)
88 struct qman_fq *fq,
91 TP_ARGS(netdev, fq, fd)
98 struct qman_fq *fq,
101 TP_ARGS(netdev, fq, fd)
dpaa_eth_sysfs.c
33 struct dpaa_fq *fq; in dpaa_eth_show_fqids() local
40 list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) { in dpaa_eth_show_fqids()
41 switch (fq->fq_type) { in dpaa_eth_show_fqids()
67 if (prev && (abs(fq->fqid - prev->fqid) != 1 || in dpaa_eth_show_fqids()
78 if (prev && abs(fq->fqid - prev->fqid) == 1 && in dpaa_eth_show_fqids()
80 last_fqid = fq->fqid; in dpaa_eth_show_fqids()
82 first_fqid = fq->fqid; in dpaa_eth_show_fqids()
83 last_fqid = fq->fqid; in dpaa_eth_show_fqids()
86 prev = fq; in dpaa_eth_show_fqids()
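dpaa_eth_show_fqids() coalesces adjacent FQIDs into first-last ranges for display, flushing a range whenever adjacency breaks. A compact stand-alone version of that run-length logic (the kernel also breaks ranges on FQ-type changes, omitted here):

#include <stdio.h>

/* Print FQIDs as first-last ranges, flushing whenever adjacency
 * breaks, in the spirit of dpaa_eth_show_fqids(). */
static void print_fqid_ranges(const unsigned int *ids, int n)
{
    unsigned int first = 0, last = 0;
    int have_run = 0;

    for (int i = 0; i < n; i++) {
        /* abs(fq->fqid - prev->fqid) == 1 in the kernel: a run may
         * grow in either direction. */
        if (have_run && (ids[i] == last + 1 || ids[i] + 1 == last)) {
            last = ids[i];
            continue;
        }
        if (have_run)
            printf(first == last ? "%u\n" : "%u-%u\n", first, last);
        first = last = ids[i];
        have_run = 1;
    }
    if (have_run)
        printf(first == last ? "%u\n" : "%u-%u\n", first, last);
}

int main(void)
{
    unsigned int ids[] = { 256, 257, 258, 300, 301, 400 };

    print_fqid_ranges(ids, 6);   /* 256-258, 300-301, 400 */
    return 0;
}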
dpaa_eth.c
636 static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) in dpaa_assign_wq() argument
638 switch (fq->fq_type) { in dpaa_assign_wq()
641 fq->wq = 1; in dpaa_assign_wq()
645 fq->wq = 5; in dpaa_assign_wq()
649 fq->wq = 6; in dpaa_assign_wq()
655 fq->wq = 6; in dpaa_assign_wq()
659 fq->wq = 2; in dpaa_assign_wq()
663 fq->wq = 1; in dpaa_assign_wq()
667 fq->wq = 0; in dpaa_assign_wq()
676 fq->fq_type, fq->fqid); in dpaa_assign_wq()
[all …]
/openbmc/linux/drivers/net/ethernet/freescale/dpaa2/
dpaa2-eth-debugfs.c
47 static char *fq_type_to_str(struct dpaa2_eth_fq *fq) in fq_type_to_str() argument
49 switch (fq->type) { in fq_type_to_str()
62 struct dpaa2_eth_fq *fq; in dpaa2_dbg_fqs_show() local
71 fq = &priv->fq[i]; in dpaa2_dbg_fqs_show()
72 err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); in dpaa2_dbg_fqs_show()
77 if (!fq->stats.frames && !fcnt) in dpaa2_dbg_fqs_show()
81 fq->fqid, in dpaa2_dbg_fqs_show()
82 fq->target_cpu, in dpaa2_dbg_fqs_show()
83 fq->tc, in dpaa2_dbg_fqs_show()
84 fq_type_to_str(fq), in dpaa2_dbg_fqs_show()
[all …]
dpaa2-eth.c
335 struct dpaa2_eth_fq *fq, in dpaa2_eth_xdp_flush() argument
350 err = priv->enqueue(priv, fq, &fds[total_enqueued], in dpaa2_eth_xdp_flush()
365 struct dpaa2_eth_fq *fq) in dpaa2_eth_xdp_tx_flush() argument
374 enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds); in dpaa2_eth_xdp_tx_flush()
378 fds = fq->xdp_tx_fds.fds; in dpaa2_eth_xdp_tx_flush()
383 for (i = enqueued; i < fq->xdp_tx_fds.num; i++) { in dpaa2_eth_xdp_tx_flush()
388 fq->xdp_tx_fds.num = 0; in dpaa2_eth_xdp_tx_flush()
398 struct dpaa2_eth_fq *fq; in dpaa2_eth_xdp_enqueue() local
415 fq = &priv->fq[queue_id]; in dpaa2_eth_xdp_enqueue()
416 dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++]; in dpaa2_eth_xdp_enqueue()
[all …]
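dpaa2_eth_xdp_flush() and dpaa2_eth_xdp_tx_flush() follow a common batching pattern: offer the whole descriptor array to the hardware, advance past whatever was accepted, retry a bounded number of times, and free the frames that never made it. A generic user-space model of that loop; dev_enqueue() here is a hypothetical stand-in for priv->enqueue():

#include <stdio.h>

/* Hypothetical device enqueue: accepts at most `room` descriptors per
 * call and reports how many it took; stands in for priv->enqueue(). */
static int dev_enqueue(const int *fds, int n, int room)
{
    return n < room ? n : room;
}

/* Mirrors the dpaa2 flush loop: keep offering the rest of the batch
 * until the device stops making progress, then drop the leftovers. */
static int flush_batch(int *fds, int num)
{
    int total = 0, retries = 0;

    while (total < num && retries < 3) {
        int done = dev_enqueue(fds + total, num - total, 2);

        if (done == 0) {
            retries++;
            continue;
        }
        total += done;
    }
    for (int i = total; i < num; i++)
        printf("freeing unsent fd %d\n", fds[i]);
    return total;
}

int main(void)
{
    int fds[5] = { 10, 11, 12, 13, 14 };

    printf("enqueued %d of 5\n", flush_batch(fds, 5));
    return 0;
}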
dpaa2-xsk.c
17 struct dpaa2_eth_fq *fq; in dpaa2_eth_setup_consume_func() local
21 fq = &priv->fq[i]; in dpaa2_eth_setup_consume_func()
23 if (fq->type != type) in dpaa2_eth_setup_consume_func()
25 if (fq->channel != ch) in dpaa2_eth_setup_consume_func()
28 fq->consume = consume; in dpaa2_eth_setup_consume_func()
106 struct dpaa2_eth_fq *fq) in dpaa2_xsk_rx() argument
127 xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); in dpaa2_xsk_rx()
143 dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); in dpaa2_xsk_rx()
399 struct dpaa2_eth_fq *fq; in dpaa2_xsk_tx() local
408 fq = &priv->fq[ch->nctx.desired_cpu]; in dpaa2_xsk_tx()
[all …]
dpaa2-switch.c
664 napi_enable(&ethsw->fq[i].napi); in dpaa2_switch_enable_ctrl_if_napi()
680 napi_disable(&ethsw->fq[i].napi); in dpaa2_switch_disable_ctrl_if_napi()
2393 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq, in dpaa2_switch_tx_conf() argument
2396 dpaa2_switch_free_fd(fq->ethsw, fd); in dpaa2_switch_tx_conf()
2399 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq, in dpaa2_switch_rx() argument
2402 struct ethsw_core *ethsw = fq->ethsw; in dpaa2_switch_rx()
2486 ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid; in dpaa2_switch_setup_fqs()
2487 ethsw->fq[i].ethsw = ethsw; in dpaa2_switch_setup_fqs()
2488 ethsw->fq[i++].type = DPSW_QUEUE_RX; in dpaa2_switch_setup_fqs()
2490 ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid; in dpaa2_switch_setup_fqs()
[all …]
dpaa2-eth.h
455 struct dpaa2_eth_fq *fq);
553 struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; member
555 struct dpaa2_eth_fq *fq,
823 struct dpaa2_eth_fq *fq,
830 struct dpaa2_eth_fq *fq);
851 struct dpaa2_eth_fq *fq,
/openbmc/linux/net/ipv4/
inet_fragment.c
133 struct inet_frag_queue *fq = ptr; in inet_frags_free_cb() local
136 count = del_timer_sync(&fq->timer) ? 1 : 0; in inet_frags_free_cb()
138 spin_lock_bh(&fq->lock); in inet_frags_free_cb()
139 fq->flags |= INET_FRAG_DROP; in inet_frags_free_cb()
140 if (!(fq->flags & INET_FRAG_COMPLETE)) { in inet_frags_free_cb()
141 fq->flags |= INET_FRAG_COMPLETE; in inet_frags_free_cb()
143 } else if (fq->flags & INET_FRAG_HASH_DEAD) { in inet_frags_free_cb()
146 spin_unlock_bh(&fq->lock); in inet_frags_free_cb()
148 if (refcount_sub_and_test(count, &fq->refcnt)) in inet_frags_free_cb()
149 inet_frag_destroy(fq); in inet_frags_free_cb()
[all …]
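inet_frags_free_cb() shows a batched refcount drop: every reference known to be dead is tallied (a cancelled pending timer, the hash table's own reference), then released with a single refcount_sub_and_test() so the object is destroyed exactly once. A simplified model with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct frag_queue {
    atomic_int refcnt;
    bool timer_was_pending;   /* models del_timer_sync() returning 1 */
    bool complete;
    bool hash_dead;
};

static void destroy(struct frag_queue *q)
{
    puts("destroyed");
    free(q);
}

/* Mirrors inet_frags_free_cb(): tally every reference this callback
 * just invalidated, then drop them in one atomic subtraction so the
 * object dies exactly once. */
static void frags_free_cb(struct frag_queue *q)
{
    int count = q->timer_was_pending ? 1 : 0;   /* timer's reference */

    if (!q->complete) {
        q->complete = true;
        count++;          /* the reference owned by the hash table */
    } else if (q->hash_dead) {
        count++;
    }

    if (atomic_fetch_sub(&q->refcnt, count) == count)
        destroy(q);
}

int main(void)
{
    struct frag_queue *q = calloc(1, sizeof(*q));

    atomic_store(&q->refcnt, 2);   /* timer + hash table */
    q->timer_was_pending = true;
    frags_free_cb(q);
    return 0;
}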
/openbmc/linux/drivers/iommu/
dma-iommu.c
54 struct iova_fq __percpu *fq; /* Flush queue */ member
108 #define fq_ring_for_each(i, fq) \ argument
109 for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
111 static inline bool fq_full(struct iova_fq *fq) in fq_full() argument
113 assert_spin_locked(&fq->lock); in fq_full()
114 return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head); in fq_full()
117 static inline unsigned int fq_ring_add(struct iova_fq *fq) in fq_ring_add() argument
119 unsigned int idx = fq->tail; in fq_ring_add()
121 assert_spin_locked(&fq->lock); in fq_ring_add()
123 fq->tail = (idx + 1) % IOVA_FQ_SIZE; in fq_ring_add()
[all …]
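The dma-iommu flush queue is a fixed-size ring: head and tail advance modulo IOVA_FQ_SIZE, the ring is full when the slot after tail is head, and fq_ring_for_each walks the occupied span. A stand-alone sketch of the same ring arithmetic:

#include <stdbool.h>
#include <stdio.h>

#define FQ_SIZE 8   /* stands in for IOVA_FQ_SIZE */

struct ring {
    unsigned int head, tail;
    int entries[FQ_SIZE];
};

/* Full when advancing tail would collide with head, as fq_full(). */
static bool ring_full(const struct ring *r)
{
    return ((r->tail + 1) % FQ_SIZE) == r->head;
}

/* Claim the slot at tail and advance it, as fq_ring_add() does. */
static unsigned int ring_add(struct ring *r)
{
    unsigned int idx = r->tail;

    r->tail = (idx + 1) % FQ_SIZE;
    return idx;
}

int main(void)
{
    struct ring r = { 0 };

    while (!ring_full(&r))
        r.entries[ring_add(&r)] = 42;

    /* Equivalent of fq_ring_for_each(i, fq). */
    for (unsigned int i = r.head; i != r.tail; i = (i + 1) % FQ_SIZE)
        printf("slot %u = %d\n", i, r.entries[i]);
    return 0;
}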
/openbmc/linux/net/mac80211/
debugfs.c
81 struct fq *fq = &local->fq; in aqm_read() local
85 spin_lock_bh(&local->fq.lock); in aqm_read()
99 fq->flows_cnt, in aqm_read()
100 fq->backlog, in aqm_read()
101 fq->overmemory, in aqm_read()
102 fq->overlimit, in aqm_read()
103 fq->collisions, in aqm_read()
104 fq->memory_usage, in aqm_read()
105 fq->memory_limit, in aqm_read()
106 fq->limit, in aqm_read()
[all …]
tx.c
1370 struct fq *fq; in codel_dequeue_func() local
1375 fq = &local->fq; in codel_dequeue_func()
1380 flow = &fq->flows[cvars - local->cvars]; in codel_dequeue_func()
1382 return fq_flow_dequeue(fq, flow); in codel_dequeue_func()
1399 static struct sk_buff *fq_tin_dequeue_func(struct fq *fq, in fq_tin_dequeue_func() argument
1409 local = container_of(fq, struct ieee80211_local, fq); in fq_tin_dequeue_func()
1424 cvars = &local->cvars[flow - fq->flows]; in fq_tin_dequeue_func()
1437 static void fq_skb_free_func(struct fq *fq, in fq_skb_free_func() argument
1444 local = container_of(fq, struct ieee80211_local, fq); in fq_skb_free_func()
1452 struct fq *fq = &local->fq; in ieee80211_txq_enqueue() local
[all …]
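mac80211 attaches its CoDel state to the generic fq flows via a parallel cvars array: given only a flow pointer, flow - fq->flows yields the index of the matching CoDel state, and container_of() recovers the ieee80211_local from the embedded struct fq. A small illustration of the parallel-array indexing:

#include <stdio.h>

#define NFLOWS 4

struct flow  { int backlog; };
struct codel { int count; };

struct local {
    struct flow  flows[NFLOWS];
    struct codel cvars[NFLOWS];   /* parallel to flows[] */
};

/* Given only a flow pointer, find its CoDel state, as tx.c does with
 * &local->cvars[flow - fq->flows]. */
static struct codel *flow_to_codel(struct local *l, struct flow *f)
{
    return &l->cvars[f - l->flows];
}

int main(void)
{
    struct local l = { 0 };

    l.cvars[2].count = 7;
    printf("count=%d\n", flow_to_codel(&l, &l.flows[2])->count);
    return 0;
}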
/openbmc/linux/net/xdp/
xsk_buff_pool.c
95 pool->fq = xs->fq_tmp; in xp_create_and_assign_umem()
235 if (!pool->fq || !pool->cq) in xp_assign_dev_shared()
265 if (pool->fq) { in xp_release_deferred()
266 xskq_destroy(pool->fq); in xp_release_deferred()
267 pool->fq = NULL; in xp_release_deferred()
498 if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) { in __xp_alloc()
499 pool->fq->queue_empty_descs++; in __xp_alloc()
506 pool->fq->invalid_descs++; in __xp_alloc()
507 xskq_cons_release(pool->fq); in __xp_alloc()
522 xskq_cons_release(pool->fq); in __xp_alloc()
[all …]
/openbmc/linux/include/soc/fsl/
qman.h
300 } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ member
691 struct qman_fq *fq,
699 typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
969 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
979 void qman_destroy_fq(struct qman_fq *fq);
985 u32 qman_fq_fqid(struct qman_fq *fq);
1021 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
1030 int qman_schedule_fq(struct qman_fq *fq);
1050 int qman_retire_fq(struct qman_fq *fq, u32 *flags);
1059 int qman_oos_fq(struct qman_fq *fq);
[all …]
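Taken together, the qman.h declarations imply a frame-queue lifecycle: create, init (optionally scheduled), then retire, take out-of-service, and destroy. A hedged kernel-context sketch of that call sequence using only functions declared above; the flag values are illustrative and error handling is abbreviated, so treat this as a shape, not a driver:

/* Kernel-context sketch, not a standalone program. Flag choices
 * (DYNAMIC_FQID, INITFQ SCHED) are illustrative. */
#include <soc/fsl/qman.h>

static struct qman_fq my_fq;

static int fq_lifecycle(void)
{
    u32 flags;
    int err;

    err = qman_create_fq(0 /* let QMan pick the FQID */,
                         QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq);
    if (err)
        return err;

    /* NULL opts accepts defaults; real drivers fill qm_mcc_initfq. */
    err = qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, NULL);
    if (err)
        goto out_destroy;

    pr_info("using FQID %u\n", qman_fq_fqid(&my_fq));

    /* ... enqueue and dequeue traffic here ... */

    err = qman_retire_fq(&my_fq, &flags);
    if (err >= 0)                   /* retirement may complete async */
        err = qman_oos_fq(&my_fq);  /* out of service before destroy */

out_destroy:
    qman_destroy_fq(&my_fq);
    return err;
}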
/openbmc/qemu/net/
af-xdp.c
39 struct xsk_ring_prod fq; member
204 if (!n || !xsk_ring_prod__reserve(&s->fq, n, &idx)) { in af_xdp_fq_refill()
209 *xsk_ring_prod__fill_addr(&s->fq, idx++) = s->pool[--s->n_pool]; in af_xdp_fq_refill()
211 xsk_ring_prod__submit(&s->fq, n); in af_xdp_fq_refill()
213 if (xsk_ring_prod__needs_wakeup(&s->fq)) { in af_xdp_fq_refill()
310 &s->fq, &s->cq, &config); in af_xdp_umem_create()
313 &s->fq, &s->cq, &config); in af_xdp_umem_create()
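The QEMU af-xdp.c hits show the canonical AF_XDP fill-queue refill: reserve slots on the producer ring, write one umem address per slot, submit, and kick the kernel if need-wakeup mode asks for it. A condensed sketch against libxdp's xsk ring helpers; the buffer pool, fill ring, and socket fd are assumed to be already set up, and the poll() kick is one common wakeup mechanism:

#include <poll.h>
#include <stdint.h>
#include <xdp/xsk.h>

static void fill_refill(struct xsk_ring_prod *fq, int xsk_fd,
                        uint64_t *pool, unsigned int *n_pool,
                        unsigned int want)
{
    uint32_t idx;
    unsigned int n = want < *n_pool ? want : *n_pool;

    /* Reserve n slots; xsk_ring_prod__reserve() yields 0 on no room. */
    if (!n || !xsk_ring_prod__reserve(fq, n, &idx))
        return;

    /* One umem frame address per reserved fill-ring slot. */
    for (unsigned int i = 0; i < n; i++)
        *xsk_ring_prod__fill_addr(fq, idx++) = pool[--(*n_pool)];

    xsk_ring_prod__submit(fq, n);

    /* In need-wakeup mode the kernel only services the fill ring
     * after an explicit kick. */
    if (xsk_ring_prod__needs_wakeup(fq)) {
        struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN };

        poll(&pfd, 1, 0);
    }
}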
