
Searched refs: q (results 26 – 50 of 1910), sorted by relevance


/openbmc/linux/net/sched/
sch_skbprio.c
44 for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) { in calc_new_high_prio()
94 sch->q.qlen++; in skbprio_enqueue()
123 if (q->lowest_prio == q->highest_prio) { in skbprio_enqueue()
129 q->lowest_prio = calc_new_low_prio(q); in skbprio_enqueue()
142 struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio]; in skbprio_dequeue()
148 sch->q.qlen--; in skbprio_dequeue()
152 q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb); in skbprio_dequeue()
156 if (q->lowest_prio == q->highest_prio) { in skbprio_dequeue()
161 q->highest_prio = calc_new_high_prio(q); in skbprio_dequeue()
189 memset(&q->qstats, 0, sizeof(q->qstats)); in skbprio_init()
[all …]
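An aside on the calc_new_high_prio() hit above: skbprio tracks the highest and lowest occupied priority bands and rescans downward when the top band drains. A minimal standalone sketch of that scan, with a plain counter array standing in for the kernel's per-band queues (names here are illustrative, not the kernel's):

#include <stdio.h>

#define SKBPRIO_MAX 64

/* Find the next occupied band below the one that just emptied. */
static int calc_new_high_prio(const unsigned int qlen[], int lowest_prio,
                              int highest_prio)
{
    int prio;

    for (prio = highest_prio - 1; prio >= lowest_prio; prio--) {
        if (qlen[prio] > 0)
            return prio;
    }
    /* All bands empty: fall back to the lowest tracked band. */
    return lowest_prio;
}

int main(void)
{
    unsigned int qlen[SKBPRIO_MAX] = {0};

    qlen[3] = 2;
    qlen[10] = 1;
    /* Band 12 just went empty; the new highest occupied band is 10. */
    printf("new highest prio: %d\n", calc_new_high_prio(qlen, 3, 12));
    return 0;
}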
sch_fq.c
302 root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)]; in fq_classify()
304 if (q->flows >= (2U << q->fq_trees_log) && in fq_classify()
305 q->inactive_flows > q->flows/2) in fq_classify()
359 q->flows++; in fq_classify()
401 sch->q.qlen--; in fq_dequeue_skb()
467 skb->tstamp = q->ktime_cache + q->horizon; in fq_enqueue()
494 sch->q.qlen++; in fq_enqueue()
511 q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3; in fq_check_throttled()
588 if ((head == &q->new_flows) && q->old_flows.first) { in fq_dequeue()
754 if (q->fq_root && log == q->fq_trees_log) in fq_resize()
[all …]
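The unthrottle_latency_ns hit above uses the kernel's usual shift-based moving average: subtracting value >> 3 each round decays the estimate with weight 1/8, and folding in sample >> 3 completes the EWMA. A tiny sketch of the idiom (values illustrative):

#include <stdio.h>

int main(void)
{
    unsigned long lat_ns = 800000;   /* current estimate */
    unsigned long sample = 100000;   /* new observation */

    /* decay the old estimate by 1/8, then add 1/8 of the sample */
    lat_ns -= lat_ns >> 3;
    lat_ns += sample >> 3;
    printf("lat_ns = %lu\n", lat_ns);
    return 0;
}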
sch_red.c
78 q->vars.qavg = red_calc_qavg(&q->parms, in red_enqueue()
85 switch (red_action(&q->parms, &q->vars, q->vars.qavg)) { in red_enqueue()
111 if (red_use_harddrop(q) || !red_use_ecn(q)) { in red_enqueue()
198 opt.set.min = q->parms.qth_min >> q->parms.Wlog; in red_offload()
199 opt.set.max = q->parms.qth_max >> q->parms.Wlog; in red_offload()
302 if (!q->qdisc->q.qlen) in __red_change()
329 red_adaptative_algo(&q->parms, &q->vars); in red_adaptative_timer()
417 .qth_min = q->parms.qth_min >> q->parms.Wlog, in red_dump()
418 .qth_max = q->parms.qth_max >> q->parms.Wlog, in red_dump()
464 st.early = q->stats.prob_drop + q->stats.forced_drop; in red_dump_stats()
[all …]
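The red_calc_qavg() and qth_min >> Wlog hits above are two sides of RED's fixed-point EWMA: qavg is kept scaled by 2^Wlog so the update needs only shifts, and the thresholds are shifted back down for dumps. A minimal sketch under that assumption (constants illustrative, not struct red_parms):

#include <stdio.h>

int main(void)
{
    unsigned int Wlog = 9;          /* EWMA weight = 1/2^9 */
    unsigned long qavg = 0;         /* scaled by 2^Wlog */
    unsigned long backlog[] = { 1000, 1200, 800, 1500 };

    for (int i = 0; i < 4; i++) {
        /* qavg += W * (qlen - qavg), W = 2^-Wlog, all in fixed point */
        qavg += backlog[i] - (qavg >> Wlog);
        printf("qavg (unscaled) = %lu\n", qavg >> Wlog);
    }
    return 0;
}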
sch_cbs.c
100 sch->q.qlen++; in cbs_child_enqueue()
120 if (sch->q.qlen == 0 && q->credits > 0) { in cbs_enqueue_soft()
171 sch->q.qlen--; in cbs_child_dequeue()
187 qdisc_watchdog_schedule_ns(&q->watchdog, q->last); in cbs_dequeue_soft()
191 credits = timediff_to_credits(now - q->last, q->idleslope); in cbs_dequeue_soft()
194 q->credits = min_t(s64, credits, q->hicredit); in cbs_dequeue_soft()
199 delay = delay_from_credits(q->credits, q->idleslope); in cbs_dequeue_soft()
220 q->credits = max_t(s64, credits, q->locredit); in cbs_dequeue_soft()
223 q->last = now; in cbs_dequeue_soft()
414 if (!q->qdisc) in cbs_init()
[all …]
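The cbs_dequeue_soft() hits above show the credit-based shaper's bookkeeping: credits accrue at idleslope while idle, are clamped to hicredit/locredit, and a negative balance converts into a watchdog delay. A hedged sketch of that arithmetic (units and field names assumed, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL

/* bytes credited for 'timediff' ns at 'slope' bytes/sec */
static int64_t timediff_to_credits(int64_t timediff_ns, int64_t slope)
{
    return timediff_ns * slope / NSEC_PER_SEC;
}

/* ns to wait until a negative credit balance is paid back at 'slope' */
static int64_t delay_from_credits(int64_t credits, int64_t slope)
{
    return -credits * NSEC_PER_SEC / slope;
}

int main(void)
{
    int64_t credits = -3000;        /* owed after the last send */
    int64_t idleslope = 1000000;    /* bytes/sec, illustrative */
    int64_t elapsed_ns = 2000000;   /* 2 ms idle since q->last */
    int64_t hicredit = 5000;

    credits += timediff_to_credits(elapsed_ns, idleslope);
    if (credits > hicredit)
        credits = hicredit;         /* the min_t(..., hicredit) clamp */
    if (credits < 0)
        printf("throttle %lld ns\n",
               (long long)delay_from_credits(credits, idleslope));
    else
        printf("may send, credits=%lld\n", (long long)credits);
    return 0;
}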
sch_plug.c
105 if (q->throttled) in plug_dequeue()
113 q->throttled = true; in plug_dequeue()
116 q->pkts_to_release--; in plug_dequeue()
128 q->pkts_last_epoch = 0; in plug_init()
129 q->pkts_to_release = 0; in plug_init()
144 q->throttled = true; in plug_init()
171 q->pkts_last_epoch = q->pkts_current_epoch; in plug_change()
174 q->throttled = true; in plug_change()
181 q->pkts_to_release += q->pkts_last_epoch; in plug_change()
183 q->throttled = false; in plug_change()
[all …]
sch_hhf.c
228 if (q->hh_flows_current_cnt >= q->hh_flows_limit) { in alloc_new_hh()
259 prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout; in hhf_classify()
271 flow = seek_list(hash, &q->hh_flows[flow_pos], q); in hhf_classify()
307 flow = alloc_new_hh(&q->hh_flows[flow_pos], q); in hhf_classify()
362 sch->q.qlen--; in hhf_drop()
445 sch->q.qlen--; in hhf_dequeue()
451 if ((head == &q->new_buckets) && !list_empty(&q->old_buckets)) in hhf_dequeue()
481 if (!q->hh_flows) in hhf_destroy()
560 qlen = sch->q.qlen; in hhf_change()
582 get_random_bytes(&q->perturbation, sizeof(q->perturbation)); in hhf_init()
[all …]
sch_multiq.c
79 sch->q.qlen++; in multiq_enqueue()
96 q->curband++; in multiq_dequeue()
97 if (q->curband >= q->bands) in multiq_dequeue()
98 q->curband = 0; in multiq_dequeue()
105 qdisc = q->queues[q->curband]; in multiq_dequeue()
109 sch->q.qlen--; in multiq_dequeue()
155 q->curband = 0; in multiq_reset()
168 kfree(q->queues); in multiq_destroy()
195 for (i = q->bands; i < q->max_bands; i++) { in multiq_tune()
246 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in multiq_init()
[all …]
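The multiq_dequeue() hits above are a plain round-robin over bands: curband advances each pick and wraps at bands. A trivial sketch of the wrap:

#include <stdio.h>

int main(void)
{
    unsigned int bands = 3, curband = 2;

    for (int step = 0; step < 5; step++) {
        curband++;
        if (curband >= bands)   /* wrap, exactly as the hit shows */
            curband = 0;
        printf("serve band %u\n", curband);
    }
    return 0;
}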
sch_cake.c
1896 if (q->buffer_used > q->buffer_max_used) in cake_enqueue()
1897 q->buffer_max_used = q->buffer_used; in cake_enqueue()
1899 if (q->buffer_used > q->buffer_limit) { in cake_enqueue()
1941 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++) in cake_clear_tin()
1991 if (q->cur_tin >= q->tin_cnt) { in cake_dequeue()
2549 q->rate_ns = q->tins[ft].tin_rate_ns; in cake_reconfigure()
2550 q->rate_shft = q->tins[ft].tin_rate_shft; in cake_reconfigure()
2555 u64 t = q->rate_bps * q->interval; in cake_reconfigure()
2565 q->buffer_limit = min(q->buffer_limit, in cake_reconfigure()
2680 q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0; in cake_change()
[all …]
sch_qfq.c
309 q->iwsum = ONE_FP / q->wsum; in qfq_update_agg()
337 q->iwsum = ONE_FP / q->wsum; in qfq_destroy_agg()
340 q->in_serv_agg = qfq_choose_next_agg(q); in qfq_destroy_agg()
775 q->bitmaps[dst] |= q->bitmaps[src] & mask; in qfq_move_groups()
807 unsigned long vslot = q->V >> q->min_slot_shift; in qfq_make_eligible()
965 ineligible = q->bitmaps[IR] | q->bitmaps[IB]; in qfq_update_eligible()
1161 q->V += (u64)len * q->iwsum; in qfq_dequeue()
1176 q->oldV = q->V; in qfq_choose_next_agg()
1181 grp = qfq_ffs(q, q->bitmaps[ER]); in qfq_choose_next_agg()
1340 q->oldV = q->V = agg->S; in qfq_activate_agg()
[all …]
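The qfq_dequeue() hit above advances the scheduler's virtual time: V grows by len/wsum in fixed point, with iwsum precomputed as ONE_FP / wsum so the hot path needs only a multiply. A sketch of that update (FRAC_BITS and the weights are illustrative):

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 30
#define ONE_FP    (1UL << FRAC_BITS)

int main(void)
{
    uint64_t V = 0;
    unsigned long wsum = 16;              /* sum of active weights */
    unsigned long iwsum = ONE_FP / wsum;  /* precomputed inverse */

    unsigned int pkt_len[] = { 1500, 64, 9000 };
    for (int i = 0; i < 3; i++) {
        V += (uint64_t)pkt_len[i] * iwsum;   /* q->V += len * q->iwsum */
        printf("V = %llu (%.2f bytes/weight)\n",
               (unsigned long long)V, (double)V / ONE_FP);
    }
    return 0;
}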
sch_tbf.c
260 sch->q.qlen++; in tbf_enqueue()
274 skb = q->qdisc->ops->peek(q->qdisc); in tbf_dequeue()
283 toks = min_t(s64, now - q->t_c, q->buffer); in tbf_dequeue()
301 q->t_c = now; in tbf_dequeue()
305 sch->q.qlen--; in tbf_dequeue()
335 q->tokens = q->buffer; in tbf_reset()
336 q->ptokens = q->mtu; in tbf_reset()
450 old = q->qdisc; in tbf_change()
455 q->mtu = mtu; in tbf_change()
463 q->tokens = q->buffer; in tbf_change()
[all …]
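The tbf_dequeue() hits above are the token refill: elapsed time is capped at the buffer depth before being credited, so bursts stay bounded. A hedged sketch of the balance arithmetic (tokens counted as ns of line time; the exact length-to-time conversion is assumed, not the kernel's psched tables):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int64_t buffer = 500000;     /* max burst, in ns of line time */
    int64_t tokens = 100000;     /* current balance */
    int64_t t_c = 1000000;       /* last update timestamp (ns) */
    int64_t now = 1400000;

    /* toks = min_t(s64, now - q->t_c, q->buffer), then credit it */
    int64_t toks = now - t_c;
    if (toks > buffer)
        toks = buffer;
    tokens += toks;
    if (tokens > buffer)
        tokens = buffer;         /* never exceed the configured burst */
    t_c = now;

    int64_t pkt_cost = 300000;   /* ns to transmit this packet */
    if (tokens >= pkt_cost)
        printf("send, tokens left %lld\n", (long long)(tokens - pkt_cost));
    else
        printf("throttle for %lld ns\n", (long long)(pkt_cost - tokens));
    return 0;
}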
/openbmc/linux/drivers/net/ethernet/chelsio/cxgb3/
sge.c
580 q->sdesc[q->pidx] = q->sdesc[idx]; in recycle_rx_buf()
587 if (++q->pidx == q->size) { in recycle_rx_buf()
647 memset(q, 0, sizeof(*q)); in t3_reset_qset()
1323 if (q->pidx >= q->size) { in t3_eth_xmit()
1324 q->pidx -= q->size; in t3_eth_xmit()
1500 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in ctrl_xmit()
1533 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in restart_ctrlq()
1723 if (q->pidx >= q->size) { in ofld_xmit()
1724 q->pidx -= q->size; in ofld_xmit()
1778 q->pidx -= q->size; in restart_offloadq()
[all …]
/openbmc/linux/drivers/net/wireless/mediatek/mt76/
dma.c
187 Q_WRITE(dev, q, ring_size, q->ndesc); in mt76_dma_sync_idx()
188 q->head = Q_READ(dev, q, dma_idx); in mt76_dma_sync_idx()
189 q->tail = q->head; in mt76_dma_sync_idx()
197 if (!q || !q->ndesc) in mt76_dma_queue_reset()
248 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_rx_buf()
273 next = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
341 Q_WRITE(dev, q, cpu_idx, q->head); in mt76_dma_kick_queue()
350 if (!q || !q->ndesc) in mt76_dma_tx_cleanup()
453 q->tail = (q->tail + 1) % q->ndesc; in mt76_dma_dequeue()
469 if (q->queued + 1 >= q->ndesc - 1) in mt76_dma_tx_queue_skb_raw()
[all …]
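The dma.c hits above all revolve around one producer/consumer ring: head and tail advance modulo ndesc, and a slack slot keeps a full ring distinct from an empty one. A standalone sketch with a plain struct in place of struct mt76_queue:

#include <stdio.h>

struct ring {
    int head;   /* producer index */
    int tail;   /* consumer index */
    int queued; /* occupancy counter, as the hits track */
    int ndesc;  /* ring size */
};

static int ring_add(struct ring *q)
{
    if (q->queued + 1 >= q->ndesc - 1)   /* leave slack, as the hit does */
        return -1;
    q->head = (q->head + 1) % q->ndesc;  /* same wrap as mt76_dma_add_buf */
    q->queued++;
    return 0;
}

static int ring_dequeue(struct ring *q)
{
    if (!q->queued)
        return -1;
    q->tail = (q->tail + 1) % q->ndesc;
    q->queued--;
    return 0;
}

int main(void)
{
    struct ring q = { .ndesc = 4 };

    while (!ring_add(&q))
        ;
    printf("filled: head=%d queued=%d\n", q.head, q.queued);
    ring_dequeue(&q);
    printf("after dequeue: tail=%d queued=%d\n", q.tail, q.queued);
    return 0;
}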
/openbmc/linux/drivers/infiniband/sw/rxe/
rxe_queue.c
66 q = kzalloc(sizeof(*q), GFP_KERNEL); in rxe_queue_init()
67 if (!q) in rxe_queue_init()
90 if (!q->buf) in rxe_queue_init()
93 q->buf->log2_elem_size = q->log2_elem_size; in rxe_queue_init()
94 q->buf->index_mask = q->index_mask; in rxe_queue_init()
99 return q; in rxe_queue_init()
102 kfree(q); in rxe_queue_init()
118 if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type))) in resize_finish()
139 q->index = q->buf->consumer_index; in resize_finish()
195 if (q->ip) in rxe_queue_cleanup()
[all …]
/openbmc/linux/sound/core/seq/oss/
seq_oss_event.c
99 return note_off_event(dp, 0, q->n.chn, q->n.note, q->n.vel, ev); in old_event()
102 return note_on_event(dp, 0, q->n.chn, q->n.note, q->n.vel, ev); in old_event()
110 q->n.chn, 0, q->n.note, ev); in old_event()
127 return note_off_event(dp, q->e.dev, q->e.chn, q->e.p1, q->e.p2, ev); in extended_event()
130 return note_on_event(dp, q->e.dev, q->e.chn, q->e.p1, q->e.p2, ev); in extended_event()
134 q->e.chn, 0, q->e.p1, ev); in extended_event()
138 q->e.chn, 0, q->e.p1, ev); in extended_event()
181 return note_on_event(dp, q->v.dev, q->v.chn, q->v.note, q->v.parm, ev); in chn_voice_event()
184 return note_off_event(dp, q->v.dev, q->v.chn, q->v.note, q->v.parm, ev); in chn_voice_event()
188 q->v.chn, q->v.note, q->v.parm, ev); in chn_voice_event()
[all …]
/openbmc/linux/kernel/sched/
swait.c
9 raw_spin_lock_init(&q->lock); in __init_swait_queue_head()
11 INIT_LIST_HEAD(&q->task_list); in __init_swait_queue_head()
25 if (list_empty(&q->task_list)) in swake_up_locked()
44 swake_up_locked(q, 0); in swake_up_all_locked()
52 swake_up_locked(q, 0); in swake_up_one()
66 raw_spin_lock_irq(&q->lock); in swake_up_all()
77 raw_spin_unlock_irq(&q->lock); in swake_up_all()
78 raw_spin_lock_irq(&q->lock); in swake_up_all()
80 raw_spin_unlock_irq(&q->lock); in swake_up_all()
96 __prepare_to_swait(q, wait); in prepare_to_swait_exclusive()
[all …]
/openbmc/linux/include/media/
videobuf-core.h
50 struct videobuf_queue *q; member
103 int (*buf_setup)(struct videobuf_queue *q,
108 void (*buf_queue)(struct videobuf_queue *q,
122 int (*iolock) (struct videobuf_queue *q,
125 int (*sync) (struct videobuf_queue *q,
164 if (!q->ext_lock) in videobuf_queue_lock()
165 mutex_lock(&q->vb_lock); in videobuf_queue_lock()
170 if (!q->ext_lock) in videobuf_queue_unlock()
171 mutex_unlock(&q->vb_lock); in videobuf_queue_unlock()
202 int videobuf_qbuf(struct videobuf_queue *q,
[all …]
/openbmc/linux/drivers/accel/habanalabs/common/
hw_queue.c
33 int delta = (q->pi - queue_ci_get(&q->ci, queue_len)); in queue_free_slots()
94 q->pi = hl_queue_inc_ptr(q->pi); in hl_hw_queue_submit_bd()
179 free_slots_cnt = queue_free_slots(q, q->int_queue_len); in int_queue_sanity_checks()
317 q->shadow_queue[hl_pi_2_offset(q->pi)] = job; in ext_queue_schedule_job()
351 pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd); in int_queue_schedule_job()
353 q->pi++; in int_queue_schedule_job()
354 q->pi &= ((q->int_queue_len << 1) - 1); in int_queue_schedule_job()
840 q->pi = 0; in ext_and_cpu_queue_init()
859 &q->bus_address, &q->int_queue_len); in int_queue_init()
868 q->pi = 0; in int_queue_init()
[all …]
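The hw_queue.c hits above use the doubled-index trick: pi runs over twice the queue length (mask (len << 1) - 1), so pi == ci can only mean empty and the occupancy falls out of the masked difference. A sketch under that reading (names illustrative, not the driver's structs):

#include <stdio.h>

#define QUEUE_LEN 8   /* must be a power of two */

static int free_slots(unsigned int pi, unsigned int ci)
{
    /* pi - ci over the doubled range gives the current occupancy */
    int delta = (int)((pi - ci) & ((QUEUE_LEN << 1) - 1));
    return QUEUE_LEN - delta;
}

int main(void)
{
    unsigned int pi = 0, ci = 0;

    for (int i = 0; i < 10; i++) {
        if (!free_slots(pi, ci)) {
            printf("full at pi=%u\n", pi);
            break;
        }
        pi = (pi + 1) & ((QUEUE_LEN << 1) - 1);  /* q->pi &= (len<<1)-1 */
    }
    printf("free slots: %d\n", free_slots(pi, ci));
    return 0;
}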
/openbmc/linux/drivers/net/ethernet/pensando/ionic/
ionic_txrx.c
36 if (q->tail_idx == q->head_idx) { in ionic_txq_poke_doorbell()
47 q->dbval | q->head_idx); in ionic_txq_poke_doorbell()
63 if (q->tail_idx == q->head_idx) in ionic_rxq_poke_doorbell()
72 q->dbval | q->head_idx); in ionic_rxq_poke_doorbell()
388 if (q->tail_idx == q->head_idx) in ionic_rx_service()
394 desc_info = &q->info[q->tail_idx]; in ionic_rx_service()
395 q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); in ionic_rx_service()
442 desc_info = &q->info[q->head_idx]; in ionic_rx_fill()
501 q->dbval | q->head_idx); in ionic_rx_fill()
863 q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); in ionic_tx_service()
[all …]
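The ionic_txrx.c hits above wrap ring indices with a power-of-two mask, (idx + 1) & (num_descs - 1), avoiding a divide; tail_idx == head_idx is the empty test. A minimal sketch:

#include <stdio.h>
#include <stdbool.h>

#define NUM_DESCS 16   /* power of two, as the masking requires */

int main(void)
{
    unsigned int head_idx = 5, tail_idx = 5;

    bool empty = (tail_idx == head_idx);         /* the hits' empty test */
    printf("empty: %d\n", empty);

    head_idx = (head_idx + 1) & (NUM_DESCS - 1); /* produce one */
    tail_idx = (tail_idx + 1) & (NUM_DESCS - 1); /* consume it */
    printf("head=%u tail=%u empty=%d\n",
           head_idx, tail_idx, tail_idx == head_idx);
    return 0;
}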
/openbmc/linux/drivers/net/ethernet/mediatek/
mtk_wed_wo.c
103 int index = (q->tail + 1) % q->n_desc; in mtk_wed_wo_dequeue()
140 while (q->queued < q->n_desc) { in mtk_wed_wo_queue_refill()
155 q->head = (q->head + 1) % q->n_desc; in mtk_wed_wo_queue_refill()
156 entry = &q->entry[q->head]; in mtk_wed_wo_queue_refill()
159 q->entry[q->head].buf = buf; in mtk_wed_wo_queue_refill()
217 u32 index = (q->head - 1) % q->n_desc; in mtk_wed_wo_rx_run_queue()
281 dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc, in mtk_wed_wo_queue_free()
308 memset(&q->cache, 0, sizeof(q->cache)); in mtk_wed_wo_queue_tx_clean()
330 memset(&q->cache, 0, sizeof(q->cache)); in mtk_wed_wo_queue_rx_clean()
350 index = (q->head + 1) % q->n_desc; in mtk_wed_wo_queue_tx_skb()
[all …]
/openbmc/linux/block/
blk-settings.c
24 q->rq_timeout = timeout; in blk_queue_rq_timeout()
100 q->limits.bounce = bounce; in blk_queue_bounce_limit()
149 if (!q->disk) in blk_queue_max_hw_sectors()
342 if (q->limits.physical_block_size < q->limits.logical_block_size) in blk_queue_physical_block_size()
343 q->limits.physical_block_size = q->limits.logical_block_size; in blk_queue_physical_block_size()
345 if (q->limits.io_min < q->limits.physical_block_size) in blk_queue_physical_block_size()
346 q->limits.io_min = q->limits.physical_block_size; in blk_queue_physical_block_size()
367 if (q->limits.zone_write_granularity < q->limits.logical_block_size) in blk_queue_zone_write_granularity()
368 q->limits.zone_write_granularity = q->limits.logical_block_size; in blk_queue_zone_write_granularity()
483 if (!q->disk) in blk_queue_io_opt()
[all …]
blk-mq-sched.c
353 ctx = blk_mq_get_ctx(q); in blk_mq_sched_bio_merge()
392 q->nr_requests); in blk_mq_sched_alloc_map_and_rqs()
456 q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, in blk_mq_init_sched()
471 ret = e->ops.init_sched(q, e); in blk_mq_init_sched()
483 eq = q->elevator; in blk_mq_init_sched()
484 blk_mq_sched_free_rqs(q); in blk_mq_init_sched()
485 blk_mq_exit_sched(q, eq); in blk_mq_init_sched()
498 blk_mq_sched_free_rqs(q); in blk_mq_init_sched()
501 q->elevator = NULL; in blk_mq_init_sched()
515 blk_mq_free_rqs(q->tag_set, q->sched_shared_tags, in blk_mq_sched_free_rqs()
[all …]
/openbmc/linux/net/xdp/
xsk_queue.c
26 struct xsk_queue *q; in xskq_create() local
29 q = kzalloc(sizeof(*q), GFP_KERNEL); in xskq_create()
30 if (!q) in xskq_create()
33 q->nentries = nentries; in xskq_create()
43 kfree(q); in xskq_create()
50 if (!q->ring) { in xskq_create()
51 kfree(q); in xskq_create()
56 return q; in xskq_create()
61 if (!q) in xskq_destroy()
64 vfree(q->ring); in xskq_destroy()
[all …]
/openbmc/linux/drivers/gpu/drm/amd/amdkfd/
kfd_device_queue_manager.c
358 q->doorbell_id = q->properties.queue_id; in allocate_doorbell()
570 q->pipe, q->queue); in create_queue_nocpsch()
594 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, in create_queue_nocpsch()
599 &q->gart_mqd_addr, &q->properties); in create_queue_nocpsch()
612 q->queue, &q->properties, current->mm); in create_queue_nocpsch()
793 q->pipe, q->queue); in destroy_queue_nocpsch_locked()
896 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); in update_queue()
943 q->pipe, q->queue, in update_queue()
1208 q->queue, &q->properties, mm); in restore_process_queues_nocpsch()
1830 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, in create_queue_cpsch()
[all …]
/openbmc/linux/drivers/net/
tap.c
153 if (q->enabled) in tap_enable_queue()
177 q->file = file; in tap_set_queue()
195 if (!q->enabled) in tap_disable_queue()
232 if (q->enabled) in tap_put_queue()
334 if (!q) in tap_handle_frame()
526 if (!q) in tap_open()
538 sock_init_data_uid(&q->sock, &q->sk, current_fsuid()); in tap_open()
590 if (!q) in tap_poll()
1032 q->flags = (q->flags & ~TAP_IFFEATURES) | u; in tap_ioctl()
1294 if (!q) in tap_get_socket()
[all …]
/openbmc/qemu/hw/usb/
hcd-ehci.c
552 q = g_malloc0(sizeof(*q)); in ehci_alloc_queue()
566 if (!q->last_pid || !q->dev) { in ehci_queue_stopped()
570 usb_device_ep_stopped(q->dev, usb_ep_get(q->dev, q->last_pid, endp)); in ehci_queue_stopped()
608 EHCIQueueHead *head = q->async ? &q->ehci->aqueues : &q->ehci->pqueues; in ehci_free_queue()
1278 q->qhaddr, q->qh.next, q->qtdaddr, in ehci_execute_complete()
1675 if (q->qhaddr != q->qh.next) { in ehci_state_fetchqh()
1691 q->qtdaddr = q->qh.current_qtd; in ehci_state_fetchqh()
1774 q->qtdaddr = q->qh.altnext_qtd; in ehci_state_advqueue()
1781 q->qtdaddr = q->qh.next_qtd; in ehci_state_advqueue()
1866 if (ehci_get_fetch_addr(q->ehci, q->async) != q->qh.next) { in ehci_state_horizqh()
[all …]
