/openbmc/linux/net/sched/

sch_multiq.c
   25  struct Qdisc **queues;  member
   54  return q->queues[0];  in multiq_classify()
   56  return q->queues[band];  in multiq_classify()
  105  qdisc = q->queues[q->curband];  in multiq_dequeue()
  137  qdisc = q->queues[curband];  in multiq_peek()
  154  qdisc_reset(q->queues[band]);  in multiq_reset()
  166  qdisc_put(q->queues[band]);  in multiq_destroy()
  168  kfree(q->queues);  in multiq_destroy()
  196  if (q->queues[i] != &noop_qdisc) {  in multiq_tune()
  197  struct Qdisc *child = q->queues[i];  in multiq_tune()
  [all …]

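Taken together, these hits outline the multiq shape: an array of per-band child qdiscs, a classifier that picks a band (falling back to band 0), and a dequeue that round-robins over the bands via curband. A minimal user-space sketch of that pattern — the struct and function names here are illustrative stand-ins, not the kernel's:

#include <stddef.h>

/* Illustrative stand-ins for struct Qdisc and the multiq private data. */
struct queue { int len; };

struct multiq {
    size_t bands;       /* number of bands (child queues) */
    size_t curband;     /* where the round-robin left off */
    struct queue **queues;
};

/* Pick the queue for a band, falling back to band 0 when the band is out
 * of range -- the q->queues[0] / q->queues[band] pair in multiq_classify(). */
static struct queue *classify(struct multiq *q, size_t band)
{
    if (band >= q->bands)
        return q->queues[0];
    return q->queues[band];
}

/* Serve the bands round-robin, as multiq_dequeue() does with q->curband;
 * returns the first non-empty band's queue, or NULL if all are empty. */
static struct queue *next_queue(struct multiq *q)
{
    for (size_t i = 0; i < q->bands; i++) {
        q->curband = (q->curband + 1) % q->bands;
        if (q->queues[q->curband]->len > 0)
            return q->queues[q->curband];
    }
    return NULL;
}

sch_prio.c below keeps the same band array but serves it by strict priority rather than rotation.
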
sch_prio.c
   26  struct Qdisc *queues[TCQ_PRIO_BANDS];  member
   57  return q->queues[q->prio2band[band & TC_PRIO_MAX]];  in prio_classify()
   63  return q->queues[q->prio2band[0]];  in prio_classify()
   65  return q->queues[band];  in prio_classify()
  103  struct Qdisc *qdisc = q->queues[prio];  in prio_peek()
  117  struct Qdisc *qdisc = q->queues[prio];  in prio_dequeue()
  137  qdisc_reset(q->queues[prio]);  in prio_reset()
  173  qdisc_put(q->queues[prio]);  in prio_destroy()
  180  struct Qdisc *queues[TCQ_PRIO_BANDS];  in prio_tune() (local)
  198  queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,  in prio_tune()
  [all …]

/openbmc/qemu/net/

vhost-user.c
   61  static void vhost_user_stop(int queues, NetClientState *ncs[])  in vhost_user_stop() (argument)
   66  for (i = 0; i < queues; i++) {  in vhost_user_stop()
   78  static int vhost_user_start(int queues, NetClientState *ncs[],  in vhost_user_start() (argument)
   89  for (i = 0; i < queues; i++) {  in vhost_user_start()
  106  if (queues > max_queues) {  in vhost_user_start()
  253  int queues, i;  in chr_closed_bh() (local)
  255  queues = qemu_find_net_clients_except(name, ncs,  in chr_closed_bh()
  258  assert(queues < MAX_QUEUE_NUM);  in chr_closed_bh()
  262  for (i = queues -1; i >= 0; i--) {  in chr_closed_bh()
  283  int queues;  in net_vhost_user_event() (local)
  [all …]

/openbmc/qemu/backends/

cryptodev-vhost-user.c
   76  static void cryptodev_vhost_user_stop(int queues,  in cryptodev_vhost_user_stop() (argument)
   81  for (i = 0; i < queues; i++) {  in cryptodev_vhost_user_stop()
   92  cryptodev_vhost_user_start(int queues,  in cryptodev_vhost_user_start() (argument)
  100  for (i = 0; i < queues; i++) {  in cryptodev_vhost_user_start()
  117  if (queues > max_queues) {  in cryptodev_vhost_user_start()
  158  int queues = b->conf.peers.queues;  in cryptodev_vhost_user_event() (local)
  160  assert(queues < MAX_CRYPTO_QUEUE_NUM);  in cryptodev_vhost_user_event()
  164  if (cryptodev_vhost_user_start(queues, s) < 0) {  in cryptodev_vhost_user_event()
  171  cryptodev_vhost_user_stop(queues, s);  in cryptodev_vhost_user_event()
  184  int queues = backend->conf.peers.queues;  in cryptodev_vhost_user_init() (local)
  [all …]

/openbmc/linux/drivers/scsi/aacraid/

comminit.c
  373  struct aac_entry * queues;  in aac_comm_init() (local)
  375  struct aac_queue_block * comm = dev->queues;  in aac_comm_init()
  394  queues = (struct aac_entry *)(((ulong)headers) + hdrsize);  in aac_comm_init()
  397  comm->queue[HostNormCmdQueue].base = queues;  in aac_comm_init()
  399  queues += HOST_NORM_CMD_ENTRIES;  in aac_comm_init()
  403  comm->queue[HostHighCmdQueue].base = queues;  in aac_comm_init()
  406  queues += HOST_HIGH_CMD_ENTRIES;  in aac_comm_init()
  410  comm->queue[AdapNormCmdQueue].base = queues;  in aac_comm_init()
  413  queues += ADAP_NORM_CMD_ENTRIES;  in aac_comm_init()
  417  comm->queue[AdapHighCmdQueue].base = queues;  in aac_comm_init()
  [all …]

/openbmc/linux/drivers/net/wireless/silabs/wfx/

queue.c
  229  struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];  in wfx_tx_queues_get_skb() (local)
  239  WARN_ON(num_queues >= ARRAY_SIZE(queues));  in wfx_tx_queues_get_skb()
  240  queues[num_queues] = &wvif->tx_queue[i];  in wfx_tx_queues_get_skb()
  242  if (wfx_tx_queue_get_weight(queues[j]) <  in wfx_tx_queues_get_skb()
  243      wfx_tx_queue_get_weight(queues[j - 1]))  in wfx_tx_queues_get_skb()
  244  swap(queues[j - 1], queues[j]);  in wfx_tx_queues_get_skb()
  254  skb = skb_dequeue(&queues[i]->cab);  in wfx_tx_queues_get_skb()
  262  WARN_ON(queues[i] != &wvif->tx_queue[skb_get_queue_mapping(skb)]);  in wfx_tx_queues_get_skb()
  263  atomic_inc(&queues[i]->pending_frames);  in wfx_tx_queues_get_skb()
  264  trace_queues_stats(wdev, queues[i]);  in wfx_tx_queues_get_skb()
  [all …]

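The wfx hits show the per-AC queues being gathered into a local array and insertion-sorted by weight before one is served. A small sketch of that ordering step, with hypothetical types standing in for wfx_queue and wfx_tx_queue_get_weight():

#include <stddef.h>

/* Hypothetical weighted queue; wfx reads the weight via
 * wfx_tx_queue_get_weight(). */
struct txq { unsigned int weight; };

/* Append q and bubble it toward the front until the array is ordered by
 * ascending weight -- the same one-pass step as lines 240-244 above. */
static void insert_sorted(struct txq *queues[], size_t num, struct txq *q)
{
    queues[num] = q;
    for (size_t j = num; j > 0; j--) {
        if (queues[j]->weight < queues[j - 1]->weight) {
            struct txq *tmp = queues[j - 1];
            queues[j - 1] = queues[j];
            queues[j] = tmp;
        } else {
            break;
        }
    }
}
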
/openbmc/linux/drivers/nvme/target/

loop.c
   30  struct nvme_loop_queue *queues;  member
   71  return queue - queue->ctrl->queues;  in nvme_loop_queue_idx()
  176  struct nvme_loop_queue *queue = &ctrl->queues[0];  in nvme_loop_submit_async_event()
  198  iod->queue = &ctrl->queues[queue_idx];  in nvme_loop_init_iod()
  222  struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];  in nvme_loop_init_hctx()
  242  struct nvme_loop_queue *queue = &ctrl->queues[0];  in nvme_loop_init_admin_hctx()
  266  if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))  in nvme_loop_destroy_admin_queue()
  275  nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);  in nvme_loop_destroy_admin_queue()
  292  kfree(ctrl->queues);  in nvme_loop_free_ctrl()
  303  clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);  in nvme_loop_destroy_io_queues()
  [all …]

/openbmc/linux/drivers/media/platform/nxp/imx8-isi/

imx8-isi-m2m.c
   58  } queues;  member
   85  return &ctx->queues.out;  in mxc_isi_m2m_ctx_qdata()
   87  return &ctx->queues.cap;  in mxc_isi_m2m_ctx_qdata()
  112  src_vbuf->sequence = ctx->queues.out.sequence++;  in mxc_isi_m2m_frame_write_done()
  113  dst_vbuf->sequence = ctx->queues.cap.sequence++;  in mxc_isi_m2m_frame_write_done()
  135  .width = ctx->queues.out.format.width,  in mxc_isi_m2m_device_run()
  136  .height = ctx->queues.out.format.height,  in mxc_isi_m2m_device_run()
  139  .width = ctx->queues.cap.format.width,  in mxc_isi_m2m_device_run()
  140  .height = ctx->queues.cap.format.height,  in mxc_isi_m2m_device_run()
  143  .width = ctx->queues.cap.format.width,  in mxc_isi_m2m_device_run()
  [all …]

/openbmc/linux/Documentation/ABI/testing/

sysfs-class-net-queues
    1  What: /sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
   11  What: /sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
   19  What: /sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
   27  What: /sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate
   35  What: /sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
   45  What: /sys/class/net/<iface>/queues/tx-<queue>/xps_rxqs
   56  What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
   65  What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
   73  What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
   82  What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
  [all …]

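These attributes are plain sysfs files, so they can be driven from any language. A minimal sketch in C that caps a transmit queue's rate by writing tx_maxrate — the interface name, queue number, and 40 Mb/s value are illustrative only (the same value the cpsw example further down writes from the shell):

#include <stdio.h>

int main(void)
{
    /* Illustrative path: assumes eth0 exists and has a tx-0 queue. */
    const char *path = "/sys/class/net/eth0/queues/tx-0/tx_maxrate";
    FILE *f = fopen(path, "w");

    if (!f) {
        perror(path);
        return 1;
    }
    fprintf(f, "40\n");  /* rate cap, in Mb/s */
    fclose(f);
    return 0;
}
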
/openbmc/linux/sound/virtio/

virtio_card.h
   51  struct virtio_snd_queue queues[VIRTIO_SND_VQ_MAX];  member
   70  return &snd->queues[VIRTIO_SND_VQ_CONTROL];  in virtsnd_control_queue()
   76  return &snd->queues[VIRTIO_SND_VQ_EVENT];  in virtsnd_event_queue()
   82  return &snd->queues[VIRTIO_SND_VQ_TX];  in virtsnd_tx_queue()
   88  return &snd->queues[VIRTIO_SND_VQ_RX];  in virtsnd_rx_queue()

/openbmc/linux/Documentation/devicetree/bindings/soc/ti/

keystone-navigator-qmss.txt
    9  management of the packet queues. Packets are queued/de-queued by writing or
   32  -- managed-queues : the actual queues managed by each queue manager
   33       instance, specified as <"base queue #" "# of queues">.
   51  - qpend : pool of qpend (interruptible) queues
   52  - general-purpose : pool of general queues, primarily used
   53       as free descriptor queues or the
   54       transmit DMA queues.
   55  - accumulator : pool of queues on PDSP accumulator channel
   57  -- qrange : number of queues to use per queue range, specified as
   58       <"base queue #" "# of queues">.
  [all …]

/openbmc/qemu/tests/qtest/fuzz/

virtio_scsi_fuzz.c
   64  static void virtio_scsi_fuzz(QTestState *s, QVirtioSCSIQueues* queues,  in virtio_scsi_fuzz() (argument)
   98  vqa.queue = vqa.queue % queues->num_queues;  in virtio_scsi_fuzz()
  106  q = queues->vq[vqa.queue];  in virtio_scsi_fuzz()
  129  qvirtqueue_kick(s, dev, queues->vq[i], free_head[i]);  in virtio_scsi_fuzz()
  138  static QVirtioSCSIQueues *queues;  in virtio_scsi_with_flag_fuzz() (local)
  141  queues = qvirtio_scsi_init(scsi->vdev, *(uint64_t *)Data);  in virtio_scsi_with_flag_fuzz()
  142  virtio_scsi_fuzz(s, queues,  in virtio_scsi_with_flag_fuzz()

virtio_blk_fuzz.c
   61  static void virtio_blk_fuzz(QTestState *s, QVirtioBlkQueues* queues,  in virtio_blk_fuzz() (argument)
   95  vqa.queue = vqa.queue % queues->num_queues;  in virtio_blk_fuzz()
  102  q = queues->vq[vqa.queue];  in virtio_blk_fuzz()
  125  qvirtqueue_kick(s, dev, queues->vq[i], free_head[i]);  in virtio_blk_fuzz()
  134  static QVirtioBlkQueues *queues;  in virtio_blk_with_flag_fuzz() (local)
  137  queues = qvirtio_blk_init(blk->vdev, *(uint64_t *)Data);  in virtio_blk_with_flag_fuzz()
  138  virtio_blk_fuzz(s, queues,  in virtio_blk_with_flag_fuzz()

/openbmc/linux/Documentation/networking/device_drivers/ethernet/ti/

cpsw.rst
   26  - TX queues must be rated starting from txq0 that has highest priority
   28  - CBS shapers should be used with rated queues
   30  potential incoming rate, thus, rate of all incoming tx queues has
  150  // Add 4 tx queues, for interface Eth0, and 1 tx queue for Eth1
  156  // Check if num of queues is set correctly:
  172  // TX queues must be rated starting from 0, so set bws for tx0 and tx1
  175  // Leave last 2 tx queues not rated.
  176  $ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
  177  $ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
  181  // Check maximum rate of tx (cpdma) queues:
  [all …]

/openbmc/linux/tools/perf/util/

intel-bts.c
   46  struct auxtrace_queues queues;  member
  211  for (i = 0; i < bts->queues.nr_queues; i++) {  in intel_bts_setup_queues()
  212  ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],  in intel_bts_setup_queues()
  222  if (bts->queues.new_data) {  in intel_bts_update_queues()
  223  bts->queues.new_data = false;  in intel_bts_update_queues()
  465  queue = &btsq->bts->queues.queue_array[btsq->queue_nr];  in intel_bts_process_queue()
  539  struct auxtrace_queues *queues = &bts->queues;  in intel_bts_process_tid_exit() (local)
  542  for (i = 0; i < queues->nr_queues; i++) {  in intel_bts_process_tid_exit()
  543  struct auxtrace_queue *queue = &bts->queues.queue_array[i];  in intel_bts_process_tid_exit()
  568  queue = &bts->queues.queue_array[queue_nr];  in intel_bts_process_queues()
  [all …]

s390-cpumsf.c
  170  struct auxtrace_queues queues;  member
  203  if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)  in s390_cpumcf_dumpctr()
  206  q = &sf->queues.queue_array[sample->cpu];  in s390_cpumcf_dumpctr()
  701  queue = &sfq->sf->queues.queue_array[sfq->queue_nr];  in s390_cpumsf_run_decoder()
  825  for (i = 0; i < sf->queues.nr_queues; i++) {  in s390_cpumsf_setup_queues()
  826  ret = s390_cpumsf_setup_queue(sf, &sf->queues.queue_array[i],  in s390_cpumsf_setup_queues()
  836  if (!sf->queues.new_data)  in s390_cpumsf_update_queues()
  839  sf->queues.new_data = false;  in s390_cpumsf_update_queues()
  860  queue = &sf->queues.queue_array[queue_nr];  in s390_cpumsf_process_queues()
  985  err = auxtrace_queues__add_event(&sf->queues, session, event,  in s390_cpumsf_process_auxtrace_event()
  [all …]

auxtrace.c
  221  int auxtrace_queues__init(struct auxtrace_queues *queues)  in auxtrace_queues__init() (argument)
  223  queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;  in auxtrace_queues__init()
  224  queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);  in auxtrace_queues__init()
  225  if (!queues->queue_array)  in auxtrace_queues__init()
  230  static int auxtrace_queues__grow(struct auxtrace_queues *queues,  in auxtrace_queues__grow() (argument)
  233  unsigned int nr_queues = queues->nr_queues;  in auxtrace_queues__grow()
  243  if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)  in auxtrace_queues__grow()
  250  for (i = 0; i < queues->nr_queues; i++) {  in auxtrace_queues__grow()
  251  list_splice_tail(&queues->queue_array[i].head,  in auxtrace_queues__grow()
  253  queue_array[i].tid = queues->queue_array[i].tid;  in auxtrace_queues__grow()
  [all …]

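The auxtrace hits sketch a grow-by-doubling array: start from the current count, double until the target fits, and bail out if the arithmetic wraps. A simplified stand-alone version of that pattern (a flat copy instead of the list splicing the perf code does; the types are stand-ins):

#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for perf's auxtrace_queue; the real one holds a
 * list head that gets spliced rather than copied. */
struct queue { int tid; };

static int grow(struct queue **array, unsigned int *nr, unsigned int want)
{
    unsigned int new_nr = *nr ? *nr : 1;
    struct queue *new_array;

    /* Double until the target fits, as auxtrace_queues__grow() does. */
    while (new_nr && new_nr < want)
        new_nr <<= 1;

    /* Wrap-around guard, mirroring the check at line 243 above. */
    if (new_nr < *nr || new_nr < want)
        return -1;

    new_array = calloc(new_nr, sizeof(*new_array));
    if (!new_array)
        return -1;

    if (*nr)  /* perf splices each queue's list here; a flat copy suffices */
        memcpy(new_array, *array, *nr * sizeof(**array));
    free(*array);
    *array = new_array;
    *nr = new_nr;
    return 0;
}
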
arm-spe.c
   42  struct auxtrace_queues queues;  member
  154  queue = &speq->spe->queues.queue_array[speq->queue_nr];  in arm_spe_get_trace()
  271  arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);  in arm_spe_set_tid()
  768  for (i = 0; i < spe->queues.nr_queues; i++) {  in arm_spe__setup_queues()
  769  ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);  in arm_spe__setup_queues()
  779  if (spe->queues.new_data) {  in arm_spe__update_queues()
  780  spe->queues.new_data = false;  in arm_spe__update_queues()
  822  queue = &spe->queues.queue_array[queue_nr];  in arm_spe_process_queues()
  863  struct auxtrace_queues *queues = &spe->queues;  in arm_spe_process_timeless_queues() (local)
  867  for (i = 0; i < queues->nr_queues; i++) {  in arm_spe_process_timeless_queues()
  [all …]

/openbmc/linux/drivers/target/

target_core_tmr.c
  118  flush_work(&dev->queues[i].sq.work);  in core_tmr_abort_task()
  120  spin_lock_irqsave(&dev->queues[i].lock, flags);  in core_tmr_abort_task()
  121  list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,  in core_tmr_abort_task()
  148  spin_unlock_irqrestore(&dev->queues[i].lock, flags);  in core_tmr_abort_task()
  163  spin_unlock_irqrestore(&dev->queues[i].lock, flags);  in core_tmr_abort_task()
  301  flush_work(&dev->queues[i].sq.work);  in core_tmr_drain_state_list()
  303  spin_lock_irqsave(&dev->queues[i].lock, flags);  in core_tmr_drain_state_list()
  304  list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,  in core_tmr_drain_state_list()
  333  spin_unlock_irqrestore(&dev->queues[i].lock, flags);  in core_tmr_drain_state_list()

/openbmc/linux/Documentation/arch/arm/keystone/

knav-qmss.rst
   15  management of the packet queues. Packets are queued/de-queued by writing or
   24  knav qmss driver provides a set of APIs to drivers to open/close qmss queues,
   25  allocate descriptor pools, map the descriptors, push/pop to queues etc. For
   31  Accumulator QMSS queues using PDSP firmware
   34  queue or multiple contiguous queues. drivers/soc/ti/knav_qmss_acc.c is the
   37  1 or 32 queues per channel. More description on the firmware is available in
   56  Use of accumulated queues requires the firmware image to be present in the
   57  file system. The driver doesn't acc queues to the supported queue range if

/openbmc/linux/include/linux/

ptr_ring.h
  625  void ***queues;  in ptr_ring_resize_multiple() (local)
  628  queues = kmalloc_array(nrings, sizeof(*queues), gfp);  in ptr_ring_resize_multiple()
  629  if (!queues)  in ptr_ring_resize_multiple()
  633  queues[i] = __ptr_ring_init_queue_alloc(size, gfp);  in ptr_ring_resize_multiple()
  634  if (!queues[i])  in ptr_ring_resize_multiple()
  641  queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],  in ptr_ring_resize_multiple()
  648  kvfree(queues[i]);  in ptr_ring_resize_multiple()
  650  kfree(queues);  in ptr_ring_resize_multiple()
  656  kvfree(queues[i]);  in ptr_ring_resize_multiple()
  658  kfree(queues);  in ptr_ring_resize_multiple()

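ptr_ring_resize_multiple() follows an all-or-nothing discipline visible in the hits: allocate every replacement queue first, swap each into its ring only once all allocations have succeeded, then free the displaced buffers; any allocation failure unwinds without touching the rings. A user-space sketch of that shape, with illustrative structures and no locking:

#include <stdlib.h>

struct ring { void **queue; size_t size; };

static int resize_multiple(struct ring *rings[], size_t nrings, size_t size)
{
    void ***queues = calloc(nrings, sizeof(*queues));
    size_t i;

    if (!queues)
        return -1;

    /* Phase 1: allocate all replacement buffers up front. */
    for (i = 0; i < nrings; i++) {
        queues[i] = calloc(size, sizeof(void *));
        if (!queues[i])
            goto nomem;
    }

    /* Phase 2: swap each buffer in (the kernel does this under the
     * ring's lock), keeping the old buffer in its slot. */
    for (i = 0; i < nrings; i++) {
        void **old = rings[i]->queue;
        rings[i]->queue = queues[i];
        rings[i]->size = size;
        queues[i] = old;
    }

    /* Phase 3: free the displaced old buffers. */
    for (i = 0; i < nrings; i++)
        free(queues[i]);
    free(queues);
    return 0;

nomem:
    while (i--)
        free(queues[i]);
    free(queues);
    return -1;
}
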
/openbmc/linux/drivers/vdpa/alibaba/

eni_vdpa.c
   45  int queues;  member
  118  for (i = 0; i < eni_vdpa->queues; i++) {  in eni_vdpa_free_irq()
  164  int queues = eni_vdpa->queues;  in eni_vdpa_request_irq() (local)
  165  int vectors = queues + 1;  in eni_vdpa_request_irq()
  177  for (i = 0; i < queues; i++) {  in eni_vdpa_request_irq()
  195  irq = pci_irq_vector(pdev, queues);  in eni_vdpa_request_irq()
  202  vp_legacy_config_vector(ldev, queues);  in eni_vdpa_request_irq()
  492  eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);  in eni_vdpa_probe()
  494  eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,  in eni_vdpa_probe()
  503  for (i = 0; i < eni_vdpa->queues; i++) {  in eni_vdpa_probe()
  [all …]

/openbmc/linux/Documentation/networking/

tc-queue-filters.rst
    7  TC can be used for directing traffic to either a set of queues or
   12  1) TC filter directing traffic to a set of queues is achieved
   14  the priority maps to a traffic class (set of queues) when
   23  queues and/or a single queue are supported as below:
   25  1) TC flower filter directs incoming traffic to a set of queues using

/openbmc/linux/Documentation/block/

blk-mq.rst
   37  spawns multiple queues with individual entry points local to the CPU, removing
   49  blk-mq has two group of queues: software staging queues and hardware dispatch
   50  queues. When the request arrives at the block layer, it will try the shortest
   56  Then, after the requests are processed by software queues, they will be placed
   62  Software staging queues
   65  The block IO subsystem adds requests in the software staging queues
   71  the number of queues is defined by a per-CPU or per-node basis.
   93  requests from different queues, otherwise there would be cache trashing and a
   99  queue (a.k.a. run the hardware queue), the software queues mapped to that
  102  Hardware dispatch queues
  [all …]

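The excerpts above describe blk-mq's two tiers: per-CPU software staging queues funneling into a smaller set of hardware dispatch queues. A toy illustration of that fan-in — the modulo map is a deliberate simplification, not the kernel's real CPU-to-queue mapping:

#include <stdio.h>

#define NR_CPUS      8   /* illustrative counts */
#define NR_HW_QUEUES 2

/* Many software staging queues (one per CPU) map down onto fewer
 * hardware dispatch queues. */
static unsigned int cpu_to_hw_queue(unsigned int cpu)
{
    return cpu % NR_HW_QUEUES;
}

int main(void)
{
    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu %u -> software queue %u -> hardware queue %u\n",
               cpu, cpu, cpu_to_hw_queue(cpu));
    return 0;
}
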
/openbmc/linux/drivers/staging/qlge/

TODO
   13  * rename "rx" queues to "completion" queues. Calling tx completion queues "rx
   14  queues" is confusing.
   18  frames, resets the link, device and driver buffer queues become