/openbmc/linux/drivers/mmc/core/
queue.c
  26  static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
  29  return mq->in_flight[MMC_ISSUE_DCMD];
  32  void mmc_cqe_check_busy(struct mmc_queue *mq)
  34  if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
  35  mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
  60  enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
  62  struct mmc_host *host = mq->card->host;
  73  static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
  75  if (!mq->recovery_needed) {
  76  mq->recovery_needed = true;
  [all …]

block.c
 179  struct mmc_queue *mq);
 249  struct mmc_queue *mq;  in power_ro_lock_store()
 260  mq = &md->queue;
 263  req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
 663  struct mmc_queue *mq;  in mmc_blk_ioctl_cmd()
 683  mq = &md->queue;
 684  req = blk_mq_alloc_request(mq->queue,
 714  struct mmc_queue *mq;  in mmc_blk_ioctl_multi_cmd()
 756  mq = &md->queue;
 757  req = blk_mq_alloc_request(mq->queue,
 [all …]

queue.h
  97  struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card);
 104  void mmc_cqe_check_busy(struct mmc_queue *mq);
 107  enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
 109  static inline int mmc_tot_in_flight(struct mmc_queue *mq)
 111  return mq->in_flight[MMC_ISSUE_SYNC] +
 112  mq->in_flight[MMC_ISSUE_DCMD] +
 113  mq->in_flight[MMC_ISSUE_ASYNC];
 116  static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
 118  return mq->in_flight[MMC_ISSUE_DCMD] +
 119  mq->in_flight[MMC_ISSUE_ASYNC];

block.h
   8  void mmc_blk_cqe_recovery(struct mmc_queue *mq);
  12  enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
  14  void mmc_blk_mq_recovery(struct mmc_queue *mq);

/openbmc/linux/drivers/md/
dm-cache-policy-smq.c
 876  static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
 878  return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
 881  static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
 883  return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
 886  static void __update_writeback_sentinels(struct smq_policy *mq)
 889  struct queue *q = &mq->dirty;
 893  sentinel = writeback_sentinel(mq, level);
 899  static void __update_demote_sentinels(struct smq_policy *mq)
 902  struct queue *q = &mq->clean;
 906  sentinel = demote_sentinel(mq, level);
 [all …]

/openbmc/linux/drivers/scsi/arm/
msgqueue.c
  24  struct msgqueue_entry *mq;  in mqe_alloc()
  26  if ((mq = msgq->free) != NULL)
  27  msgq->free = mq->next;
  29  return mq;
  38  static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq)
  40  if (mq) {
  41  mq->next = msgq->free;
  42  msgq->free = mq;
  82  struct msgqueue_entry *mq = msgq->qe;  in msgqueue_msglength()
  85  for (mq = msgq->qe; mq; mq = mq->next)
 [all …]

/openbmc/linux/drivers/sh/maple/
maple.c
 121  void (*callback) (struct mapleq *mq),  in maple_getcond_callback()
 139  struct mapleq *mq;  in maple_release_device()
 142  mq = mdev->mq;
 143  kmem_cache_free(maple_queue_cache, mq->recvbuf);
 144  kfree(mq);
 171  mdev->mq->command = command;  in maple_add_packet()
 172  mdev->mq->length = length;
 175  mdev->mq->sendbuf = sendbuf;
 178  list_add_tail(&mdev->mq->list, &maple_waitq);
 187  struct mapleq *mq;  in maple_allocq()
 [all …]

/openbmc/linux/drivers/misc/sgi-xp/
xpc_uv.c
 112  xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
 114  int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
 117  mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
 119  if (mq->irq < 0)
 120  return mq->irq;
 122  mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
 126  mq->irq = SGI_XPC_ACTIVATE;
 128  mq->irq = SGI_XPC_NOTIFY;
 132  mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
 133  uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
 [all …]

/openbmc/linux/drivers/misc/sgi-gru/
grukservices.c
 134  #define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h]))
 546  struct message_queue *mq = p;  in gru_create_message_queue()
 550  memset(mq, 0, bytes);
 551  mq->start = &mq->data;
 552  mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
 553  mq->next = &mq->data;
 554  mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
 555  mq->qlines = qlines;
 556  mq->hstatus[0] = 0;
 557  mq->hstatus[1] = 1;
 [all …]

/openbmc/linux/block/
Makefile
   9  blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
  10  blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
  24  obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o
  31  obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
  32  obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
  35  obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
  36  obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o

/openbmc/linux/drivers/scsi/elx/efct/
efct_hw_queues.c
  17  struct hw_mq *mq = NULL;  in efct_hw_init_queues()
  51  mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
  52  if (!mq) {
 247  struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);  in efct_hw_new_mq()
 249  if (!mq)
 252  mq->cq = cq;
 253  mq->type = SLI4_QTYPE_MQ;
 254  mq->instance = cq->eq->hw->mq_count++;
 255  mq->entry_count = entry_count;
 256  mq->entry_size = EFCT_HW_MQ_DEPTH;
 [all …]

/openbmc/linux/drivers/mailbox/
omap-mailbox.c
 257  struct omap_mbox_queue *mq =  in mbox_rx_work()
 263  while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
 264  len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
 268  mbox_chan_received_data(mq->mbox->chan, (void *)data);
 269  spin_lock_irq(&mq->lock);
 270  if (mq->full) {
 271  mq->full = false;
 272  _omap_mbox_enable_irq(mq->mbox, IRQ_RX);
 274  spin_unlock_irq(&mq->lock);
 290  struct omap_mbox_queue *mq = mbox->rxq;  in __mbox_rx_interrupt()
 [all …]

/openbmc/openpower-hw-diags/
listener.cpp
  58  message_queue mq(open_or_create, mq_listener, 1, max_command_len);  in threadListener()
  71  mq.receive((void*)&buffer, max_command_len, recvd_size,
 144  message_queue mq(open_only, mq_listener);  in sendCmdLine()
 148  mq.send(i_argv[count], strlen(i_argv[count]), 0);
 152  mq.send(msg_send_end, strlen(msg_send_end), 0);
 168  message_queue mq(open_only, mq_listener);  in listenerMqExists()

/openbmc/linux/Documentation/admin-guide/device-mapper/
cache-policies.rst
  29  multiqueue (mq)
  48  with the multiqueue (mq) policy.
  50  The smq policy (vs mq) offers the promise of less memory utilization,
  54  Users may switch from "mq" to "smq" simply by appropriately reloading a
  56  mq policy's hints to be dropped. Also, performance of the cache may
  63  The mq policy used a lot of memory; 88 bytes per cache block on a 64
  78  mq placed entries in different levels of the multiqueue structures
  91  The mq policy maintained a hit count for each cache block. For a
 105  Testing smq shows substantially better performance than mq.
 129  /dev/sdd 512 0 mq 4 sequential_threshold 1024 random_threshold 8"

/openbmc/linux/include/linux/
maple.h
  70  struct mapleq *mq;
  71  void (*callback) (struct mapleq * mq);
  90  void (*callback) (struct mapleq * mq),

/openbmc/linux/drivers/net/ethernet/netronome/nfp/abm/
qdisc.c
 304  struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);  in nfp_abm_qdisc_clear_mq()
 307  if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
 309  for (i = 0; i < mq->num_children; i++)
 310  if (mq->children[i] == qdisc) {
 311  mq->children[i] = NULL;
 791  memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));  in nfp_abm_mq_stats()
 792  memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));
 803  nfp_abm_stats_propagate(&qdisc->mq.stats,
 805  nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
 810  nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,

/openbmc/linux/drivers/mtd/maps/
vmu-flash.c
  89  static void vmu_blockread(struct mapleq *mq)
  94  mdev = mq->dev;
 101  memcpy(card->blockread, mq->recvbuf->buf + 12,
 191  list_del_init(&(mdev->mq->list));  in maple_vmu_read_block()
 192  kfree(mdev->mq->sendbuf);
 193  mdev->mq->sendbuf = NULL;
 283  kfree(mdev->mq->sendbuf);  in maple_vmu_write_block()
 284  mdev->mq->sendbuf = NULL;
 285  list_del_init(&(mdev->mq->list));
 499  static void vmu_queryblocks(struct mapleq *mq)
 [all …]

/openbmc/linux/Documentation/block/
switching-sched.rst
  17  the fly to select one of mq-deadline, none, bfq, or kyber schedulers -
  32  [mq-deadline] kyber bfq none
  35  [none] mq-deadline kyber bfq
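
The switching-sched.rst lines above are about selecting one of the listed schedulers at runtime by writing its name to /sys/block/<dev>/queue/scheduler (reading the file back shows the active scheduler in brackets, as in the two excerpted lines). The C sketch below is illustrative only and is not taken from this tree; the device name sda and the choice of bfq are assumptions.

/* Illustrative sketch only: select the "bfq" scheduler for /dev/sda via the
 * sysfs attribute documented in switching-sched.rst. Assumes sda exists,
 * bfq is available, and the program runs as root. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/sda/queue/scheduler", "w");

	if (!f) {
		perror("open /sys/block/sda/queue/scheduler");
		return 1;
	}
	if (fputs("bfq\n", f) == EOF) {	/* equivalent to: echo bfq > .../scheduler */
		perror("write scheduler name");
		fclose(f);
		return 1;
	}
	fclose(f);
	return 0;
}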

blk-mq.rst
   4  Multi-Queue Block IO Queueing Mechanism (blk-mq)
  36  to different CPUs) wanted to perform block IO. Instead of this, the blk-mq API
  45  for instance), blk-mq takes action: it will store and manage IO requests to
  49  blk-mq has two group of queues: software staging queues and hardware dispatch
  59  resources to accept more requests, blk-mq will places requests on a temporary
 142  … Block IO: Introducing Multi-queue SSD Access on Multi-core Systems <http://kernel.dk/blk-mq.pdf>`_
 151  .. kernel-doc:: include/linux/blk-mq.h
 153  .. kernel-doc:: block/blk-mq.c
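
The blk-mq.rst lines above describe the split between software staging queues and hardware dispatch queues. For orientation only, a hypothetical minimal driver would plug into that machinery roughly as sketched below: every demo_* identifier is invented, the queue depth is an arbitrary placeholder, and a real driver completes requests asynchronously from its interrupt handler rather than inline.

/* Hypothetical sketch of the registration path blk-mq.rst describes.
 * Only the blk-mq API calls are real; all demo_* names are invented. */
#include <linux/blk-mq.h>
#include <linux/module.h>
#include <linux/numa.h>

static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* A real driver hands rq to hardware here and completes it from its
	 * IRQ handler; completing inline keeps the sketch self-contained. */
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops demo_mq_ops = {
	.queue_rq = demo_queue_rq,
};

static struct blk_mq_tag_set demo_tag_set;

static int __init demo_init(void)
{
	demo_tag_set.ops = &demo_mq_ops;
	demo_tag_set.nr_hw_queues = 1;		/* one hardware dispatch queue */
	demo_tag_set.queue_depth = 64;		/* arbitrary example depth */
	demo_tag_set.numa_node = NUMA_NO_NODE;

	/* Allocates the tags that back the per-CPU software staging queues;
	 * a real driver would go on to set up a gendisk/request queue. */
	return blk_mq_alloc_tag_set(&demo_tag_set);
}

static void __exit demo_exit(void)
{
	blk_mq_free_tag_set(&demo_tag_set);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");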

/openbmc/linux/arch/riscv/boot/dts/allwinner/
Makefile
   7  dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1-mangopi-mq-pro.dtb
   9  dtb-$(CONFIG_ARCH_SUNXI) += sun20i-d1s-mangopi-mq.dtb

/openbmc/linux/arch/arm/boot/dts/allwinner/
sun8i-t113s-mangopi-mq-r-t113.dts
   9  #include "sunxi-d1s-t113-mangopi-mq-r.dtsi"
  13  compatible = "widora,mangopi-mq-r-t113", "allwinner,sun8i-t113s";

/openbmc/linux/drivers/input/mouse/
maplemouse.c
  27  static void dc_mouse_callback(struct mapleq *mq)
  30  struct maple_device *mapledev = mq->dev;
  33  unsigned char *res = mq->recvbuf->buf;

/openbmc/linux/Documentation/devicetree/bindings/powerpc/4xx/
ppc440spe-adma.txt
  82  - compatible : "ibm,mq-440spe";
  87  MQ0: mq {
  88  compatible = "ibm,mq-440spe";

/openbmc/linux/drivers/net/wireless/intel/iwlwifi/dvm/
main.c
2033  int mq = priv->queue_to_mac80211[queue];  in iwl_stop_sw_queue()
2035  if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
2038  if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
2041  queue, mq);
2045  set_bit(mq, &priv->transport_queue_stop);
2046  ieee80211_stop_queue(priv->hw, mq);
2052  int mq = priv->queue_to_mac80211[queue];  in iwl_wake_sw_queue()
2054  if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
2057  if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
2060  queue, mq);
 [all …]

/openbmc/linux/arch/ia64/include/asm/sn/
sn_sal.h
  92  sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
 100  addr = (unsigned long)mq;