Lines Matching full:cmdq

350 return &smmu->cmdq;  in arm_smmu_get_cmdq()
390 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons, in __arm_smmu_cmdq_skip_err()
414 * not to touch any of the shadow cmdq state. in __arm_smmu_cmdq_skip_err()
429 __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q); in arm_smmu_cmdq_skip_err()
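
The matches above come from the CMDQ error path: __arm_smmu_cmdq_skip_err() decodes the ERR field of the CONS register, logs the reason, and, without touching any of the shadow cmdq state, rewrites the offending slot as a CMD_SYNC so consumption can resume. A minimal user-space sketch of that idea, assuming the usual SMMUv3 layout (CONS.ERR in bits [30:24], CMD_SYNC opcode 0x46); constants and names here are illustrative, not a reference:

#include <stdint.h>
#include <stdio.h>

#define CMDQ_CONS_ERR_SHIFT	24	/* assumed CONS.ERR field position */
#define CMDQ_CONS_ERR_MASK	0x7f
#define CMDQ_OP_CMD_SYNC	0x46	/* assumed CMD_SYNC opcode */

static const char * const cerror_str[] = {
	"No error",
	"Illegal command",
	"Abort on command fetch",
	"ATC invalidate timeout",
};

static void skip_bad_command(uint64_t *slot, uint32_t cons)
{
	uint32_t err = (cons >> CMDQ_CONS_ERR_SHIFT) & CMDQ_CONS_ERR_MASK;

	printf("CMDQ error (cons 0x%08x): %s\n", cons,
	       err < 4 ? cerror_str[err] : "Unknown");

	/* Turn the faulting entry into a CMD_SYNC so CONS can move past it. */
	slot[0] = CMDQ_OP_CMD_SYNC;
	slot[1] = 0;
}
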
444 static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_lock() argument
454 if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) in arm_smmu_cmdq_shared_lock()
458 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); in arm_smmu_cmdq_shared_lock()
459 } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); in arm_smmu_cmdq_shared_lock()
462 static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_unlock() argument
464 (void)atomic_dec_return_release(&cmdq->lock); in arm_smmu_cmdq_shared_unlock()
467 static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_tryunlock() argument
469 if (atomic_read(&cmdq->lock) == 1) in arm_smmu_cmdq_shared_tryunlock()
472 arm_smmu_cmdq_shared_unlock(cmdq); in arm_smmu_cmdq_shared_tryunlock()
476 #define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \ argument
480 __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
486 #define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \ argument
488 atomic_set_release(&cmdq->lock, 0); \
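
These helpers form the cmdq's combined shared/exclusive lock: a single atomic counter that is >= 0 while held shared (the value being the number of shared holders) and INT_MIN while held exclusively, so the shared fast path is a plain increment and any stray increments made while the lock is held exclusively are wiped when the exclusive holder releases it by storing 0. A self-contained sketch of the same trick with C11 atomics; names and memory orderings are illustrative, not the driver's:

#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>

struct cmdq_lock {
	atomic_int lock;	/* 0: free, >0: shared holders, INT_MIN: exclusive */
};

static void shared_lock(struct cmdq_lock *l)
{
	int val;

	/*
	 * Fast path: bump the counter. If it was non-negative we now hold the
	 * lock shared. If it was negative, an exclusive holder owns it; the
	 * stray increment is harmless because INT_MIN stays negative and
	 * exclusive_unlock() resets the counter to 0 anyway.
	 */
	if (atomic_fetch_add_explicit(&l->lock, 1, memory_order_relaxed) >= 0)
		return;

	do {
		/* Wait for the exclusive holder to go away... */
		while ((val = atomic_load_explicit(&l->lock,
						   memory_order_relaxed)) < 0)
			;	/* spin; the driver uses atomic_cond_read_relaxed() */
		/* ...then try to register as one more shared holder. */
	} while (!atomic_compare_exchange_weak_explicit(&l->lock, &val, val + 1,
							memory_order_relaxed,
							memory_order_relaxed));
}

static void shared_unlock(struct cmdq_lock *l)
{
	atomic_fetch_sub_explicit(&l->lock, 1, memory_order_release);
}

static bool exclusive_trylock(struct cmdq_lock *l)
{
	int expected = 0;

	/* Only succeeds when nobody holds the lock at all. */
	return atomic_compare_exchange_strong_explicit(&l->lock, &expected,
						       INT_MIN,
						       memory_order_relaxed,
						       memory_order_relaxed);
}

static void exclusive_unlock(struct cmdq_lock *l)
{
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}

Because the fast path is a single increment, command issuers (which only ever take the lock shared) never contend on a cmpxchg unless somebody is holding the lock exclusively; arm_smmu_cmdq_shared_tryunlock() additionally refuses to drop the last shared reference so the caller can update the shadow cons first, as the matches further down show.
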
532 static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq, in __arm_smmu_cmdq_poll_set_valid_map() argument
537 .max_n_shift = cmdq->q.llq.max_n_shift, in __arm_smmu_cmdq_poll_set_valid_map()
552 ptr = &cmdq->valid_map[swidx]; in __arm_smmu_cmdq_poll_set_valid_map()
579 static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_set_valid_map() argument
582 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true); in arm_smmu_cmdq_set_valid_map()
586 static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_poll_valid_map() argument
589 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false); in arm_smmu_cmdq_poll_valid_map()
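
The valid_map matches above maintain one bit per queue slot, defined as the inverse of that slot's wrap flag: producers XOR their range of bits once the command words are in place, and the batch owner polls until every bit in the range has the expected polarity. A single-word sketch of the idea (the driver works on a multi-word bitmap with mask arithmetic; this is illustrative only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* One 64-bit word of valid bits, i.e. a 64-slot queue for this sketch. */
static _Atomic uint64_t valid_map;

/* Producer: flip the slot's valid bit after publishing the command words. */
static void mark_slot_written(unsigned int idx)
{
	atomic_fetch_xor_explicit(&valid_map, 1ULL << idx,
				  memory_order_release);
}

/*
 * Owner: wait until the slot's valid bit is the inverse of the wrap flag,
 * i.e. set on even passes over the ring and clear on odd passes.
 */
static void wait_slot_written(unsigned int idx, bool wrap)
{
	uint64_t bit = 1ULL << idx;
	uint64_t want = wrap ? 0 : bit;

	while ((atomic_load_explicit(&valid_map, memory_order_acquire) & bit)
	       != want)
		;	/* spin; the driver uses atomic_long_cond_read_relaxed() */
}
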
598 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in arm_smmu_cmdq_poll_until_not_full() local
602 * Try to update our copy of cons by grabbing exclusive cmdq access. If in arm_smmu_cmdq_poll_until_not_full()
605 if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) { in arm_smmu_cmdq_poll_until_not_full()
606 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_poll_until_not_full()
607 arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags); in arm_smmu_cmdq_poll_until_not_full()
608 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
614 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
626 * Must be called with the cmdq lock held in some capacity.
633 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in __arm_smmu_cmdq_poll_until_msi() local
634 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); in __arm_smmu_cmdq_poll_until_msi()
650 * Must be called with the cmdq lock held in some capacity.
656 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in __arm_smmu_cmdq_poll_until_consumed() local
661 llq->val = READ_ONCE(cmdq->q.llq.val); in __arm_smmu_cmdq_poll_until_consumed()
676 * cmdq->q.llq.cons. Roughly speaking: in __arm_smmu_cmdq_poll_until_consumed()
696 llq->cons = readl(cmdq->q.cons_reg); in __arm_smmu_cmdq_poll_until_consumed()
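
Both polling helpers compare shadow prod/cons snapshots that carry one extra wrap bit above the 2^max_n_shift entry index; that is how "full" and "already consumed past my prod" are told apart from "empty". A hedged sketch of those predicates, mirroring the Q_IDX()/Q_WRP() arithmetic with illustrative names:

#include <stdbool.h>
#include <stdint.h>

struct ll_queue {
	uint32_t max_n_shift;	/* log2 of the number of entries */
	uint32_t prod;
	uint32_t cons;
};

static uint32_t q_idx(const struct ll_queue *q, uint32_t p)
{
	return p & ((1u << q->max_n_shift) - 1);
}

static uint32_t q_wrp(const struct ll_queue *q, uint32_t p)
{
	return p & (1u << q->max_n_shift);
}

static bool queue_full(const struct ll_queue *q)
{
	/* Same index but different wrap bit: the producer lapped the consumer. */
	return q_idx(q, q->prod) == q_idx(q, q->cons) &&
	       q_wrp(q, q->prod) != q_wrp(q, q->cons);
}

static bool queue_consumed(const struct ll_queue *q, uint32_t prod)
{
	/* cons has moved past 'prod', with or without wrapping in between. */
	return (q_wrp(q, q->cons) == q_wrp(q, prod) &&
		q_idx(q, q->cons) > q_idx(q, prod)) ||
	       (q_wrp(q, q->cons) != q_wrp(q, prod) &&
		q_idx(q, q->cons) <= q_idx(q, prod));
}
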
711 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, in arm_smmu_cmdq_write_entries() argument
716 .max_n_shift = cmdq->q.llq.max_n_shift, in arm_smmu_cmdq_write_entries()
724 queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_write_entries()
751 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu); in arm_smmu_cmdq_issue_cmdlist() local
755 llq.max_n_shift = cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_issue_cmdlist()
759 llq.val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_issue_cmdlist()
766 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); in arm_smmu_cmdq_issue_cmdlist()
774 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); in arm_smmu_cmdq_issue_cmdlist()
788 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); in arm_smmu_cmdq_issue_cmdlist()
791 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod); in arm_smmu_cmdq_issue_cmdlist()
792 queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_issue_cmdlist()
797 * We achieve that by taking the cmdq lock as shared before in arm_smmu_cmdq_issue_cmdlist()
800 arm_smmu_cmdq_shared_lock(cmdq); in arm_smmu_cmdq_issue_cmdlist()
805 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); in arm_smmu_cmdq_issue_cmdlist()
810 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); in arm_smmu_cmdq_issue_cmdlist()
814 &cmdq->q.llq.atomic.prod); in arm_smmu_cmdq_issue_cmdlist()
822 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod); in arm_smmu_cmdq_issue_cmdlist()
828 writel_relaxed(prod, cmdq->q.prod_reg); in arm_smmu_cmdq_issue_cmdlist()
835 atomic_set_release(&cmdq->owner_prod, prod); in arm_smmu_cmdq_issue_cmdlist()
846 readl_relaxed(cmdq->q.prod_reg), in arm_smmu_cmdq_issue_cmdlist()
847 readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_issue_cmdlist()
851 * Try to unlock the cmdq lock. This will fail if we're the last in arm_smmu_cmdq_issue_cmdlist()
852 * reader, in which case we can safely update cmdq->q.llq.cons in arm_smmu_cmdq_issue_cmdlist()
854 if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) { in arm_smmu_cmdq_issue_cmdlist()
855 WRITE_ONCE(cmdq->q.llq.cons, llq.cons); in arm_smmu_cmdq_issue_cmdlist()
856 arm_smmu_cmdq_shared_unlock(cmdq); in arm_smmu_cmdq_issue_cmdlist()
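
arm_smmu_cmdq_issue_cmdlist() is the lock-free producer path tying the pieces above together: each CPU reserves space for its commands (plus an optional CMD_SYNC) with one 64-bit cmpxchg over the combined {prod, cons} snapshot, the CPU that finds the owner flag clear becomes the batch owner, everyone writes their entries and flips the valid map while holding the cmdq lock shared, and the owner alone waits for owner_prod to reach its slot, gathers the combined prod, writes the PROD register and publishes the next owner_prod. A much-simplified sketch of just the reservation step (no wrap-bit masking, no full-queue back-off; names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define OWNER_FLAG (1u << 31)	/* stashed in the shadow prod value */

struct llq_snapshot {
	union {
		struct {
			uint32_t prod;
			uint32_t cons;
		};
		uint64_t val;	/* same union trick as the driver's shadow llq */
	};
};

static _Atomic uint64_t llq_val;	/* shadow {prod, cons} */

/*
 * Reserve 'n' command slots plus one for a CMD_SYNC; returns the snapshot at
 * which our batch starts and reports whether we became the batch owner.
 * Assumes space is available.
 */
static struct llq_snapshot reserve(uint32_t n, bool *owner)
{
	struct llq_snapshot old, new;

	old.val = atomic_load_explicit(&llq_val, memory_order_relaxed);
	do {
		new = old;
		*owner = !(old.prod & OWNER_FLAG);
		/* Wrap handling omitted: move prod past n + 1 entries and
		 * mark the queue as owned for this batch. */
		new.prod = ((old.prod & ~OWNER_FLAG) + n + 1) | OWNER_FLAG;
	} while (!atomic_compare_exchange_weak_explicit(&llq_val, &old.val,
							new.val,
							memory_order_relaxed,
							memory_order_relaxed));

	old.prod &= ~OWNER_FLAG;	/* our batch starts at this prod value */
	return old;
}

The point of the owner flag is that only one CPU per batch ever touches the PROD register; everybody else merely spins on the valid map or on owner_prod, which keeps the common path free of locks.
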
871 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in __arm_smmu_cmdq_issue_cmd()
910 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in arm_smmu_cmdq_batch_add()
1693 dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); in arm_smmu_gerror_handler()
2935 struct arm_smmu_cmdq *cmdq = &smmu->cmdq; in arm_smmu_cmdq_init() local
2936 unsigned int nents = 1 << cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_init()
2938 atomic_set(&cmdq->owner_prod, 0); in arm_smmu_cmdq_init()
2939 atomic_set(&cmdq->lock, 0); in arm_smmu_cmdq_init()
2941 cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents, in arm_smmu_cmdq_init()
2943 if (!cmdq->valid_map) in arm_smmu_cmdq_init()
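
arm_smmu_cmdq_init() only has to zero the two shadow atomics and allocate a bitmap with one valid bit per queue entry (nents = 1 << max_n_shift). A user-space sketch of the same bookkeeping, with calloc() standing in for devm_bitmap_zalloc():

#include <stdatomic.h>
#include <stdlib.h>

struct cmdq_state {
	atomic_int owner_prod;
	atomic_int lock;
	unsigned long *valid_map;
};

static int cmdq_state_init(struct cmdq_state *s, unsigned int max_n_shift)
{
	unsigned int nents = 1u << max_n_shift;
	size_t nwords = (nents + 8 * sizeof(unsigned long) - 1) /
			(8 * sizeof(unsigned long));

	atomic_init(&s->owner_prod, 0);
	atomic_init(&s->lock, 0);

	/* One valid bit per entry, all starting out invalid (zero). */
	s->valid_map = calloc(nwords, sizeof(unsigned long));
	return s->valid_map ? 0 : -1;
}
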
2953 /* cmdq */ in arm_smmu_init_queues()
2954 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base, in arm_smmu_init_queues()
2956 CMDQ_ENT_DWORDS, "cmdq"); in arm_smmu_init_queues()
3184 /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */ in arm_smmu_setup_msis()
3339 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); in arm_smmu_device_reset()
3340 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); in arm_smmu_device_reset()
3341 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); in arm_smmu_device_reset()
3581 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
3583 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) { in arm_smmu_device_hw_probe()
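
Finally, the probe clamps the command-queue size (a log2 shift read from IDR1) to the driver maximum and rejects hardware whose queue cannot hold one full command batch plus its trailing CMD_SYNC, since batches are never split. A sketch of that check; the two #define values below are placeholders for illustration, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define CMDQ_MAX_SZ_SHIFT	8	/* placeholder driver maximum */
#define CMDQ_BATCH_ENTRIES	64	/* placeholder batch size (a power of two) */

static int check_cmdq_shift(uint32_t idr1_cmdqs)
{
	uint32_t shift = idr1_cmdqs < CMDQ_MAX_SZ_SHIFT ? idr1_cmdqs
							: CMDQ_MAX_SZ_SHIFT;

	/*
	 * shift <= ilog2(CMDQ_BATCH_ENTRIES) is the same test as
	 * (1 << shift) <= CMDQ_BATCH_ENTRIES when the batch size is a power
	 * of two: the queue must hold strictly more than one batch.
	 */
	if ((1u << shift) <= CMDQ_BATCH_ENTRIES) {
		fprintf(stderr,
			"command queue size <= %d entries not supported\n",
			CMDQ_BATCH_ENTRIES);
		return -1;
	}
	return (int)shift;
}
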