Lines matching refs:queue (each hit shows its source line number, the matching line, and the enclosing function)

227 static int sec_queue_map_io(struct sec_queue *queue)  in sec_queue_map_io()  argument
229 struct device *dev = queue->dev_info->dev; in sec_queue_map_io()
234 2 + queue->queue_id); in sec_queue_map_io()
237 queue->queue_id); in sec_queue_map_io()
240 queue->regs = ioremap(res->start, resource_size(res)); in sec_queue_map_io()
241 if (!queue->regs) in sec_queue_map_io()
247 static void sec_queue_unmap_io(struct sec_queue *queue) in sec_queue_unmap_io() argument
249 iounmap(queue->regs); in sec_queue_unmap_io()
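Only the lines that mention "queue" appear above, so the statements around them are missing. A minimal reconstruction of the map/unmap pair, assuming the resource lookup and error handling implied by the gaps (the platform_get_resource() call and the error return values are assumptions):

static int sec_queue_map_io(struct sec_queue *queue)
{
    struct device *dev = queue->dev_info->dev;
    struct resource *res;

    /* Assumed: MEM resources 0 and 1 hold the common register spaces,
     * so queue N uses resource index 2 + N. */
    res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM,
                                2 + queue->queue_id);
    if (!res) {
        dev_err(dev, "Failed to get queue %u memory resource\n",
                queue->queue_id);
        return -ENOMEM;
    }

    queue->regs = ioremap(res->start, resource_size(res));
    if (!queue->regs)
        return -ENOMEM;

    return 0;
}

static void sec_queue_unmap_io(struct sec_queue *queue)
{
    iounmap(queue->regs);
}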
252 static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg) in sec_queue_ar_pkgattr() argument
254 void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG; in sec_queue_ar_pkgattr()
267 static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg) in sec_queue_aw_pkgattr() argument
269 void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG; in sec_queue_aw_pkgattr()
519 static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc) in sec_queue_ar_alloc() argument
521 void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG; in sec_queue_ar_alloc()
536 static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc) in sec_queue_aw_alloc() argument
538 void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG; in sec_queue_aw_alloc()
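The four ARUSER/AWUSER helpers above only show the register address each one computes. They presumably apply the usual read-modify-write pattern to those configuration registers; a hedged sketch for the package-attribute case (the SEC_Q_ARUSER_CFG_PKG bit name and the return value are assumptions):

static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
{
    void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
    u32 regval = readl_relaxed(addr);

    if (ar_pkg)
        regval |= SEC_Q_ARUSER_CFG_PKG;     /* assumed bit name */
    else
        regval &= ~SEC_Q_ARUSER_CFG_PKG;
    writel_relaxed(regval, addr);

    return 0;
}

sec_queue_aw_pkgattr() and the two *_alloc() helpers would differ only in the register (AWUSER vs ARUSER) and in which field of the value they update.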
553 static void sec_queue_reorder(struct sec_queue *queue, bool reorder) in sec_queue_reorder() argument
555 void __iomem *base = queue->regs; in sec_queue_reorder()
566 static void sec_queue_depth(struct sec_queue *queue, u32 depth) in sec_queue_depth() argument
568 void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG; in sec_queue_depth()
578 static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr) in sec_queue_cmdbase_addr() argument
580 writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG); in sec_queue_cmdbase_addr()
581 writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG); in sec_queue_cmdbase_addr()
584 static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr) in sec_queue_outorder_addr() argument
587 queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG); in sec_queue_outorder_addr()
589 queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG); in sec_queue_outorder_addr()
592 static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr) in sec_queue_errbase_addr() argument
595 queue->regs + SEC_Q_ERR_BASE_HADDR_REG); in sec_queue_errbase_addr()
597 queue->regs + SEC_Q_ERR_BASE_LADDR_REG); in sec_queue_errbase_addr()
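The three ring-base helpers are fully visible above: each 64-bit DMA address is split with upper_32_bits()/lower_32_bits() and written to a high/low register pair. For reference, the command-ring variant reads:

static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
{
    /* Program the 64-bit ring base as two 32-bit halves. */
    writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
    writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
}

sec_queue_outorder_addr() and sec_queue_errbase_addr() do the same with the OUTORDER and ERR register pairs.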
600 static void sec_queue_irq_disable(struct sec_queue *queue) in sec_queue_irq_disable() argument
602 writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG); in sec_queue_irq_disable()
605 static void sec_queue_irq_enable(struct sec_queue *queue) in sec_queue_irq_enable() argument
607 writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG); in sec_queue_irq_enable()
610 static void sec_queue_abn_irq_disable(struct sec_queue *queue) in sec_queue_abn_irq_disable() argument
612 writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG); in sec_queue_abn_irq_disable()
615 static void sec_queue_stop(struct sec_queue *queue) in sec_queue_stop() argument
617 disable_irq(queue->task_irq); in sec_queue_stop()
618 sec_queue_irq_disable(queue); in sec_queue_stop()
619 writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG); in sec_queue_stop()
622 static void sec_queue_start(struct sec_queue *queue) in sec_queue_start() argument
624 sec_queue_irq_enable(queue); in sec_queue_start()
625 enable_irq(queue->task_irq); in sec_queue_start()
626 queue->expected = 0; in sec_queue_start()
627 writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG); in sec_queue_start()
628 writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG); in sec_queue_start()
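The stop/start pair is visible in full above, and the ordering matters: stop masks the flow interrupts and disables the task IRQ before clearing the queue enable bit, while start unmasks and re-enables the IRQ, resets the software completion cursor, clears hardware state, and only then sets the enable bit:

static void sec_queue_stop(struct sec_queue *queue)
{
    disable_irq(queue->task_irq);
    sec_queue_irq_disable(queue);
    writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
}

static void sec_queue_start(struct sec_queue *queue)
{
    sec_queue_irq_enable(queue);
    enable_irq(queue->task_irq);
    queue->expected = 0;
    writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
    writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
}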
651 static int sec_queue_free(struct sec_queue *queue) in sec_queue_free() argument
653 struct sec_dev_info *info = queue->dev_info; in sec_queue_free()
655 if (queue->queue_id >= SEC_Q_NUM) { in sec_queue_free()
656 dev_err(info->dev, "No queue %u\n", queue->queue_id); in sec_queue_free()
660 if (!queue->in_use) { in sec_queue_free()
661 dev_err(info->dev, "Queue %u is idle\n", queue->queue_id); in sec_queue_free()
666 queue->in_use = false; in sec_queue_free()
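sec_queue_free() is only partially visible: the lines above show that it validates the queue id, rejects a queue that is not in use, and finally clears the in_use flag. The return codes and any locking around the device-wide bookkeeping are assumptions in the sketch below:

static int sec_queue_free(struct sec_queue *queue)
{
    struct sec_dev_info *info = queue->dev_info;

    if (queue->queue_id >= SEC_Q_NUM) {
        dev_err(info->dev, "No queue %u\n", queue->queue_id);
        return -ENODEV;                 /* assumed error code */
    }

    if (!queue->in_use) {
        dev_err(info->dev, "Queue %u is idle\n", queue->queue_id);
        return -ENODEV;                 /* assumed error code */
    }

    /* Assumed: a device-level lock protects the shared queue bookkeeping. */
    queue->in_use = false;

    return 0;
}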
681 struct sec_queue *queue = q; in sec_isr_handle() local
682 struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd; in sec_isr_handle()
683 struct sec_queue_ring_cq *cq_ring = &queue->ring_cq; in sec_isr_handle()
687 void __iomem *base = queue->regs; in sec_isr_handle()
701 set_bit(q_id, queue->unprocessed); in sec_isr_handle()
702 if (q_id == queue->expected) in sec_isr_handle()
703 while (test_bit(queue->expected, queue->unprocessed)) { in sec_isr_handle()
704 clear_bit(queue->expected, queue->unprocessed); in sec_isr_handle()
705 msg = msg_ring->vaddr + queue->expected; in sec_isr_handle()
708 queue->shadow[queue->expected]); in sec_isr_handle()
709 queue->shadow[queue->expected] = NULL; in sec_isr_handle()
710 queue->expected = (queue->expected + 1) % in sec_isr_handle()
723 sec_queue_irq_enable(queue); in sec_isr_handle()
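The interrupt-handler fragments above show the out-of-order completion logic: each completed slot id is recorded in the unprocessed bitmap, and callbacks only run once the in-order slot (queue->expected) has completed, at which point every consecutive completed slot is drained. A hedged excerpt of that loop (how q_id is read from the completion entry, the callback field name, and any ring-usage accounting are assumptions):

/* Excerpt from sec_isr_handle(): q_id is the command-ring slot index taken
 * from the new completion entry; msg and msg_ring are locals of the handler. */
set_bit(q_id, queue->unprocessed);
if (q_id == queue->expected)
    while (test_bit(queue->expected, queue->unprocessed)) {
        clear_bit(queue->expected, queue->unprocessed);
        msg = msg_ring->vaddr + queue->expected;
        /* Assumed completion hook; the call itself is not visible above. */
        msg_ring->callback(msg, queue->shadow[queue->expected]);
        queue->shadow[queue->expected] = NULL;
        queue->expected = (queue->expected + 1) % SEC_QUEUE_LEN;
    }

/* Flow interrupts are unmasked again once the loop has drained (line 723). */
sec_queue_irq_enable(queue);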
728 static int sec_queue_irq_init(struct sec_queue *queue) in sec_queue_irq_init() argument
730 struct sec_dev_info *info = queue->dev_info; in sec_queue_irq_init()
731 int irq = queue->task_irq; in sec_queue_irq_init()
735 IRQF_TRIGGER_RISING, queue->name, queue); in sec_queue_irq_init()
745 static int sec_queue_irq_uninit(struct sec_queue *queue) in sec_queue_irq_uninit() argument
747 free_irq(queue->task_irq, queue); in sec_queue_irq_uninit()
774 struct sec_queue *queue; in sec_queue_alloc_start() local
776 queue = sec_alloc_queue(info); in sec_queue_alloc_start()
777 if (IS_ERR(queue)) { in sec_queue_alloc_start()
779 PTR_ERR(queue)); in sec_queue_alloc_start()
780 return queue; in sec_queue_alloc_start()
783 sec_queue_start(queue); in sec_queue_alloc_start()
785 return queue; in sec_queue_alloc_start()
799 struct sec_queue *queue = ERR_PTR(-ENODEV); in sec_queue_alloc_start_safe() local
806 queue = sec_queue_alloc_start(info); in sec_queue_alloc_start_safe()
811 return queue; in sec_queue_alloc_start_safe()
821 int sec_queue_stop_release(struct sec_queue *queue) in sec_queue_stop_release() argument
823 struct device *dev = queue->dev_info->dev; in sec_queue_stop_release()
826 sec_queue_stop(queue); in sec_queue_stop_release()
828 ret = sec_queue_free(queue); in sec_queue_stop_release()
843 bool sec_queue_empty(struct sec_queue *queue) in sec_queue_empty() argument
845 struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd; in sec_queue_empty()
858 int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx) in sec_queue_send() argument
860 struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd; in sec_queue_send()
861 void __iomem *base = queue->regs; in sec_queue_send()
872 queue->shadow[write] = ctx; in sec_queue_send()
885 bool sec_queue_can_enqueue(struct sec_queue *queue, int num) in sec_queue_can_enqueue() argument
887 struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd; in sec_queue_can_enqueue()
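sec_queue_send() and sec_queue_can_enqueue() are only partially visible; the lines above confirm the command ring, the register base, and the shadow[] slot that keeps the caller's context for the completion callback. A hedged sketch of the submit path, assuming a write-pointer register (SEC_Q_WR_PTR_REG), a mutex on the ring, and an atomic used counter; none of those names are confirmed by the listing:

int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
{
    struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
    void __iomem *base = queue->regs;
    u32 write;

    mutex_lock(&msg_ring->lock);                /* assumed ring lock */
    if (atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
        mutex_unlock(&msg_ring->lock);
        return -EAGAIN;                         /* ring full */
    }

    write = readl(base + SEC_Q_WR_PTR_REG);     /* assumed register name */
    memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
    queue->shadow[write] = ctx;                 /* kept for the completion callback */
    write = (write + 1) % SEC_QUEUE_LEN;

    /* Make the descriptor visible before advancing the hardware pointer. */
    wmb();
    writel(write, base + SEC_Q_WR_PTR_REG);

    atomic_inc(&msg_ring->used);
    mutex_unlock(&msg_ring->lock);

    return 0;
}

bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
{
    struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;

    return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num;
}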
892 static void sec_queue_hw_init(struct sec_queue *queue) in sec_queue_hw_init() argument
894 sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC); in sec_queue_hw_init()
895 sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC); in sec_queue_hw_init()
896 sec_queue_ar_pkgattr(queue, 1); in sec_queue_hw_init()
897 sec_queue_aw_pkgattr(queue, 1); in sec_queue_hw_init()
900 sec_queue_reorder(queue, true); in sec_queue_hw_init()
903 writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG); in sec_queue_hw_init()
905 sec_queue_depth(queue, SEC_QUEUE_LEN - 1); in sec_queue_hw_init()
907 sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr); in sec_queue_hw_init()
909 sec_queue_outorder_addr(queue, queue->ring_cq.paddr); in sec_queue_hw_init()
911 sec_queue_errbase_addr(queue, queue->ring_db.paddr); in sec_queue_hw_init()
913 writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG); in sec_queue_hw_init()
915 sec_queue_abn_irq_disable(queue); in sec_queue_hw_init()
916 sec_queue_irq_disable(queue); in sec_queue_hw_init()
917 writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG); in sec_queue_hw_init()
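Taken together, the hw_init lines above give the per-queue bring-up order: force no-allocate cache hints for reads and writes, set the package attributes, enable reordering, write 1 to the processing-number register, set the depth to SEC_QUEUE_LEN - 1, program the command, completion (out-of-order), and error ring bases, set the outstanding threshold, mask both interrupt sets, and clear queue state. Consolidated for readability (blank lines and comments that do not mention "queue" are not in the listing, so their placement here is an assumption):

static void sec_queue_hw_init(struct sec_queue *queue)
{
    sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC);
    sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC);
    sec_queue_ar_pkgattr(queue, 1);
    sec_queue_aw_pkgattr(queue, 1);

    sec_queue_reorder(queue, true);

    writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);

    sec_queue_depth(queue, SEC_QUEUE_LEN - 1);

    sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);
    sec_queue_outorder_addr(queue, queue->ring_cq.paddr);
    sec_queue_errbase_addr(queue, queue->ring_db.paddr);

    writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);

    sec_queue_abn_irq_disable(queue);
    sec_queue_irq_disable(queue);
    writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
}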
1002 struct sec_queue *queue, int queue_id) in sec_queue_base_init() argument
1004 queue->dev_info = info; in sec_queue_base_init()
1005 queue->queue_id = queue_id; in sec_queue_base_init()
1006 snprintf(queue->name, sizeof(queue->name), in sec_queue_base_init()
1007 "%s_%d", dev_name(info->dev), queue->queue_id); in sec_queue_base_init()
1078 static int sec_queue_res_cfg(struct sec_queue *queue) in sec_queue_res_cfg() argument
1080 struct device *dev = queue->dev_info->dev; in sec_queue_res_cfg()
1081 struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd; in sec_queue_res_cfg()
1082 struct sec_queue_ring_cq *ring_cq = &queue->ring_cq; in sec_queue_res_cfg()
1083 struct sec_queue_ring_db *ring_db = &queue->ring_db; in sec_queue_res_cfg()
1108 queue->task_irq = platform_get_irq(to_platform_device(dev), in sec_queue_res_cfg()
1109 queue->queue_id * 2 + 1); in sec_queue_res_cfg()
1110 if (queue->task_irq < 0) { in sec_queue_res_cfg()
1111 ret = queue->task_irq; in sec_queue_res_cfg()
1118 dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr, in sec_queue_res_cfg()
1119 queue->ring_db.paddr); in sec_queue_res_cfg()
1121 dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr, in sec_queue_res_cfg()
1122 queue->ring_cq.paddr); in sec_queue_res_cfg()
1124 dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr, in sec_queue_res_cfg()
1125 queue->ring_cmd.paddr); in sec_queue_res_cfg()
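The error path above, mirrored by sec_queue_free_ring_pages() below, names the three DMA rings and their sizes (command, completion, doorbell/error). A hedged sketch of the allocation side those frees unwind (the dma_alloc_coherent() calls and the error-label layout are assumptions; the IRQ lookup is taken from the lines above):

static int sec_queue_res_cfg(struct sec_queue *queue)
{
    struct device *dev = queue->dev_info->dev;
    struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
    struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
    struct sec_queue_ring_db *ring_db = &queue->ring_db;
    int ret;

    /* Assumed allocations, matching the dma_free_coherent() calls above. */
    ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
                                         &ring_cmd->paddr, GFP_KERNEL);
    if (!ring_cmd->vaddr)
        return -ENOMEM;

    ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
                                        &ring_cq->paddr, GFP_KERNEL);
    if (!ring_cq->vaddr) {
        ret = -ENOMEM;
        goto err_free_ring_cmd;
    }

    ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
                                        &ring_db->paddr, GFP_KERNEL);
    if (!ring_db->vaddr) {
        ret = -ENOMEM;
        goto err_free_ring_cq;
    }

    /* Each queue has two IRQs; the second one is the task-completion IRQ. */
    queue->task_irq = platform_get_irq(to_platform_device(dev),
                                       queue->queue_id * 2 + 1);
    if (queue->task_irq < 0) {
        ret = queue->task_irq;
        goto err_free_ring_db;
    }

    return 0;

err_free_ring_db:
    dma_free_coherent(dev, SEC_Q_DB_SIZE, ring_db->vaddr, ring_db->paddr);
err_free_ring_cq:
    dma_free_coherent(dev, SEC_Q_CQ_SIZE, ring_cq->vaddr, ring_cq->paddr);
err_free_ring_cmd:
    dma_free_coherent(dev, SEC_Q_CMD_SIZE, ring_cmd->vaddr, ring_cmd->paddr);
    return ret;
}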
1130 static void sec_queue_free_ring_pages(struct sec_queue *queue) in sec_queue_free_ring_pages() argument
1132 struct device *dev = queue->dev_info->dev; in sec_queue_free_ring_pages()
1134 dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr, in sec_queue_free_ring_pages()
1135 queue->ring_db.paddr); in sec_queue_free_ring_pages()
1136 dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr, in sec_queue_free_ring_pages()
1137 queue->ring_cq.paddr); in sec_queue_free_ring_pages()
1138 dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr, in sec_queue_free_ring_pages()
1139 queue->ring_cmd.paddr); in sec_queue_free_ring_pages()
1142 static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue, in sec_queue_config() argument
1147 sec_queue_base_init(info, queue, queue_id); in sec_queue_config()
1149 ret = sec_queue_res_cfg(queue); in sec_queue_config()
1153 ret = sec_queue_map_io(queue); in sec_queue_config()
1156 sec_queue_free_ring_pages(queue); in sec_queue_config()
1160 sec_queue_hw_init(queue); in sec_queue_config()
1166 struct sec_queue *queue) in sec_queue_unconfig() argument
1168 sec_queue_unmap_io(queue); in sec_queue_unconfig()
1169 sec_queue_free_ring_pages(queue); in sec_queue_unconfig()
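Finally, the config/unconfig lines give the per-queue setup and teardown order: base init, ring and IRQ resources, register mapping, then hardware init; teardown unmaps the registers before freeing the ring pages. A condensed sketch of that flow (the error handling around sec_queue_map_io() beyond the visible cleanup call is an assumption):

static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
                            int queue_id)
{
    int ret;

    sec_queue_base_init(info, queue, queue_id);

    ret = sec_queue_res_cfg(queue);
    if (ret)
        return ret;

    ret = sec_queue_map_io(queue);
    if (ret) {
        sec_queue_free_ring_pages(queue);
        return ret;
    }

    sec_queue_hw_init(queue);

    return 0;
}

static void sec_queue_unconfig(struct sec_dev_info *info,
                               struct sec_queue *queue)
{
    sec_queue_unmap_io(queue);
    sec_queue_free_ring_pages(queue);
}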