Lines matching refs: rk_domain

652 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_iova_to_phys() local
658 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_iova_to_phys()
660 dte = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_iova_to_phys()
672 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_iova_to_phys()
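
The matches above are from rk_iommu_iova_to_phys(): the whole two-level lookup runs under rk_domain->dt_lock, with the directory entry (DTE) read out of rk_domain->dt at line 660. A minimal sketch of that walk, reconstructed around the matched lines; rk_ops->pt_address() and the validity/address helpers are assumptions based on the driver's naming (only rk_ops->mk_dtentries() appears in the matches, at line 942):

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/* Level 1: directory entry selected by the upper iova bits. */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))			/* assumed helper */
		goto out;

	/* Level 2: page table entry selected by the middle iova bits. */
	page_table = (u32 *)phys_to_virt(rk_ops->pt_address(dte));
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))			/* assumed helper */
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
	return phys;
}
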
677 static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, in rk_iommu_zap_iova() argument
684 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_zap_iova()
685 list_for_each(pos, &rk_domain->iommus) { in rk_iommu_zap_iova()
703 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_zap_iova()
706 static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain, in rk_iommu_zap_iova_first_last() argument
709 rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE); in rk_iommu_zap_iova_first_last()
711 rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE, in rk_iommu_zap_iova_first_last()
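
rk_iommu_zap_iova() (lines 684-703) walks rk_domain->iommus under iommus_lock and issues a TLB invalidate on every IOMMU attached to the domain; rk_iommu_zap_iova_first_last() narrows that to the first and last small page of a range. The latter is almost entirely visible in the matches; a sketch, assuming SPAGE_SIZE is the driver's 4 KiB small-page size:

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	/* Always zap the first small page of the range... */
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	/* ...and the last one too when the range spans more than one page;
	 * only the edges can share a dte/pte cacheline with existing mappings. */
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}
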
715 static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, in rk_dte_get_page_table() argument
723 assert_spin_locked(&rk_domain->dt_lock); in rk_dte_get_page_table()
726 dte_addr = &rk_domain->dt[dte_index]; in rk_dte_get_page_table()
745 rk_table_flush(rk_domain, in rk_dte_get_page_table()
746 rk_domain->dt_dma + dte_index * sizeof(u32), 1); in rk_dte_get_page_table()
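
rk_dte_get_page_table() must be entered with dt_lock held (the assert at line 723). If the DTE indexed by the iova is still invalid, it allocates a fresh second-level table, installs the new DTE, and flushes that single 32-bit entry through dma_dev, which is what the rk_table_flush() call on lines 745-746 does. A compressed sketch with error handling shortened; the rk_ops hooks (mk_dtentries, pt_address, gfp_flags) follow the pattern visible elsewhere in the matches:

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))		/* already populated */
		goto done;

	/* Lazily allocate one 4 KiB second-level table. */
	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	/* Install the new DTE and flush just that one entry to memory. */
	*dte_addr = dte = rk_ops->mk_dtentries(pt_dma);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	return (u32 *)phys_to_virt(rk_ops->pt_address(dte));
}
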
752 static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, in rk_iommu_unmap_iova() argument
759 assert_spin_locked(&rk_domain->dt_lock); in rk_iommu_unmap_iova()
769 rk_table_flush(rk_domain, pte_dma, pte_count); in rk_iommu_unmap_iova()
774 static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, in rk_iommu_map_iova() argument
782 assert_spin_locked(&rk_domain->dt_lock); in rk_iommu_map_iova()
795 rk_table_flush(rk_domain, pte_dma, pte_total); in rk_iommu_map_iova()
803 rk_iommu_zap_iova_first_last(rk_domain, iova, size); in rk_iommu_map_iova()
808 rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, in rk_iommu_map_iova()
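
rk_iommu_map_iova() fills the contiguous run of PTEs for one mapping, again with dt_lock held. The flow the matches outline: write each PTE, flush the run with rk_table_flush() (line 795), zap only the first and last page from the IOTLB (line 803), and on collision unwind via rk_iommu_unmap_iova() (line 808). A hedged sketch of that loop; rk_ops->mk_ptentries() and the -EADDRINUSE return are assumptions based on the driver's conventions:

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		if (rk_pte_is_page_valid(pte_addr[pte_count]))
			goto unwind;		/* iova already mapped */

		pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot);
		paddr += SPAGE_SIZE;
	}

	/* Flush the whole run of PTEs, then shoot down only the edges of
	 * the range from the IOTLB (see rk_iommu_zap_iova_first_last()). */
	rk_table_flush(rk_domain, pte_dma, pte_total);
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;

unwind:
	/* Undo the PTEs written so far before reporting the collision. */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);
	return -EADDRINUSE;
}
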
822 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_map() local
829 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_map()
838 page_table = rk_dte_get_page_table(rk_domain, iova); in rk_iommu_map()
840 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_map()
844 dte_index = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_map()
849 ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova, in rk_iommu_map()
852 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_map()
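
rk_iommu_map() is the iommu_ops entry point: it takes dt_lock, lets rk_dte_get_page_table() populate the DTE if needed, derives the PTE pointer and its DMA address from the DTE it reads back (line 844), and hands off to rk_iommu_map_iova(). A sketch of that wrapper; the callback signature varies across kernel versions, so the one below is illustrative only:

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/* One call never crosses a DTE: iommu_map() guarantees iova and size
	 * are aligned, and size fits within a single 4 MiB page table. */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_ops->pt_address(dte) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}
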
860 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_unmap() local
868 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
877 dte = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_unmap()
880 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
887 unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size); in rk_iommu_unmap()
889 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
892 rk_iommu_zap_iova(rk_domain, iova, unmap_size); in rk_iommu_unmap()
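
rk_iommu_unmap() mirrors the map path: under dt_lock it reads the DTE (line 877), returns 0 early if the iova was never mapped (the unlock at line 880), otherwise clears the PTE run with rk_iommu_unmap_iova(), and only after dropping the lock shoots the range out of the IOTLBs with rk_iommu_zap_iova() (line 892). A sketch under the same signature caveats as above:

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* If the iova was never mapped there is nothing to do. */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_ops->pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* TLB shootdown happens outside dt_lock, on every attached IOMMU. */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}
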
925 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_enable() local
942 rk_ops->mk_dtentries(rk_domain->dt_dma)); in rk_iommu_enable()
960 struct rk_iommu_domain *rk_domain; in rk_iommu_identity_attach() local
969 rk_domain = to_rk_domain(iommu->domain); in rk_iommu_identity_attach()
978 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_identity_attach()
980 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_identity_attach()
1017 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_attach_device() local
1041 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_attach_device()
1042 list_add_tail(&iommu->node, &rk_domain->iommus); in rk_iommu_attach_device()
1043 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_attach_device()
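
rk_iommu_identity_attach() and rk_iommu_attach_device() are the two sides of the same bookkeeping: both take iommus_lock, the former (lines 978-980) to drop the IOMMU from rk_domain->iommus, the latter (lines 1041-1043) to add it, so that rk_iommu_zap_iova() only ever walks IOMMUs that are actually attached. A sketch of the two fragments; the list_del_init() on the detach side is an assumption, only the locking is visible in the matches:

	/* rk_iommu_identity_attach(): drop the IOMMU from its old domain. */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);			/* assumed */
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* rk_iommu_attach_device(): publish it on the new domain's list. */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
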
1060 struct rk_iommu_domain *rk_domain; in rk_iommu_domain_alloc() local
1071 rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); in rk_iommu_domain_alloc()
1072 if (!rk_domain) in rk_iommu_domain_alloc()
1080 rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags); in rk_iommu_domain_alloc()
1081 if (!rk_domain->dt) in rk_iommu_domain_alloc()
1084 rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt, in rk_iommu_domain_alloc()
1086 if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) { in rk_iommu_domain_alloc()
1091 spin_lock_init(&rk_domain->iommus_lock); in rk_iommu_domain_alloc()
1092 spin_lock_init(&rk_domain->dt_lock); in rk_iommu_domain_alloc()
1093 INIT_LIST_HEAD(&rk_domain->iommus); in rk_iommu_domain_alloc()
1095 rk_domain->domain.geometry.aperture_start = 0; in rk_iommu_domain_alloc()
1096 rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32); in rk_iommu_domain_alloc()
1097 rk_domain->domain.geometry.force_aperture = true; in rk_iommu_domain_alloc()
1099 return &rk_domain->domain; in rk_iommu_domain_alloc()
1102 free_page((unsigned long)rk_domain->dt); in rk_iommu_domain_alloc()
1104 kfree(rk_domain); in rk_iommu_domain_alloc()
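
rk_iommu_domain_alloc() sets up everything the other matches rely on: a kzalloc'd rk_iommu_domain, one zeroed 4 KiB page for the directory table, a streaming DMA mapping of that page (dt_dma) so it can be handed to the hardware and flushed later, the two spinlocks, the iommus list, and a 32-bit aperture. A sketch following the matched lines; the surrounding checks and the exact allocation entry point differ between kernel versions:

static struct iommu_domain *rk_iommu_domain_alloc(void)
{
	struct rk_iommu_domain *rk_domain;

	rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		return NULL;

	/*
	 * Two-level page tables: the directory (dt) and each page table
	 * hold 1024 u32 entries, i.e. one 4 KiB page apiece.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
	if (!rk_domain->dt)
		goto err_free_domain;

	rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, rk_domain->dt_dma))
		goto err_free_dt;

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_free_domain:
	kfree(rk_domain);
	return NULL;
}
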
1111 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_domain_free() local
1114 WARN_ON(!list_empty(&rk_domain->iommus)); in rk_iommu_domain_free()
1117 u32 dte = rk_domain->dt[i]; in rk_iommu_domain_free()
1127 dma_unmap_single(dma_dev, rk_domain->dt_dma, in rk_iommu_domain_free()
1129 free_page((unsigned long)rk_domain->dt); in rk_iommu_domain_free()
1131 kfree(rk_domain); in rk_iommu_domain_free()
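
rk_iommu_domain_free() is the exact inverse: it warns if any IOMMU is still on the list, walks every DTE to unmap and free the second-level tables that rk_dte_get_page_table() allocated, then releases the directory's DMA mapping, the directory page, and the domain itself. A sketch on the same assumptions as above, with NUM_DT_ENTRIES being the 1024-entry directory size:

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	/* No IOMMU may still be attached when the domain goes away. */
	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];

		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_ops->pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);

			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(dma_dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	kfree(rk_domain);
}
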