 
 #define IOMMU_REG_POLL_COUNT_FAST 1000
 
+#define IOMMU_INV_TLB_ENTIRE BIT(4) /* invalidate tlb entire */
+
 static LIST_HEAD(iommu_dev_list);
 
 struct rk_iommu_domain {
@@ -756,7 +758,8 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
 	 * We only zap the first and last iova, since only they could have
 	 * dte or pte shared with an existing mapping.
 	 */
-	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
+	if (!(prot & IOMMU_INV_TLB_ENTIRE))
+		rk_iommu_zap_iova_first_last(rk_domain, iova, size);
 
 	return 0;
 unwind:
@@ -847,6 +850,72 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
 	return unmap_size;
 }
 
+static void rk_iommu_zap_tlb(struct iommu_domain *domain)
+{
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+	struct list_head *pos;
+	int i;
+
+	mutex_lock(&rk_domain->iommus_lock);
+	list_for_each(pos, &rk_domain->iommus) {
+		struct rk_iommu *iommu;
+
+		iommu = list_entry(pos, struct rk_iommu, node);
+		rk_iommu_power_on(iommu);
+		for (i = 0; i < iommu->num_mmu; i++) {
+			rk_iommu_write(iommu->bases[i],
+				       RK_MMU_COMMAND,
+				       RK_MMU_CMD_ZAP_CACHE);
+		}
+		rk_iommu_power_off(iommu);
+	}
+	mutex_unlock(&rk_domain->iommus_lock);
+}
+
+static size_t rk_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			      struct scatterlist *sg, unsigned int nents, int prot)
+{
+	struct scatterlist *s;
+	size_t mapped = 0;
+	unsigned int i, min_pagesz;
+	int ret;
+
+	if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+		return 0;
+
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	for_each_sg(sg, s, nents, i) {
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+
+		/*
+		 * We are mapping on IOMMU page boundaries, so offset within
+		 * the page must be 0. However, the IOMMU may support pages
+		 * smaller than PAGE_SIZE, so s->offset may still represent
+		 * an offset of that boundary within the CPU page.
+		 */
+		if (!IS_ALIGNED(s->offset, min_pagesz))
+			goto out_err;
+
+		ret = iommu_map(domain, iova + mapped, phys, s->length,
+				prot | IOMMU_INV_TLB_ENTIRE);
+		if (ret)
+			goto out_err;
+
+		mapped += s->length;
+	}
+
+	rk_iommu_zap_tlb(domain);
+
+	return mapped;
+
+out_err:
+	/* undo mappings already done */
+	iommu_unmap(domain, iova, mapped);
+
+	return 0;
+}
+
 static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
 {
 	struct iommu_group *group;
@@ -1165,7 +1234,7 @@ static const struct iommu_ops rk_iommu_ops = {
 	.detach_dev = rk_iommu_detach_device,
 	.map = rk_iommu_map,
 	.unmap = rk_iommu_unmap,
-	.map_sg = default_iommu_map_sg,
+	.map_sg = rk_iommu_map_sg,
 	.add_device = rk_iommu_add_device,
 	.remove_device = rk_iommu_remove_device,
 	.iova_to_phys = rk_iommu_iova_to_phys,
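For orientation, here is a minimal caller-side sketch of how the new path would be exercised. It assumes the iommu_map_sg() helper from <linux/iommu.h> (available in trees that provide default_iommu_map_sg, as this one does); the example_map_sgl() name and its error handling are purely illustrative and not part of the patch.

#include <linux/iommu.h>
#include <linux/scatterlist.h>

/* Illustrative helper, not part of the patch. */
static int example_map_sgl(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sgl, unsigned int nents)
{
	size_t mapped;

	/*
	 * iommu_map_sg() dispatches to domain->ops->map_sg, which is
	 * rk_iommu_map_sg() after this change. It returns the number of
	 * bytes mapped, or 0 on failure; on failure the driver has already
	 * unmapped whatever it managed to map.
	 */
	mapped = iommu_map_sg(domain, iova, sgl, nents,
			      IOMMU_READ | IOMMU_WRITE);
	if (!mapped)
		return -ENOMEM;

	return 0;
}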
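The apparent rationale for the change: with the generic default_iommu_map_sg(), every per-element iommu_map() reaches rk_iommu_map_iova() and triggers rk_iommu_zap_iova_first_last(), so long scatterlists pay for a TLB zap per element. The new rk_iommu_map_sg() tags each iommu_map() call with the driver-private IOMMU_INV_TLB_ENTIRE prot bit so that per-iova zap is skipped, then invalidates everything once by writing RK_MMU_CMD_ZAP_CACHE to each MMU in the domain via rk_iommu_zap_tlb(). The bit only travels through the prot argument inside the driver; assuming the page-table code keeps translating just the standard read/write prot bits, it never lands in a PTE.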