Commit b8bca70

Simon Xue authored and rkhuangtao committed
iommu/rockchip: support rk_iommu_map_sg for iommu ops
Implement rk_iommu_map_sg for rk_iommu_ops. It flushes the TLB only once, after the whole scatterlist has been mapped, which speeds up the map operation.

Change-Id: Ief123ad363018d2b3227066c07338ccbd75c9d84
Signed-off-by: Simon Xue <xxm@rock-chips.com>
1 parent c9b30e4 commit b8bca70
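
For context, a minimal sketch of how a client driver would hit this path once .map_sg points at rk_iommu_map_sg: in this kernel, iommu_map_sg() simply dispatches to domain->ops->map_sg, so the whole scatterlist is mapped with a single TLB flush at the end. This is not part of the patch; my_map_buffer, my_dev, and my_sgt are hypothetical names used only for illustration.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

/* Hypothetical caller, not from this patch */
static int my_map_buffer(struct device *my_dev, struct sg_table *my_sgt,
			 unsigned long iova)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(my_dev);
	size_t mapped;

	if (!domain)
		return -ENODEV;

	/*
	 * Dispatches to rk_iommu_map_sg(): every entry is mapped first,
	 * then the TLB is zapped once via rk_iommu_zap_tlb().
	 */
	mapped = iommu_map_sg(domain, iova, my_sgt->sgl, my_sgt->nents,
			      IOMMU_READ | IOMMU_WRITE);

	/* rk_iommu_map_sg() returns 0 after undoing any partial mappings */
	return mapped ? 0 : -ENOMEM;
}

Note the error contract: on failure rk_iommu_map_sg unmaps whatever it had already mapped and returns 0, so a zero return is the only failure signal the caller gets.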

1 file changed: drivers/iommu/rockchip-iommu.c (71 additions & 2 deletions)
@@ -76,6 +76,8 @@
 
 #define IOMMU_REG_POLL_COUNT_FAST 1000
 
+#define IOMMU_INV_TLB_ENTIRE BIT(4) /* invalidate tlb entire */
+
 static LIST_HEAD(iommu_dev_list);
 
 struct rk_iommu_domain {
@@ -756,7 +758,8 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
-	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
+	if (!(prot & IOMMU_INV_TLB_ENTIRE))
+		rk_iommu_zap_iova_first_last(rk_domain, iova, size);
 
 	return 0;
 unwind:
@@ -847,6 +850,72 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
 	return unmap_size;
 }
 
+static void rk_iommu_zap_tlb(struct iommu_domain *domain)
+{
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+	struct list_head *pos;
+	int i;
+
+	mutex_lock(&rk_domain->iommus_lock);
+	list_for_each(pos, &rk_domain->iommus) {
+		struct rk_iommu *iommu;
+
+		iommu = list_entry(pos, struct rk_iommu, node);
+		rk_iommu_power_on(iommu);
+		for (i = 0; i < iommu->num_mmu; i++) {
+			rk_iommu_write(iommu->bases[i],
+				       RK_MMU_COMMAND,
+				       RK_MMU_CMD_ZAP_CACHE);
+		}
+		rk_iommu_power_off(iommu);
+	}
+	mutex_unlock(&rk_domain->iommus_lock);
+}
+
+static size_t rk_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			      struct scatterlist *sg, unsigned int nents,
+			      int prot)
+{
+	struct scatterlist *s;
+	size_t mapped = 0;
+	unsigned int i, min_pagesz;
+	int ret;
+
+	if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+		return 0;
+
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	for_each_sg(sg, s, nents, i) {
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+
+		/*
+		 * We are mapping on IOMMU page boundaries, so offset within
+		 * the page must be 0. However, the IOMMU may support pages
+		 * smaller than PAGE_SIZE, so s->offset may still represent
+		 * an offset of that boundary within the CPU page.
+		 */
+		if (!IS_ALIGNED(s->offset, min_pagesz))
+			goto out_err;
+
+		ret = iommu_map(domain, iova + mapped, phys, s->length,
+				prot | IOMMU_INV_TLB_ENTIRE);
+		if (ret)
+			goto out_err;
+
+		mapped += s->length;
+	}
+
+	rk_iommu_zap_tlb(domain);
+
+	return mapped;
+
+out_err:
+	/* undo mappings already done */
+	iommu_unmap(domain, iova, mapped);
+
+	return 0;
+}
+
 static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
 {
 	struct iommu_group *group;
@@ -1165,7 +1234,7 @@ static const struct iommu_ops rk_iommu_ops = {
 	.detach_dev = rk_iommu_detach_device,
 	.map = rk_iommu_map,
 	.unmap = rk_iommu_unmap,
-	.map_sg = default_iommu_map_sg,
+	.map_sg = rk_iommu_map_sg,
 	.add_device = rk_iommu_add_device,
 	.remove_device = rk_iommu_remove_device,
 	.iova_to_phys = rk_iommu_iova_to_phys,
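
The batching trick is to carry a driver-private flag, IOMMU_INV_TLB_ENTIRE (BIT(4)), through the prot argument: rk_iommu_map_sg sets it on every iommu_map() call so that rk_iommu_map_iova skips its per-mapping rk_iommu_zap_iova_first_last(), and a single RK_MMU_CMD_ZAP_CACHE per MMU is issued afterwards in rk_iommu_zap_tlb(). This works only while BIT(4) is unused by the generic IOMMU_* prot bits; later mainline kernels assigned IOMMU_MMIO to that same bit, so the overload would need revisiting there. Below is a stand-alone, stubbed sketch of the pattern (flush-per-mapping vs. flush-once); every name in it is illustrative, none is the driver's.

#include <stdio.h>

#define DEMO_INV_TLB_ENTIRE (1 << 4)	/* stands in for IOMMU_INV_TLB_ENTIRE */

static int tlb_flushes;

/* stand-ins for rk_iommu_zap_iova_first_last() / rk_iommu_zap_tlb() */
static void zap_first_last(void) { tlb_flushes++; }
static void zap_entire(void)     { tlb_flushes++; }

/* stand-in for one iommu_map() call reaching rk_iommu_map_iova() */
static void map_one(int prot)
{
	/* page-table update would go here */
	if (!(prot & DEMO_INV_TLB_ENTIRE))
		zap_first_last();	/* old behaviour: flush per mapping */
}

int main(void)
{
	int i;

	tlb_flushes = 0;
	for (i = 0; i < 64; i++)	/* old path: .map per sg entry */
		map_one(0);
	printf("per-mapping zap: %d flushes\n", tlb_flushes);

	tlb_flushes = 0;
	for (i = 0; i < 64; i++)	/* new path: rk_iommu_map_sg loop */
		map_one(DEMO_INV_TLB_ENTIRE);
	zap_entire();			/* single rk_iommu_zap_tlb() */
	printf("batched zap:     %d flushes\n", tlb_flushes);

	return 0;
}

For 64 scatterlist entries the old path flushes 64 times and the new path once, which is where the claimed map-speed improvement comes from.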
