Skip to content

Commit d8f24cf

Browse files
yueshuasus-leslieyu
authored and committed
iommu/rockchip: Add pd/clk operation in iommu
Rk iommus share pd and clk with their masters, to make iommus independent, iommus need to manage pd and clk by using pm_runtime_get_sync API who is not atomic save, might lead to sleep, we change the spin lock to mutex to satisfy the pm_runtime_get_sync, callers of rk_iommu_attach_device and rk_iommu_map should guarantee not in a atomic path. Change-Id: Icbe175030d36572e19740d23eae94f49fe59eb10 Signed-off-by: Simon <xxm@rock-chips.com> Signed-off-by: Nickey Yang <nickey.yang@rock-chips.com>
1 parent 16ce881 commit d8f24cf

1 file changed

Lines changed: 83 additions & 27 deletions

File tree

drivers/iommu/rockchip-iommu.c

Lines changed: 83 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
* published by the Free Software Foundation.
55
*/
66

7+
#include <linux/clk.h>
78
#include <linux/compiler.h>
89
#include <linux/delay.h>
910
#include <linux/device.h>
@@ -19,6 +20,7 @@
1920
#include <linux/of.h>
2021
#include <linux/of_platform.h>
2122
#include <linux/platform_device.h>
23+
#include <linux/pm_runtime.h>
2224
#include <linux/slab.h>
2325
#include <linux/spinlock.h>
2426

@@ -79,8 +81,8 @@ struct rk_iommu_domain {
7981
struct platform_device *pdev;
8082
u32 *dt; /* page directory table */
8183
dma_addr_t dt_dma;
82-
spinlock_t iommus_lock; /* lock for iommus list */
83-
spinlock_t dt_lock; /* lock for modifying page directory table */
84+
struct mutex iommus_lock; /* lock for iommus list */
85+
struct mutex dt_lock; /* lock for modifying page directory table */
8486

8587
struct iommu_domain domain;
8688
};
@@ -90,8 +92,11 @@ struct rk_iommu {
9092
void __iomem **bases;
9193
int num_mmu;
9294
int irq;
95+
bool reset_disabled; /* isp iommu reset operation would failed */
9396
struct list_head node; /* entry in rk_iommu_domain.iommus */
9497
struct iommu_domain *domain; /* domain to which iommu is attached */
98+
struct clk *aclk; /* aclock belong to master */
99+
struct clk *hclk; /* hclock belong to master */
95100
};
96101

97102
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
@@ -255,6 +260,26 @@ static u32 rk_mk_pte_invalid(u32 pte)
255260
#define RK_IOVA_PAGE_MASK 0x00000fff
256261
#define RK_IOVA_PAGE_SHIFT 0
257262

263+
static void rk_iommu_power_on(struct rk_iommu *iommu)
264+
{
265+
if (iommu->aclk && iommu->hclk) {
266+
clk_enable(iommu->aclk);
267+
clk_enable(iommu->hclk);
268+
}
269+
270+
pm_runtime_get_sync(iommu->dev);
271+
}
272+
273+
static void rk_iommu_power_off(struct rk_iommu *iommu)
274+
{
275+
pm_runtime_put_sync(iommu->dev);
276+
277+
if (iommu->aclk && iommu->hclk) {
278+
clk_disable(iommu->aclk);
279+
clk_disable(iommu->hclk);
280+
}
281+
}
282+
258283
static u32 rk_iova_dte_index(dma_addr_t iova)
259284
{
260285
return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
@@ -301,12 +326,17 @@ static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
301326
* TODO(djkurtz): Figure out when it is more efficient to shootdown the
302327
* entire iotlb rather than iterate over individual iovas.
303328
*/
329+
330+
rk_iommu_power_on(iommu);
331+
304332
for (i = 0; i < iommu->num_mmu; i++) {
305333
dma_addr_t iova;
306334

307335
for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
308336
rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
309337
}
338+
339+
rk_iommu_power_off(iommu);
310340
}
311341

312342
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
@@ -414,6 +444,10 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
414444
int ret, i;
415445
u32 dte_addr;
416446

447+
/* Workaround for isp mmus */
448+
if (iommu->reset_disabled)
449+
return 0;
450+
417451
/*
418452
* Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
419453
* and verifying that upper 5 nybbles are read back.
@@ -551,12 +585,11 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
551585
dma_addr_t iova)
552586
{
553587
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
554-
unsigned long flags;
555588
phys_addr_t pt_phys, phys = 0;
556589
u32 dte, pte;
557590
u32 *page_table;
558591

559-
spin_lock_irqsave(&rk_domain->dt_lock, flags);
592+
mutex_lock(&rk_domain->dt_lock);
560593

561594
dte = rk_domain->dt[rk_iova_dte_index(iova)];
562595
if (!rk_dte_is_pt_valid(dte))
@@ -570,7 +603,7 @@ static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
570603

571604
phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
572605
out:
573-
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
606+
mutex_unlock(&rk_domain->dt_lock);
574607

575608
return phys;
576609
}
@@ -579,16 +612,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
579612
dma_addr_t iova, size_t size)
580613
{
581614
struct list_head *pos;
582-
unsigned long flags;
583615

584616
/* shootdown these iova from all iommus using this domain */
585-
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
617+
mutex_lock(&rk_domain->iommus_lock);
586618
list_for_each(pos, &rk_domain->iommus) {
587619
struct rk_iommu *iommu;
588620
iommu = list_entry(pos, struct rk_iommu, node);
589621
rk_iommu_zap_lines(iommu, iova, size);
590622
}
591-
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
623+
mutex_unlock(&rk_domain->iommus_lock);
592624
}
593625

594626
static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
@@ -609,7 +641,7 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
609641
phys_addr_t pt_phys;
610642
dma_addr_t pt_dma;
611643

612-
assert_spin_locked(&rk_domain->dt_lock);
644+
WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
613645

614646
dte_index = rk_iova_dte_index(iova);
615647
dte_addr = &rk_domain->dt[dte_index];
@@ -646,7 +678,7 @@ static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
646678
unsigned int pte_count;
647679
unsigned int pte_total = size / SPAGE_SIZE;
648680

649-
assert_spin_locked(&rk_domain->dt_lock);
681+
WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
650682

651683
for (pte_count = 0; pte_count < pte_total; pte_count++) {
652684
u32 pte = pte_addr[pte_count];
@@ -669,7 +701,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
669701
unsigned int pte_total = size / SPAGE_SIZE;
670702
phys_addr_t page_phys;
671703

672-
assert_spin_locked(&rk_domain->dt_lock);
704+
WARN_ON(!mutex_is_locked(&rk_domain->dt_lock));
673705

674706
for (pte_count = 0; pte_count < pte_total; pte_count++) {
675707
u32 pte = pte_addr[pte_count];
@@ -710,13 +742,12 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
710742
phys_addr_t paddr, size_t size, int prot)
711743
{
712744
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
713-
unsigned long flags;
714745
dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
715746
u32 *page_table, *pte_addr;
716747
u32 dte_index, pte_index;
717748
int ret;
718749

719-
spin_lock_irqsave(&rk_domain->dt_lock, flags);
750+
mutex_lock(&rk_domain->dt_lock);
720751

721752
/*
722753
* pgsize_bitmap specifies iova sizes that fit in one page table
@@ -727,7 +758,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
727758
*/
728759
page_table = rk_dte_get_page_table(rk_domain, iova);
729760
if (IS_ERR(page_table)) {
730-
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
761+
mutex_unlock(&rk_domain->dt_lock);
731762
return PTR_ERR(page_table);
732763
}
733764

@@ -738,7 +769,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
738769
ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
739770
paddr, size, prot);
740771

741-
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
772+
mutex_unlock(&rk_domain->dt_lock);
742773

743774
return ret;
744775
}
@@ -747,14 +778,13 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
747778
size_t size)
748779
{
749780
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
750-
unsigned long flags;
751781
dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
752782
phys_addr_t pt_phys;
753783
u32 dte;
754784
u32 *pte_addr;
755785
size_t unmap_size;
756786

757-
spin_lock_irqsave(&rk_domain->dt_lock, flags);
787+
mutex_lock(&rk_domain->dt_lock);
758788

759789
/*
760790
* pgsize_bitmap specifies iova sizes that fit in one page table
@@ -766,7 +796,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
766796
dte = rk_domain->dt[rk_iova_dte_index(iova)];
767797
/* Just return 0 if iova is unmapped */
768798
if (!rk_dte_is_pt_valid(dte)) {
769-
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
799+
mutex_unlock(&rk_domain->dt_lock);
770800
return 0;
771801
}
772802

@@ -775,7 +805,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
775805
pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
776806
unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
777807

778-
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
808+
mutex_unlock(&rk_domain->dt_lock);
779809

780810
/* Shootdown iotlb entries for iova range that was just unmapped */
781811
rk_iommu_zap_iova(rk_domain, iova, unmap_size);
@@ -809,7 +839,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
809839
{
810840
struct rk_iommu *iommu;
811841
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
812-
unsigned long flags;
813842
int ret, i;
814843

815844
/*
@@ -820,6 +849,8 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
820849
if (!iommu)
821850
return 0;
822851

852+
rk_iommu_power_on(iommu);
853+
823854
ret = rk_iommu_enable_stall(iommu);
824855
if (ret)
825856
return ret;
@@ -846,9 +877,9 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
846877
if (ret)
847878
return ret;
848879

849-
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
880+
mutex_lock(&rk_domain->iommus_lock);
850881
list_add_tail(&iommu->node, &rk_domain->iommus);
851-
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
882+
mutex_unlock(&rk_domain->iommus_lock);
852883

853884
dev_dbg(dev, "Attached to iommu domain\n");
854885

@@ -862,17 +893,16 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
862893
{
863894
struct rk_iommu *iommu;
864895
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
865-
unsigned long flags;
866896
int i;
867897

868898
/* Allow 'virtual devices' (eg drm) to detach from domain */
869899
iommu = rk_iommu_from_dev(dev);
870900
if (!iommu)
871901
return;
872902

873-
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
903+
mutex_lock(&rk_domain->iommus_lock);
874904
list_del_init(&iommu->node);
875-
spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
905+
mutex_unlock(&rk_domain->iommus_lock);
876906

877907
/* Ignore error while disabling, just keep going */
878908
rk_iommu_enable_stall(iommu);
@@ -887,6 +917,8 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
887917

888918
iommu->domain = NULL;
889919

920+
rk_iommu_power_off(iommu);
921+
890922
dev_dbg(dev, "Detached from iommu domain\n");
891923
}
892924

@@ -936,8 +968,8 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
936968

937969
rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
938970

939-
spin_lock_init(&rk_domain->iommus_lock);
940-
spin_lock_init(&rk_domain->dt_lock);
971+
mutex_init(&rk_domain->iommus_lock);
972+
mutex_init(&rk_domain->dt_lock);
941973
INIT_LIST_HEAD(&rk_domain->iommus);
942974

943975
rk_domain->domain.geometry.aperture_start = 0;
@@ -1157,11 +1189,35 @@ static int rk_iommu_probe(struct platform_device *pdev)
11571189
return -ENXIO;
11581190
}
11591191

1192+
iommu->reset_disabled = device_property_read_bool(dev,
1193+
"rk_iommu,disable_reset_quirk");
1194+
1195+
iommu->aclk = devm_clk_get(dev, "aclk");
1196+
if (IS_ERR(iommu->aclk)) {
1197+
dev_info(dev, "can't get aclk\n");
1198+
iommu->aclk = NULL;
1199+
}
1200+
1201+
iommu->hclk = devm_clk_get(dev, "hclk");
1202+
if (IS_ERR(iommu->hclk)) {
1203+
dev_info(dev, "can't get hclk\n");
1204+
iommu->hclk = NULL;
1205+
}
1206+
1207+
if (iommu->aclk && iommu->hclk) {
1208+
clk_prepare(iommu->aclk);
1209+
clk_prepare(iommu->hclk);
1210+
}
1211+
1212+
pm_runtime_enable(dev);
1213+
11601214
return 0;
11611215
}
11621216

11631217
static int rk_iommu_remove(struct platform_device *pdev)
11641218
{
1219+
pm_runtime_put(&pdev->dev);
1220+
11651221
return 0;
11661222
}
11671223

0 commit comments

Comments
 (0)