@@ -40,51 +40,16 @@ extern struct rga2_mmu_buf_t rga2_mmu_buf;
 
 void rga2_dma_flush_range(void *pstart, void *pend)
 {
-#ifdef CONFIG_ARM
-	dmac_flush_range(pstart, pend);
-	outer_flush_range(virt_to_phys(pstart), virt_to_phys(pend));
-#elif defined(CONFIG_ARM64)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
-	__dma_flush_area(pstart, pend - pstart);
-#else
-	__dma_flush_range(pstart, pend);
-#endif
-#endif
+	dma_sync_single_for_device(rga2_drvdata->dev, virt_to_phys(pstart), pend - pstart, DMA_TO_DEVICE);
 }
 
 static void rga2_dma_flush_page(struct page *page)
 {
 	phys_addr_t paddr;
-	void *virt;
 
 	paddr = page_to_phys(page);
-#ifdef CONFIG_ARM
-	if (PageHighMem(page)) {
-#ifdef CONFIG_HIGHMEM
-		if (cache_is_vipt_nonaliasing()) {
-			virt = kmap_atomic(page);
-			dmac_flush_range(virt, virt + PAGE_SIZE);
-			kunmap_atomic(virt);
-		} else {
-			virt = kmap_high_get(page);
-			dmac_flush_range(virt, virt + PAGE_SIZE);
-			kunmap_high(page);
-		}
-#endif
-	} else {
-		virt = page_address(page);
-		dmac_flush_range(virt, virt + PAGE_SIZE);
-	}
 
-	outer_flush_range(paddr, paddr + PAGE_SIZE);
-#elif defined(CONFIG_ARM64)
-	virt = page_address(page);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
-	__dma_flush_area(virt, PAGE_SIZE);
-#else
-	__dma_flush_range(virt, virt + PAGE_SIZE);
-#endif
-#endif
+	dma_sync_single_for_device(rga2_drvdata->dev, paddr, PAGE_SIZE, DMA_TO_DEVICE);
 }
 
 #if 0
@@ -620,6 +585,7 @@ static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)
 			(rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
 		MMU_Base_phys = rga2_mmu_buf.buf +
 			(rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
+
 		mutex_unlock(&rga2_service.lock);
 		if (Src0MemSize) {
 			if (req->sg_src0) {
0 commit comments