Mirror of https://gitee.com/bianbu-linux/linux-6.6, synced 2025-04-24 14:07:52 -04:00
dma-mapping: add (back) arch_dma_mark_clean for ia64
Add back a hook to optimize dcache flushing after reading executable code using DMA. This gets ia64 out of the business of pretending to be DMA-incoherent just for this optimization.

Signed-off-by: Christoph Hellwig <hch@lst.de>
parent ef1a85b6ca
commit abdaf11ac1

7 changed files with 23 additions and 17 deletions
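To see where the new hook fires in practice before reading the diff: after this change, any streaming DMA sync in the device-to-CPU direction that goes through dma-direct ends in arch_dma_mark_clean(). A minimal sketch of such a caller follows; example_rx_complete, rx_handle, and RX_LEN are hypothetical names for illustration, not part of this commit:

#include <linux/dma-mapping.h>

#define RX_LEN 2048	/* hypothetical receive buffer size */

/* Hypothetical driver completion handler; dev and rx_handle are placeholders. */
static void example_rx_complete(struct device *dev, dma_addr_t rx_handle)
{
	/*
	 * Hand the device-written buffer back to the CPU. With dma-direct,
	 * a DMA_FROM_DEVICE sync now also calls arch_dma_mark_clean(), so
	 * ia64 can skip the icache flush if these pages are later mapped
	 * into an executable vm-area.
	 */
	dma_sync_single_for_cpu(dev, rx_handle, RX_LEN, DMA_FROM_DEVICE);
}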
arch/ia64/Kconfig
@@ -8,6 +8,7 @@ menu "Processor type and features"
 
 config IA64
 	bool
+	select ARCH_HAS_DMA_MARK_CLEAN
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ACPI
@@ -32,8 +33,6 @@ config IA64
 	select TTY
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_VIRT_CPU_ACCOUNTING
-	select DMA_NONCOHERENT_MMAP
-	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
arch/ia64/kernel/dma-mapping.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/dma-direct.h>
+#include <linux/dma-mapping.h>
 #include <linux/export.h>
 
 /* Set this to 1 if there is a HW IOMMU in the system */
@@ -7,15 +7,3 @@ int iommu_detected __read_mostly;
 
 const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
-
-void *arch_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_addr, unsigned long attrs)
-{
-	dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
-}
arch/ia64/mm/init.c
@@ -73,8 +73,7 @@ __ia64_sync_icache_dcache (pte_t pte)
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
 {
 	unsigned long pfn = PHYS_PFN(paddr);
 
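The hunk cuts off before the loop body, which this patch leaves untouched: the function only changes name and signature. For context, the complete function after the rename reads roughly as below; the PG_arch_1 loop is quoted from the surrounding file from memory, so treat it as a sketch rather than part of this diff:

void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
	unsigned long pfn = PHYS_PFN(paddr);

	/*
	 * Mark every page in the range clean. __ia64_sync_icache_dcache()
	 * tests PG_arch_1 and skips the flush when the bit is already set.
	 */
	do {
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
	} while (++pfn <= PHYS_PFN(paddr + size - 1));
}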
include/linux/dma-direct.h
@@ -150,6 +150,9 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
 
 	if (unlikely(is_swiotlb_buffer(paddr)))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+
+	if (dir == DMA_FROM_DEVICE)
+		arch_dma_mark_clean(paddr, size);
 }
 
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
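Putting that hunk in context, the resulting inline should read as sketched below. The dma_to_phys()/dev_is_dma_coherent() prologue is reconstructed from this era's dma-direct.h from memory and is not part of the diff, so take it as a best-effort sketch:

static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);

	/* New: tell the arch the buffer now holds clean, device-written data. */
	if (dir == DMA_FROM_DEVICE)
		arch_dma_mark_clean(paddr, size);
}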
include/linux/dma-noncoherent.h
@@ -108,6 +108,14 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
 
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
+#else
+static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
+{
+}
+#endif /* ARCH_HAS_DMA_MARK_CLEAN */
+
 void *arch_dma_set_uncached(void *addr, size_t size);
 void arch_dma_clear_uncached(void *addr, size_t size);
 
kernel/dma/Kconfig
@@ -44,6 +44,12 @@ config ARCH_HAS_DMA_SET_MASK
 config ARCH_HAS_DMA_WRITE_COMBINE
 	bool
 
+#
+# Select if the architectures provides the arch_dma_mark_clean hook
+#
+config ARCH_HAS_DMA_MARK_CLEAN
+	bool
+
 config DMA_DECLARE_COHERENT
 	bool
 
kernel/dma/direct.c
@@ -345,6 +345,9 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 		if (unlikely(is_swiotlb_buffer(paddr)))
 			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
 					SYNC_FOR_CPU);
+
+		if (dir == DMA_FROM_DEVICE)
+			arch_dma_mark_clean(paddr, sg->length);
 	}
 
 	if (!dev_is_dma_coherent(dev))