dma-debug: don't enforce dma mapping check on noncoherent allocations
As discussed in [1], there is no need to enforce the DMA mapping check on
noncoherent allocations; a simple test on the returned CPU address is
good enough.
Add a new pair of debug helpers and use them for noncoherent alloc/free
to fix this issue.
Fixes: efa70f2fdc ("dma-mapping: add a new dma_alloc_pages API")
Link: https://lore.kernel.org/all/ff6c1fe6-820f-4e58-8395-df06aa91706c@oss.qualcomm.com # 1
Signed-off-by: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20250828-dma-debug-fix-noncoherent-dma-check-v1-1-76e9be0dd7fc@oss.qualcomm.com
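
For context, the noncoherent allocation API this patch touches is used roughly as sketched below. The sketch is not part of the patch; the example_use() helper, the DMA_TO_DEVICE direction and the error handling are illustrative assumptions, only dma_alloc_pages()/dma_free_pages() and their signatures come from the kernel API. It shows the "simple test on the returned CPU address" that makes a stricter dma-debug mapping check unnecessary here:

/*
 * Illustrative sketch only (not part of this patch): a driver using the
 * noncoherent dma_alloc_pages()/dma_free_pages() API.  The allocation can
 * only fail by returning NULL, so checking the returned page pointer is
 * the only error handling a caller needs.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* example_use() is a hypothetical helper, not a kernel function */
static int example_use(struct device *dev, size_t size)
{
	dma_addr_t dma_handle;
	struct page *page;

	page = dma_alloc_pages(dev, size, &dma_handle, DMA_TO_DEVICE, GFP_KERNEL);
	if (!page)		/* the "simple test on the returned CPU address" */
		return -ENOMEM;

	/* ... fill page_address(page) and hand dma_handle to the device ... */

	dma_free_pages(dev, size, page, dma_handle, DMA_TO_DEVICE);
	return 0;
}

With the new debug_dma_alloc_pages()/debug_dma_free_pages() helpers, dma-debug still tracks the region (mismatched frees are reported via check_unmap()) without demanding the mapping-error check that this API does not need.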
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -39,6 +39,7 @@ enum {
 	dma_debug_sg,
 	dma_debug_coherent,
 	dma_debug_resource,
+	dma_debug_noncoherent,
 };
 
 enum map_err_types {
@@ -141,6 +142,7 @@ static const char *type2name[] = {
 	[dma_debug_sg] = "scatter-gather",
 	[dma_debug_coherent] = "coherent",
 	[dma_debug_resource] = "resource",
+	[dma_debug_noncoherent] = "noncoherent",
 };
 
 static const char *dir2name[] = {
@@ -993,7 +995,8 @@ static void check_unmap(struct dma_debug_entry *ref)
 			   "[mapped as %s] [unmapped as %s]\n",
 			   ref->dev_addr, ref->size,
 			   type2name[entry->type], type2name[ref->type]);
-	} else if (entry->type == dma_debug_coherent &&
+	} else if ((entry->type == dma_debug_coherent ||
+		    entry->type == dma_debug_noncoherent) &&
 		   ref->paddr != entry->paddr) {
 		err_printk(ref->dev, entry, "device driver frees "
 			   "DMA memory with different CPU address "
@@ -1581,6 +1584,49 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	}
 }
 
+void debug_dma_alloc_pages(struct device *dev, struct page *page,
+			   size_t size, int direction,
+			   dma_addr_t dma_addr,
+			   unsigned long attrs)
+{
+	struct dma_debug_entry *entry;
+
+	if (unlikely(dma_debug_disabled()))
+		return;
+
+	entry = dma_entry_alloc();
+	if (!entry)
+		return;
+
+	entry->type = dma_debug_noncoherent;
+	entry->dev = dev;
+	entry->paddr = page_to_phys(page);
+	entry->size = size;
+	entry->dev_addr = dma_addr;
+	entry->direction = direction;
+
+	add_dma_entry(entry, attrs);
+}
+
+void debug_dma_free_pages(struct device *dev, struct page *page,
+			  size_t size, int direction,
+			  dma_addr_t dma_addr)
+{
+	struct dma_debug_entry ref = {
+		.type = dma_debug_noncoherent,
+		.dev = dev,
+		.paddr = page_to_phys(page),
+		.dev_addr = dma_addr,
+		.size = size,
+		.direction = direction,
+	};
+
+	if (unlikely(dma_debug_disabled()))
+		return;
+
+	check_unmap(&ref);
+}
+
 static int __init dma_debug_driver_setup(char *str)
 {
 	int i;
--- a/kernel/dma/debug.h
+++ b/kernel/dma/debug.h
@@ -54,6 +54,13 @@ extern void debug_dma_sync_sg_for_cpu(struct device *dev,
 extern void debug_dma_sync_sg_for_device(struct device *dev,
 					 struct scatterlist *sg,
 					 int nelems, int direction);
+extern void debug_dma_alloc_pages(struct device *dev, struct page *page,
+				  size_t size, int direction,
+				  dma_addr_t dma_addr,
+				  unsigned long attrs);
+extern void debug_dma_free_pages(struct device *dev, struct page *page,
+				 size_t size, int direction,
+				 dma_addr_t dma_addr);
 #else /* CONFIG_DMA_API_DEBUG */
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
 				       size_t offset, size_t size,
@@ -126,5 +133,18 @@ static inline void debug_dma_sync_sg_for_device(struct device *dev,
 					 int nelems, int direction)
 {
 }
+
+static inline void debug_dma_alloc_pages(struct device *dev, struct page *page,
+					 size_t size, int direction,
+					 dma_addr_t dma_addr,
+					 unsigned long attrs)
+{
+}
+
+static inline void debug_dma_free_pages(struct device *dev, struct page *page,
+					 size_t size, int direction,
+					 dma_addr_t dma_addr)
+{
+}
 #endif /* CONFIG_DMA_API_DEBUG */
 #endif /* _KERNEL_DMA_DEBUG_H */
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -712,7 +712,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
 	if (page) {
 		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
 				      size, dir, gfp, 0);
-		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
+		debug_dma_alloc_pages(dev, page, size, dir, *dma_handle, 0);
 	} else {
 		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
 	}
@@ -738,7 +738,7 @@ void dma_free_pages(struct device *dev, size_t size, struct page *page,
 		    dma_addr_t dma_handle, enum dma_data_direction dir)
 {
 	trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
-	debug_dma_unmap_page(dev, dma_handle, size, dir);
+	debug_dma_free_pages(dev, page, size, dir, dma_handle);
 	__dma_free_pages(dev, size, page, dma_handle, dir);
 }
 EXPORT_SYMBOL_GPL(dma_free_pages);