Memory Allocation: DMA Operations
DMA Operations
DMA memory can be allocated in two ways: coherent DMA and streaming DMA. Early kernels reserved a DMA ZONE covering physical addresses 0 to 16 MB to supply DMA memory for ISA devices. Most modern devices have no such addressing restriction for DMA, so buffers can be allocated anywhere in memory, and the DMA ZONE is no longer needed.
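As a side note, drivers for legacy devices that really are limited to such a low region can still ask for ZONE_DMA memory explicitly on platforms that keep it configured. A minimal, illustrative sketch (the helper name and the fixed use of kmalloc() here are for illustration only, not from the text above):

#include <linux/slab.h>
#include <linux/gfp.h>

/* Illustrative helper: force the buffer into ZONE_DMA (the low 16 MB on x86). */
static void *legacy_dma_buffer(size_t size)
{
	/* GFP_DMA restricts the allocation to ZONE_DMA on platforms that have one */
	return kmalloc(size, GFP_KERNEL | GFP_DMA);
}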
The generic interfaces are declared under linux-4.12.4/include/linux:
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
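As a rough driver-side sketch of how the interfaces above are typically used (the device pointer, buffer size, direction, and function names here are illustrative placeholders, not taken from the kernel source):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

#define EXAMPLE_BUF_SIZE 4096	/* illustrative size */

/* Coherent DMA: one buffer shared long-term between CPU and device. */
static void *coherent_usage(struct device *dev, dma_addr_t *dma)
{
	/* returns the CPU virtual address; *dma receives the device-visible address */
	/* released later with dma_free_coherent(dev, EXAMPLE_BUF_SIZE, cpu_addr, *dma) */
	return dma_alloc_coherent(dev, EXAMPLE_BUF_SIZE, dma, GFP_KERNEL);
}

/* Streaming DMA: map an existing (e.g. kmalloc'd) buffer around one transfer. */
static int streaming_usage(struct device *dev, void *buf)
{
	dma_addr_t dma = dma_map_single(dev, buf, EXAMPLE_BUF_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with 'dma' and wait for the transfer ... */

	dma_unmap_single(dev, dma, EXAMPLE_BUF_SIZE, DMA_TO_DEVICE);
	return 0;
}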
The interface functions above are ultimately dispatched through a struct dma_map_ops; on ARM the non-coherent table looks like this:
const struct dma_map_ops arm_dma_ops = {
//coherent DMA
.alloc = arm_dma_alloc, //memory allocated through this path is mapped uncached
.free = arm_dma_free,
.mmap = arm_dma_mmap, //used together with .alloc to map the buffer into user space
//streaming DMA; on current ARM platforms the buffer can come from any memory region
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_dma_map_page, //maps a kmalloc'd address to a DMA address, i.e. the physical/bus address
.unmap_page = arm_dma_unmap_page,
.map_sg = arm_dma_map_sg, //internally a wrapper that calls map_page for each scatterlist entry
.unmap_sg = arm_dma_unmap_sg, //pages mapped for streaming DMA are left without cache synchronization (PG_dcache_clean), so the CPU must resynchronize before accessing them through the cache
.sync_single_for_cpu = arm_dma_sync_single_for_cpu,
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
};
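The sync_single/sync_sg hooks above are where the driver-facing dma_sync_single_for_cpu()/dma_sync_single_for_device() calls end up when a streaming mapping is kept alive across several transfers and the CPU needs to look at the data in between. A minimal sketch (the buffer, length, direction, and function name are placeholders):

/* Keep one streaming mapping and hand ownership back and forth. */
static void inspect_rx_buffer(struct device *dev, dma_addr_t dma, size_t len)
{
	/* give the buffer to the CPU: caches are invalidated/flushed as needed */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

	/* ... the CPU may now safely read the freshly DMA'd data ... */

	/* hand the buffer back to the device before the next transfer */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}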
//The following table is used when memory/cache coherency is maintained by the hardware itself (hardware-supported coherency), as explained further below:
const struct dma_map_ops arm_coherent_dma_ops = {
.alloc = arm_coherent_dma_alloc,
.free = arm_coherent_dma_free,
.mmap = arm_coherent_dma_mmap,
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
};
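Which of the two tables a device ends up with is decided when its DMA ops are set up: on ARM a device described as hardware-coherent (for example via the "dma-coherent" device-tree property) is given arm_coherent_dma_ops, everything else gets arm_dma_ops. A simplified sketch of that selection, not the literal kernel code:

/* Simplified version of the per-device selection in arch/arm/mm/dma-mapping.c. */
static const struct dma_map_ops *pick_arm_dma_ops(bool hw_coherent)
{
	/* hardware-coherent devices can skip all explicit cache maintenance */
	return hw_coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}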
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
attrs, __builtin_return_address(0));
}
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, unsigned long attrs)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
return __dma_alloc(dev, size, handle, gfp, prot, false,
attrs, __builtin_return_address(0));
}
Comparing the two, their calls into __dma_alloc() differ only in the page protection (prot) and the is_coherent flag; a sketch of what __get_dma_pgprot() does with prot follows the argument listing below:
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, pgprot_t prot, bool is_coherent,
unsigned long attrs, const void *caller)
{
...
struct arm_dma_alloc_args args = {
.dev = dev,
.size = PAGE_ALIGN(size),
.gfp = gfp,
.prot = prot,
.caller = caller,
.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
.coherent_flag = is_coherent ? COHERENT : NORMAL,
};
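The prot value that arm_dma_alloc() passes in comes from __get_dma_pgprot(), which weakens the kernel mapping so the CPU will not cache the buffer. Roughly, simplified from arch/arm/mm/dma-mapping.c and meant as a sketch rather than the exact source:

/* Sketch of the non-coherent page-protection choice. */
static pgprot_t dma_pgprot_sketch(unsigned long attrs, pgprot_t prot)
{
	/* write-combining if requested via DMA_ATTR_WRITE_COMBINE, otherwise uncached */
	return (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) : pgprot_dmacoherent(prot);
}

With the arguments packed into args, __dma_alloc() then picks an allocator; when the device can use CMA, the request is handed to the CMA-backed allocator shown next: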
static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
struct page **ret_page)
{
return __alloc_from_contiguous(args->dev, args->size, args->prot,
ret_page, args->caller,
args->want_vaddr, args->coherent_flag,
args->gfp);
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
int coherent_flag, gfp_t gfp)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
struct page *page;
void *ptr = NULL;
page = dma_alloc_from_contiguous(dev, count, order, gfp);
if (!page)
return NULL;
__dma_clear_buffer(page, size, coherent_flag);
/* ... remainder omitted: when a kernel virtual address is wanted the pages are remapped with the requested prot, then *ret_page is set and ptr is returned ... */
}