void *vaddr, dma_addr_t dma_addr,
unsigned long attrs);
-static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
- gfp_t gfp)
-{
- unsigned long dma_mask = 0;
-
- dma_mask = dev->coherent_dma_mask;
- if (!dma_mask)
- dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
-
- return dma_mask;
-}
-
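
The helper removed above only papered over an unset mask: with coherent_dma_mask == 0 it silently assumed 32 bits, or 24 bits for GFP_DMA allocations. A standalone plain-C sketch of that now-removed defaulting (the GFP_DMA value here is a placeholder, not the kernel's):

    #include <stdint.h>

    #define DMA_BIT_MASK(n)  (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
    #define GFP_DMA          0x01u  /* placeholder flag value */

    /* What dma_alloc_coherent_mask() computed before this patch. */
    static uint64_t legacy_coherent_mask(uint64_t coherent_dma_mask,
                                         unsigned int gfp)
    {
            if (!coherent_dma_mask)
                    return (gfp & GFP_DMA) ? DMA_BIT_MASK(24)
                                           : DMA_BIT_MASK(32);
            return coherent_dma_mask;
    }

After the patch every caller reads dev->coherent_dma_mask directly, on the assumption that devices doing coherent allocations have a mask set; a device that never set one no longer gets this fallback.
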
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
- unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
-
- if (dma_mask <= DMA_BIT_MASK(24))
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
- if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
gfp |= GFP_DMA32;
#endif
return gfp;
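
With the helper gone, dma_alloc_coherent_gfp_flags() maps the device mask to a GFP zone directly: a mask of at most 24 bits forces ZONE_DMA, and on x86-64 a mask of at most 32 bits steers the allocation to ZONE_DMA32. A compilable sketch of that decision (flag values are placeholders, not the kernel's):

    #include <stdint.h>

    #define DMA_BIT_MASK(n)  (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
    #define GFP_DMA          0x01u  /* placeholder flag values */
    #define GFP_DMA32        0x02u

    /* Mirrors the patched dma_alloc_coherent_gfp_flags() on x86-64. */
    static unsigned int pick_gfp_zone(uint64_t coherent_dma_mask,
                                      unsigned int gfp)
    {
            if (coherent_dma_mask <= DMA_BIT_MASK(24))
                    gfp |= GFP_DMA;         /* ISA-era 16MB limit */
            if (coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
                    gfp |= GFP_DMA32;       /* 32-bit-only device */
            return gfp;
    }
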
dma_addr_t *dma_addr, gfp_t flag,
unsigned long attrs)
{
- unsigned long dma_mask;
struct page *page;
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
dma_addr_t addr;
- dma_mask = dma_alloc_coherent_mask(dev, flag);
-
again:
page = NULL;
/* CMA can be used only in the context which permits sleeping */
flag);
if (page) {
addr = phys_to_dma(dev, page_to_phys(page));
- if (addr + size > dma_mask) {
+ if (addr + size > dev->coherent_dma_mask) {
dma_release_from_contiguous(dev, page, count);
page = NULL;
}
return NULL;
addr = phys_to_dma(dev, page_to_phys(page));
- if (addr + size > dma_mask) {
+ if (addr + size > dev->coherent_dma_mask) {
__free_pages(page, get_order(size));
- if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+ !(flag & GFP_DMA)) {
flag = (flag & ~GFP_DMA32) | GFP_DMA;
goto again;
}
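
The retry above is the standard fallback pattern: if the page the allocator handed back maps to a bus address beyond the device's coherent mask, free it and try again from ZONE_DMA, exactly once. A self-contained user-space sketch of the control flow (the stub_* helpers are hypothetical stand-ins for alloc_pages() and phys_to_dma(), not kernel APIs):

    #include <stdint.h>
    #include <stdlib.h>

    #define DMA_BIT_MASK(n)  (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
    #define GFP_DMA          0x01u  /* placeholder flag values */
    #define GFP_DMA32        0x02u

    static void *stub_alloc(unsigned int gfp, size_t size)
    {
            (void)gfp;                      /* zone selection elided */
            return malloc(size);            /* stands in for alloc_pages() */
    }

    static uint64_t stub_dma_addr(void *p)
    {
            return (uintptr_t)p;            /* stands in for phys_to_dma() */
    }

    static void *sketch_alloc_coherent(uint64_t mask, size_t size,
                                       unsigned int gfp)
    {
            void *page;
            uint64_t addr;
    again:
            page = stub_alloc(gfp, size);
            if (!page)
                    return NULL;
            addr = stub_dma_addr(page);
            if (addr + size > mask) {
                    free(page);
                    /* Retry from the 16MB zone unless already there;
                     * the second pass cannot loop because GFP_DMA is
                     * now set. */
                    if (mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
                            gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                            goto again;
                    }
                    return NULL;
            }
            return page;
    }
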
static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
- unsigned long dma_mask;
unsigned int order;
struct page *page;
void *vaddr = NULL;
- dma_mask = dma_alloc_coherent_mask(dev, gfp);
order = get_order(size);
/*
* mask with it already cleared.
*/
addr = __sme_clr(phys_to_dma(dev, page_to_phys(page)));
- if ((addr + size) > dma_mask) {
+ if ((addr + size) > dev->coherent_dma_mask) {
__free_pages(page, get_order(size));
} else {
vaddr = page_address(page);
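
sev_alloc() has one extra wrinkle: under SME/SEV the encryption bit is carried in a high physical-address bit, so __sme_clr() must strip it before the result is compared against the coherent mask, otherwise every allocation would appear out of range. A tiny illustrative sketch (bit 47 is an example position only; the real C-bit location is reported by CPUID at boot):

    #include <stdint.h>

    #define SKETCH_SME_BIT   (1ULL << 47)   /* example C-bit position */

    static inline uint64_t sketch_sme_clr(uint64_t addr)
    {
            return addr & ~SKETCH_SME_BIT;  /* mirrors __sme_clr() */
    }

    /* The check performed above, on the decrypted bus address. */
    static int sketch_fits_mask(uint64_t addr, uint64_t size, uint64_t mask)
    {
            return sketch_sme_clr(addr) + size <= mask;
    }
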
* API.
*/
-#ifndef CONFIG_X86
-static unsigned long dma_alloc_coherent_mask(struct device *dev,
- gfp_t gfp)
-{
- unsigned long dma_mask = 0;
-
- dma_mask = dev->coherent_dma_mask;
- if (!dma_mask)
- dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
-
- return dma_mask;
-}
-#endif
-
#define XEN_SWIOTLB_ERROR_CODE (~(dma_addr_t)0x0)
static char *xen_io_tlb_start, *xen_io_tlb_end;
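
The block removed just above was the verbatim duplicate of the helper dropped from the x86 header in the first hunk (the defaulting behavior sketched there): guarded by #ifndef CONFIG_X86, it supplied the function on non-x86 builds of xen-swiotlb, where the x86 header copy is not visible. With both copies gone, the allocation path below likewise reads hwdev->coherent_dma_mask directly.
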
return ret;
if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+ dma_mask = hwdev->coherent_dma_mask;
/* At this point dma_handle is the physical address, next we are
* going to set it to the machine address.