#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
_PAGE_COHERENT | _PAGE_WRITETHRU))
+#else
+#define pgprot_cached_wthru(prot) pgprot_noncached(prot)
#endif
#define pgprot_cached_noncoherent(prot) \
	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
size = 0x10000;
__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
- size, pgprot_val(pgprot_noncached(__pgprot(0))));
+ size, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
return;
inval_range:
printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
"mapping 64k\n");
__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
- 0x10000, pgprot_val(pgprot_noncached(__pgprot(0))));
+ 0x10000, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}
*/
isa_io_base = ISA_IO_BASE;
__ioremap_at(pbase, (void *)ISA_IO_BASE,
- size, pgprot_val(pgprot_noncached(__pgprot(0))));
+ size, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
pr_debug("ISA: Non-PCI bridge is %pOF\n", np);
}
/* Establish the mapping */
if (__ioremap_at(phys_page, area->addr, size_page,
- pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL)
+ pgprot_val(pgprot_noncached(PAGE_KERNEL))) == NULL)
return -ENOMEM;
/* Fixup hose IO resource */
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
- return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
- __builtin_return_address(0));
+ unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL));
+
+ return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);
void __iomem *
ioremap_wc(phys_addr_t addr, unsigned long size)
{
- return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
- __builtin_return_address(0));
+ unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL));
+
+ return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
void __iomem *
ioremap_wt(phys_addr_t addr, unsigned long size)
{
- return __ioremap_caller(addr, size, _PAGE_WRITETHRU,
- __builtin_return_address(0));
+ unsigned long flags = pgprot_val(pgprot_cached_wthru(PAGE_KERNEL));
+
+ return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);
void __iomem *
ioremap_coherent(phys_addr_t addr, unsigned long size)
{
- return __ioremap_caller(addr, size, _PAGE_COHERENT,
- __builtin_return_address(0));
+ unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL));
+
+ return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_coherent);
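All four wrappers above now build their flags the same way: start from PAGE_KERNEL and let one of the pgprot_* helpers rewrite only the cache-control bits (with the header change at the top, pgprot_cached_wthru() falls back to pgprot_noncached() when the platform has no _PAGE_WRITETHRU bit, so ioremap_wt() simply maps non-cached there). The sketch below is a standalone userspace demo of that composition, not kernel code: the _PAGE_* bit values and the simplified PAGE_KERNEL are made up for illustration, and pgprot_t is reduced to a plain unsigned long.

/*
 * Standalone sketch of how the powerpc pgprot cache-control helpers
 * compose flags.  Bit values are hypothetical, not the real platform ones.
 */
#include <stdio.h>

#define _PAGE_PRESENT	0x001UL		/* hypothetical bit values */
#define _PAGE_RW	0x002UL
#define _PAGE_NO_CACHE	0x010UL
#define _PAGE_GUARDED	0x020UL
#define _PAGE_COHERENT	0x040UL
#define _PAGE_WRITETHRU	0x080UL
#define _PAGE_CACHE_CTL	(_PAGE_NO_CACHE | _PAGE_GUARDED | \
			 _PAGE_COHERENT | _PAGE_WRITETHRU)

#define PAGE_KERNEL	(_PAGE_PRESENT | _PAGE_RW)	/* simplified base */

/* Same shape as the kernel macros: clear the cache-control field of the
 * incoming protection, then OR in the bits for the requested mode. */
#define pgprot_noncached(p)	(((p) & ~_PAGE_CACHE_CTL) | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define pgprot_noncached_wc(p)	(((p) & ~_PAGE_CACHE_CTL) | _PAGE_NO_CACHE)
#define pgprot_cached(p)	(((p) & ~_PAGE_CACHE_CTL) | _PAGE_COHERENT)
#define pgprot_cached_wthru(p)	(((p) & ~_PAGE_CACHE_CTL) | _PAGE_COHERENT | _PAGE_WRITETHRU)

int main(void)
{
	/* The PAGE_KERNEL base bits (present, RW, ...) always survive; only
	 * the cache-control field is rewritten.  That is why the callers can
	 * start from PAGE_KERNEL instead of __pgprot(0). */
	printf("ioremap          -> %#lx\n", pgprot_noncached(PAGE_KERNEL));
	printf("ioremap_wc       -> %#lx\n", pgprot_noncached_wc(PAGE_KERNEL));
	printf("ioremap_wt       -> %#lx\n", pgprot_cached_wthru(PAGE_KERNEL));
	printf("ioremap_coherent -> %#lx\n", pgprot_cached(PAGE_KERNEL));
	return 0;
}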
phys_addr_t p;
int err;
- /* Make sure we have the base flags */
- if ((flags & _PAGE_PRESENT) == 0)
- flags |= pgprot_val(PAGE_KERNEL);
-
- /* Non-cacheable page cannot be coherent */
- if (flags & _PAGE_NO_CACHE)
- flags &= ~_PAGE_COHERENT;
-
/*
* Choose an address to map it to.
* Once the vmalloc system is running, we use it.
{
unsigned long i;
- /* Make sure we have the base flags */
- if ((flags & _PAGE_PRESENT) == 0)
- flags |= pgprot_val(PAGE_KERNEL);
-
/* We don't support the 4K PFN hack with ioremap */
if (flags & H_PAGE_4K_PFN)
return NULL;
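The "make sure we have the base flags" fixup removed above is safe to drop because every converted caller now derives its flags from PAGE_KERNEL, so _PAGE_PRESENT is already set before __ioremap_caller() sees them. A tiny standalone check of that invariant, again using made-up bit values rather than the real platform definitions:

#include <assert.h>

#define _PAGE_PRESENT	0x001UL		/* hypothetical bit values */
#define _PAGE_RW	0x002UL
#define _PAGE_NO_CACHE	0x010UL
#define _PAGE_GUARDED	0x020UL
#define _PAGE_CACHE_CTL	(_PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL	(_PAGE_PRESENT | _PAGE_RW)
#define pgprot_noncached(p) (((p) & ~_PAGE_CACHE_CTL) | _PAGE_NO_CACHE | _PAGE_GUARDED)

int main(void)
{
	unsigned long flags = pgprot_noncached(PAGE_KERNEL);

	/* The removed "if ((flags & _PAGE_PRESENT) == 0)" branch would
	 * never trigger for callers converted by this patch. */
	assert(flags & _PAGE_PRESENT);
	return 0;
}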
void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
- unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
+ unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL));
void *caller = __builtin_return_address(0);
if (ppc_md.ioremap)
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
- unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
+ unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL));
void *caller = __builtin_return_address(0);
if (ppc_md.ioremap)
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
- unsigned long flags = pgprot_val(pgprot_cached(__pgprot(0)));
+ unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL));
void *caller = __builtin_return_address(0);
if (ppc_md.ioremap)
int len;
struct resource rsrc;
- int ioflags;
ocm = ocm_get_node(count);
/* ioremap the non-cached region */
if (ocm->nc.memtotal) {
- ioflags = _PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_EXEC;
ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal,
- ioflags);
+ _PAGE_EXEC | PAGE_KERNEL_NCG);
if (!ocm->nc.virt) {
printk(KERN_ERR
/* ioremap the cached region */
if (ocm->c.memtotal) {
- ioflags = _PAGE_EXEC;
ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal,
- ioflags);
+ _PAGE_EXEC | PAGE_KERNEL);
if (!ocm->c.virt) {
printk(KERN_ERR
if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
(__ioremap_at(io.start, cf->io_virt, cf->io_size,
- pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL)) {
+ pgprot_val(pgprot_noncached(PAGE_KERNEL))) == NULL)) {
dev_err(device, "can't ioremap ranges\n");
status = -ENOMEM;
goto fail1;