powerpc: hand over page flags with a pgprot_t parameter
authorChristophe Leroy <christophe.leroy@c-s.fr>
Tue, 9 Oct 2018 13:51:45 +0000 (13:51 +0000)
committerMichael Ellerman <mpe@ellerman.id.au>
Sun, 14 Oct 2018 07:04:09 +0000 (18:04 +1100)
In order to avoid multiple conversions, hand a pgprot_t directly
to map_kernel_page(), as is already done for radix.

Do the same for __ioremap_caller() and __ioremap_at().

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
20 files changed:
arch/powerpc/include/asm/book3s/32/pgtable.h
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/fixmap.h
arch/powerpc/include/asm/io.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/nohash/32/pgtable.h
arch/powerpc/include/asm/nohash/64/pgtable.h
arch/powerpc/kernel/io-workarounds.c
arch/powerpc/kernel/isa-bridge.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/lib/code-patching.c
arch/powerpc/mm/8xx_mmu.c
arch/powerpc/mm/dma-noncoherent.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/pgtable-book3e.c
arch/powerpc/mm/pgtable-hash64.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/mm/pgtable_64.c
drivers/pcmcia/electra_cf.c

index 751cf931bb3f4da228c05c062846d33b60421304..7a9f0ed599ff924d015c0905026fe1179fd2d7c5 100644 (file)
@@ -292,7 +292,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)          ((pte_t) { (x).val << 3 })
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 
 /* Generic accessors to PTE bits */
 static inline int pte_write(pte_t pte)         { return !!(pte_val(pte) & _PAGE_RW);}
index fcf8b10a209feff70217c0bfc88b41ad0192fb56..247aff9cc6badebac372eec987f2239663b054fc 100644 (file)
@@ -201,8 +201,7 @@ static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 
-extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
-                            unsigned long flags);
+int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
 extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
                                              unsigned long page_size,
                                              unsigned long phys);
index c68cbbff3429378f2c0f624d7c8883bbd94ef083..eae6e10305238bf4afa4240e69ea4a2e1e4cf13b 100644 (file)
@@ -1030,17 +1030,16 @@ extern struct page *pgd_page(pgd_t pgd);
 #define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
-static inline int map_kernel_page(unsigned long ea, unsigned long pa,
-                                 unsigned long flags)
+static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
        if (radix_enabled()) {
 #if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
                unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
                WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
 #endif
-               return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
+               return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
        }
-       return hash__map_kernel_page(ea, pa, flags);
+       return hash__map_kernel_page(ea, pa, prot);
 }
 
 static inline int __meminit vmemmap_create_mapping(unsigned long start,
index 41cc15c14eee26b1e29781b9eba94cdc000a179d..b9fbed84ddca40da0844933d6baaf04b2b5c52c7 100644 (file)
@@ -72,7 +72,7 @@ enum fixed_addresses {
 static inline void __set_fixmap(enum fixed_addresses idx,
                                phys_addr_t phys, pgprot_t flags)
 {
-       map_kernel_page(fix_to_virt(idx), phys, pgprot_val(flags));
+       map_kernel_page(fix_to_virt(idx), phys, flags);
 }
 
 #endif /* !__ASSEMBLY__ */
index cdccab3938dbef695bb993959ecf843418ca0761..0a034519957d8bb02bf627321e5c0a9209cb5931 100644 (file)
@@ -786,12 +786,12 @@ extern void iounmap(volatile void __iomem *addr);
 extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
                               unsigned long flags);
 extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
-                                     unsigned long flags, void *caller);
+                                     pgprot_t prot, void *caller);
 
 extern void __iounmap(volatile void __iomem *addr);
 
 extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
-                                  unsigned long size, unsigned long flags);
+                                  unsigned long size, pgprot_t prot);
 extern void __iounmap_at(void *ea, unsigned long size);
 
 /*
index b4831f1338db56484582bcb7770dcbc6e7da6467..8311869005fa8d4769020c7aebdddf1335da415d 100644 (file)
@@ -35,7 +35,7 @@ struct machdep_calls {
        char            *name;
 #ifdef CONFIG_PPC64
        void __iomem *  (*ioremap)(phys_addr_t addr, unsigned long size,
-                                  unsigned long flags, void *caller);
+                                  pgprot_t prot, void *caller);
        void            (*iounmap)(volatile void __iomem *token);
 
 #ifdef CONFIG_PM
index a507a65b0866b9907cbf1cce71711c2ef1fa0765..a7f44498ab6f67e404331dc3abce942b791a739c 100644 (file)
@@ -323,7 +323,7 @@ static inline int pte_young(pte_t pte)
 #define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)          ((pte_t) { (x).val << 3 })
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 
 #endif /* !__ASSEMBLY__ */
 
index 7cd6809f4d332321e5f7eb806693e94fd5915b48..513b6e9e62c65fd2a0e9bc96b98a0146e6cdc7a4 100644 (file)
@@ -327,8 +327,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)          __pte((x).val)
 
-extern int map_kernel_page(unsigned long ea, unsigned long pa,
-                          unsigned long flags);
+int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
 extern int __meminit vmemmap_create_mapping(unsigned long start,
                                            unsigned long page_size,
                                            unsigned long phys);
index aa9f1b8261dbf812ad110f5ac736de5c82e36589..7e89d02a84e1352997c8a201d53e3f70262c1521 100644 (file)
@@ -153,10 +153,10 @@ static const struct ppc_pci_io iowa_pci_io = {
 
 #ifdef CONFIG_PPC_INDIRECT_MMIO
 static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
-                                 unsigned long flags, void *caller)
+                                 pgprot_t prot, void *caller)
 {
        struct iowa_bus *bus;
-       void __iomem *res = __ioremap_caller(addr, size, flags, caller);
+       void __iomem *res = __ioremap_caller(addr, size, prot, caller);
        int busno;
 
        bus = iowa_pci_find(0, (unsigned long)addr);
index 072e384f8c866803031fc49748e7d7d3c9a68759..fda3ae48480c8eebe121d602604e5dad5fa0bc92 100644 (file)
@@ -110,14 +110,14 @@ static void pci_process_ISA_OF_ranges(struct device_node *isa_node,
                size = 0x10000;
 
        __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
-                    size, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+                    size, pgprot_noncached(PAGE_KERNEL));
        return;
 
 inval_range:
        printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
               "mapping 64k\n");
        __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
-                    0x10000, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+                    0x10000, pgprot_noncached(PAGE_KERNEL));
 }
 
 
@@ -253,7 +253,7 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
         */
        isa_io_base = ISA_IO_BASE;
        __ioremap_at(pbase, (void *)ISA_IO_BASE,
-                    size, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+                    size, pgprot_noncached(PAGE_KERNEL));
 
        pr_debug("ISA: Non-PCI bridge is %pOF\n", np);
 }
index 64bb4dd2b8f1e441934d96a37a9ae1456e0dc0ee..9d8c10d554075166b57a6906629d882a18dc43a5 100644 (file)
@@ -159,7 +159,7 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
 
        /* Establish the mapping */
        if (__ioremap_at(phys_page, area->addr, size_page,
-                        pgprot_val(pgprot_noncached(PAGE_KERNEL))) == NULL)
+                        pgprot_noncached(PAGE_KERNEL)) == NULL)
                return -ENOMEM;
 
        /* Fixup hose IO resource */
index 5ffee298745fe4e98a66d2410d86e6579d2b2560..89502cbccb1b9d846d9be6e414ef4c33cdaf6089 100644 (file)
@@ -98,8 +98,7 @@ static int map_patch_area(void *addr, unsigned long text_poke_addr)
        else
                pfn = __pa_symbol(addr) >> PAGE_SHIFT;
 
-       err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT),
-                               pgprot_val(PAGE_KERNEL));
+       err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
 
        pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err);
        if (err)
index cf77d755246db6e4a57a0d41a0dd0ebc7adfb4fa..9137361d687d05183bd014fbb46f2e7ce08c0e3b 100644 (file)
@@ -91,11 +91,10 @@ static void __init mmu_mapin_immr(void)
 {
        unsigned long p = PHYS_IMMR_BASE;
        unsigned long v = VIRT_IMMR_BASE;
-       unsigned long f = pgprot_val(PAGE_KERNEL_NCG);
        int offset;
 
        for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
-               map_kernel_page(v + offset, p + offset, f);
+               map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
 }
 
 /* Address of instructions to patch */
index 38252847543309975162c84468b94740e4d40279..b6e7b5952ab5490abadb28875258c623cc744bcc 100644 (file)
@@ -228,7 +228,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
                do {
                        SetPageReserved(page);
                        map_kernel_page(vaddr, page_to_phys(page),
-                                pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+                                       pgprot_noncached(PAGE_KERNEL));
                        page++;
                        vaddr += PAGE_SIZE;
                } while (size -= PAGE_SIZE);
index 04ccb274a6205bba58357d5897105ada90f81c0f..cb421aeb7674fee72a9af60d7e1bb7eff492e33f 100644 (file)
@@ -309,11 +309,11 @@ void __init paging_init(void)
        unsigned long end = __fix_to_virt(FIX_HOLE);
 
        for (; v < end; v += PAGE_SIZE)
-               map_kernel_page(v, 0, 0); /* XXX gross */
+               map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
 #endif
 
 #ifdef CONFIG_HIGHMEM
-       map_kernel_page(PKMAP_BASE, 0, 0);      /* XXX gross */
+       map_kernel_page(PKMAP_BASE, 0, __pgprot(0));    /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);
 
        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
index a2298930f990d7336c1c3ad26159547d7345899a..e0ccf36714b2330e2d93ff38d9ab443222a30b7e 100644 (file)
@@ -42,7 +42,7 @@ int __meminit vmemmap_create_mapping(unsigned long start,
         * thus must have the low bits clear
         */
        for (i = 0; i < page_size; i += PAGE_SIZE)
-               BUG_ON(map_kernel_page(start + i, phys, flags));
+               BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags)));
 
        return 0;
 }
@@ -70,7 +70,7 @@ static __ref void *early_alloc_pgtable(unsigned long size)
  * map_kernel_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
+int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
        pgd_t *pgdp;
        pud_t *pudp;
@@ -89,8 +89,6 @@ int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
-               set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
-                                                         __pgprot(flags)));
        } else {
                pgdp = pgd_offset_k(ea);
 #ifndef __PAGETABLE_PUD_FOLDED
@@ -113,9 +111,8 @@ int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
                        pmd_populate_kernel(&init_mm, pmdp, ptep);
                }
                ptep = pte_offset_kernel(pmdp, ea);
-               set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
-                                                         __pgprot(flags)));
        }
+       set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
 
        smp_wmb();
        return 0;
index 692bfc9e372cb0dcd82f75ff144626707f65a837..c08d49046a968971718af300959a90b790720086 100644 (file)
@@ -142,7 +142,7 @@ void hash__vmemmap_remove_mapping(unsigned long start,
  * map_kernel_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
+int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
        pgd_t *pgdp;
        pud_t *pudp;
@@ -161,8 +161,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
-               set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
-                                                         __pgprot(flags)));
+               set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
        } else {
                /*
                 * If the mm subsystem is not fully up, we cannot create a
@@ -170,7 +169,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag
                 * entry in the hardware page table.
                 *
                 */
-               if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
+               if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
                                      mmu_io_psize, mmu_kernel_ssize)) {
                        printk(KERN_ERR "Failed to do bolted mapping IO "
                               "memory at %016lx !\n", pa);
index 6a81a2446c47176ec43888d75754973d145d14ba..0bbc7b7d8a05d025e458887e78ff10e88b25344c 100644 (file)
@@ -76,36 +76,36 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
-       unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL));
+       pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 
-       return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+       return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *
 ioremap_wc(phys_addr_t addr, unsigned long size)
 {
-       unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL));
+       pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
 
-       return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+       return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *
 ioremap_wt(phys_addr_t addr, unsigned long size)
 {
-       unsigned long flags = pgprot_val(pgprot_cached_wthru(PAGE_KERNEL));
+       pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);
 
-       return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+       return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_wt);
 
 void __iomem *
 ioremap_coherent(phys_addr_t addr, unsigned long size)
 {
-       unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL));
+       pgprot_t prot = pgprot_cached(PAGE_KERNEL);
 
-       return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+       return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_coherent);
 
@@ -120,19 +120,18 @@ ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
        flags &= ~(_PAGE_USER | _PAGE_EXEC);
        flags |= _PAGE_PRIVILEGED;
 
-       return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+       return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_prot);
 
 void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
-       return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+       return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
 }
 
 void __iomem *
-__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
-                void *caller)
+__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
 {
        unsigned long v, i;
        phys_addr_t p;
@@ -195,7 +194,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 
        err = 0;
        for (i = 0; i < size && err == 0; i += PAGE_SIZE)
-               err = map_kernel_page(v+i, p+i, flags);
+               err = map_kernel_page(v + i, p + i, prot);
        if (err) {
                if (slab_is_available())
                        vunmap((void *)v);
@@ -221,7 +220,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 {
        pmd_t *pd;
        pte_t *pg;
@@ -237,9 +236,8 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
                 * hash table
                 */
                BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
-                      flags);
-               set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
-                                                    __pgprot(flags)));
+                      pgprot_val(prot));
+               set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
        }
        smp_wmb();
        return err;
@@ -250,7 +248,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
  */
 static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 {
-       unsigned long v, s, f;
+       unsigned long v, s;
        phys_addr_t p;
        int ktext;
 
@@ -260,8 +258,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
        for (; s < top; s += PAGE_SIZE) {
                ktext = ((char *)v >= _stext && (char *)v < etext) ||
                        ((char *)v >= _sinittext && (char *)v < _einittext);
-               f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
-               map_kernel_page(v, p, f);
+               map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
 #ifdef CONFIG_PPC_STD_MMU_32
                if (ktext)
                        hash_preload(&init_mm, v, 0, 0x300);
index 1f1bb40555a8629016cc7fc151ca2917e9c4de6e..b0f4a4b4f62b94bb1de3000aadfaa39b1d21e604 100644 (file)
@@ -113,13 +113,12 @@ unsigned long ioremap_bot = IOREMAP_BASE;
  * __ioremap_at - Low level function to establish the page tables
  *                for an IO mapping
  */
-void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
-                           unsigned long flags)
+void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
 {
        unsigned long i;
 
        /* We don't support the 4K PFN hack with ioremap */
-       if (flags & H_PAGE_4K_PFN)
+       if (pgprot_val(prot) & H_PAGE_4K_PFN)
                return NULL;
 
        WARN_ON(pa & ~PAGE_MASK);
@@ -127,7 +126,7 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
        WARN_ON(size & ~PAGE_MASK);
 
        for (i = 0; i < size; i += PAGE_SIZE)
-               if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
+               if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
                        return NULL;
 
        return (void __iomem *)ea;
@@ -148,7 +147,7 @@ void __iounmap_at(void *ea, unsigned long size)
 }
 
 void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
-                               unsigned long flags, void *caller)
+                               pgprot_t prot, void *caller)
 {
        phys_addr_t paligned;
        void __iomem *ret;
@@ -178,11 +177,11 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
                        return NULL;
 
                area->phys_addr = paligned;
-               ret = __ioremap_at(paligned, area->addr, size, flags);
+               ret = __ioremap_at(paligned, area->addr, size, prot);
                if (!ret)
                        vunmap(area->addr);
        } else {
-               ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
+               ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
                if (ret)
                        ioremap_bot += size;
        }
@@ -195,37 +194,37 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
 void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
                         unsigned long flags)
 {
-       return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+       return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
 }
 
 void __iomem * ioremap(phys_addr_t addr, unsigned long size)
 {
-       unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL));
+       pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);
 
        if (ppc_md.ioremap)
-               return ppc_md.ioremap(addr, size, flags, caller);
-       return __ioremap_caller(addr, size, flags, caller);
+               return ppc_md.ioremap(addr, size, prot, caller);
+       return __ioremap_caller(addr, size, prot, caller);
 }
 
 void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
 {
-       unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL));
+       pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);
 
        if (ppc_md.ioremap)
-               return ppc_md.ioremap(addr, size, flags, caller);
-       return __ioremap_caller(addr, size, flags, caller);
+               return ppc_md.ioremap(addr, size, prot, caller);
+       return __ioremap_caller(addr, size, prot, caller);
 }
 
 void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
 {
-       unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL));
+       pgprot_t prot = pgprot_cached(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);
 
        if (ppc_md.ioremap)
-               return ppc_md.ioremap(addr, size, flags, caller);
-       return __ioremap_caller(addr, size, flags, caller);
+               return ppc_md.ioremap(addr, size, prot, caller);
+       return __ioremap_caller(addr, size, prot, caller);
 }
 
 void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
@@ -246,8 +245,8 @@ void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
        flags |= _PAGE_PRIVILEGED;
 
        if (ppc_md.ioremap)
-               return ppc_md.ioremap(addr, size, flags, caller);
-       return __ioremap_caller(addr, size, flags, caller);
+               return ppc_md.ioremap(addr, size, __pgprot(flags), caller);
+       return __ioremap_caller(addr, size, __pgprot(flags), caller);
 }
 
 
index 34d6c1a0971e4391182152b4b57249f72930b1d2..b31abe35ed2c00df91c83cbc8642b8ab3a5d68e3 100644 (file)
@@ -230,7 +230,7 @@ static int electra_cf_probe(struct platform_device *ofdev)
 
        if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
            (__ioremap_at(io.start, cf->io_virt, cf->io_size,
-                         pgprot_val(pgprot_noncached(PAGE_KERNEL))) == NULL)) {
+                         pgprot_noncached(PAGE_KERNEL)) == NULL)) {
                dev_err(device, "can't ioremap ranges\n");
                status = -ENOMEM;
                goto fail1;