From ece0e2b6406a995c371e0311190631ea34ad851a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 26 Oct 2010 14:21:52 -0700
Subject: [PATCH] mm: remove pte_*map_nested()

Since we no longer need to provide KM_type, the whole pte_*map_nested()
API is now redundant, remove it.

Signed-off-by: Peter Zijlstra
Acked-by: Chris Metcalf
Cc: David Howells
Cc: Hugh Dickins
Cc: Rik van Riel
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: "H. Peter Anvin"
Cc: Steven Rostedt
Cc: Russell King
Cc: Ralf Baechle
Cc: David Miller
Cc: Paul Mackerras
Cc: Benjamin Herrenschmidt
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/alpha/include/asm/pgtable.h         |  2 --
 arch/arm/include/asm/pgtable.h           | 14 ++++++--------
 arch/arm/mm/fault-armv.c                 |  4 ++--
 arch/arm/mm/pgd.c                        |  4 ++--
 arch/avr32/include/asm/pgtable.h         |  2 --
 arch/cris/include/asm/pgtable.h          |  2 --
 arch/frv/include/asm/pgtable.h           |  9 ++-------
 arch/ia64/include/asm/pgtable.h          |  2 --
 arch/m32r/include/asm/pgtable.h          |  2 --
 arch/m68k/include/asm/motorola_pgtable.h |  2 --
 arch/m68k/include/asm/sun3_pgtable.h     |  2 --
 arch/microblaze/include/asm/pgtable.h    |  7 ++-----
 arch/mips/include/asm/pgtable-32.h       |  3 ---
 arch/mips/include/asm/pgtable-64.h       |  3 ---
 arch/mn10300/include/asm/pgtable.h       |  2 --
 arch/parisc/include/asm/pgtable.h        |  2 --
 arch/powerpc/include/asm/pgtable-ppc32.h |  8 ++------
 arch/powerpc/include/asm/pgtable-ppc64.h |  2 --
 arch/s390/include/asm/pgtable.h          |  2 --
 arch/score/include/asm/pgtable.h         |  3 ---
 arch/sh/include/asm/pgtable_32.h         |  3 ---
 arch/sh/include/asm/pgtable_64.h         |  2 --
 arch/sparc/include/asm/pgtable_32.h      |  3 ---
 arch/sparc/include/asm/pgtable_64.h      |  2 --
 arch/tile/include/asm/pgtable.h          |  5 -----
 arch/um/include/asm/pgtable.h            |  2 --
 arch/x86/include/asm/pgtable_32.h        | 14 ++------------
 arch/x86/include/asm/pgtable_64.h        |  2 --
 arch/xtensa/include/asm/pgtable.h        |  3 ---
 mm/memory.c                              |  4 ++--
 mm/mremap.c                              |  4 ++--
 31 files changed, 22 insertions(+), 99 deletions(-)

diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 71a243294142..de98a732683d 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -318,9 +318,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
 }

 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
 #define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)

 extern pgd_t swapper_pg_dir[1024];

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a9672e8406a3..b155414192da 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -263,17 +263,15 @@ extern struct page *empty_zero_page;
 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
 #define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))

-#define pte_offset_map(dir,addr)	(__pte_map(dir, KM_PTE0) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr)	(__pte_map(dir, KM_PTE1) + __pte_index(addr))
-#define pte_unmap(pte)			__pte_unmap(pte, KM_PTE0)
-#define pte_unmap_nested(pte)		__pte_unmap(pte, KM_PTE1)
+#define pte_offset_map(dir,addr)	(__pte_map(dir) + __pte_index(addr))
+#define pte_unmap(pte)			__pte_unmap(pte)

 #ifndef CONFIG_HIGHPTE
-#define __pte_map(dir,km)	pmd_page_vaddr(*(dir))
-#define __pte_unmap(pte,km)	do { } while (0)
+#define __pte_map(dir)		pmd_page_vaddr(*(dir))
+#define __pte_unmap(pte)	do { } while (0)
 #else
-#define __pte_map(dir,km)	((pte_t *)kmap_atomic(pmd_page(*(dir)), km) + PTRS_PER_PTE)
-#define __pte_unmap(pte,km)	kunmap_atomic((pte - PTRS_PER_PTE), km)
+#define __pte_map(dir)		((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
+#define __pte_unmap(pte)	kunmap_atomic((pte - PTRS_PER_PTE))
 #endif

 #define set_pte_ext(ptep,pte,ext)	cpu_set_pte_ext(ptep,pte,ext)

diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 8440d952ba6d..c493d7244d3d 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -89,13 +89,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
-	pte = pte_offset_map_nested(pmd, address);
+	pte = pte_offset_map(pmd, address);
	spin_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	spin_unlock(ptl);
-	pte_unmap_nested(pte);
+	pte_unmap(pte);

	return ret;
 }
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index be5f58e153bf..69bbfc6645a6 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -57,9 +57,9 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
-		init_pte = pte_offset_map_nested(init_pmd, 0);
+		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
-		pte_unmap_nested(init_pte);
+		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

diff --git a/arch/avr32/include/asm/pgtable.h b/arch/avr32/include/asm/pgtable.h
index a9ae30c41e74..6fbfea61f7bb 100644
--- a/arch/avr32/include/asm/pgtable.h
+++ b/arch/avr32/include/asm/pgtable.h
@@ -319,9 +319,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(dir, address)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)

 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index f63d6fccbc6c..9eaae217b21b 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -248,10 +248,8 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long addre
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)

 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #define pte_pfn(x)		((unsigned long)(__va((x).pte)) >> PAGE_SHIFT)
 #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index c18b0d32e636..6bc241e4b4f8 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -451,17 +451,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)

 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+	((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif

 /*
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index c3286f42e501..1a97af31ef17 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -406,9 +406,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
 #define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr)	pte_offset_map(dir, addr)
 #define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)

 /* atomic versions of the some PTE manipulations: */

diff --git a/arch/m32r/include/asm/pgtable.h b/arch/m32r/include/asm/pgtable.h
index e6359c566b50..8a28cfea2729 100644
--- a/arch/m32r/include/asm/pgtable.h
+++ b/arch/m32r/include/asm/pgtable.h
@@ -332,9 +332,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address)	\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address)	pte_offset_map(dir, address)
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)

 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 2) & 0x1f)
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 8e9a8a754dde..45bd3f589bf0 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -221,9 +221,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
 }

 #define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
 #define pte_unmap(pte)		((void)0)
-#define pte_unmap_nested(pte)	((void)0)

 /*
  * Allocate and free page tables. The xxx_kernel() versions are
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index f847ec732d62..cf5fad9b5250 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -219,9 +219,7 @@ static inline pte_t pgoff_to_pte(unsigned off)
 #define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
 /* FIXME: should we bother with kmap() here? */
 #define pte_offset_map(pmd, address) ((pte_t *)kmap(pmd_page(*pmd)) + pte_index(address))
-#define pte_offset_map_nested(pmd, address) pte_offset_map(pmd, address)
 #define pte_unmap(pte) kunmap(pte)
-#define pte_unmap_nested(pte) kunmap(pte)

 /* Macros to (de)construct the fake PTEs representing swap pages. */
 #define __swp_type(x)		((x).val & 0x7F)
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index d4f421672d3b..cae268c22ba2 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -504,12 +504,9 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
 #define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)	\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr)	\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))

-#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+#define pte_unmap(pte)		kunmap_atomic(pte)

 /* Encode and decode a nonlinear file mapping entry */
 #define PTE_FILE_MAX_BITS	29
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index ae90412556d0..8a153d2fa62a 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -154,10 +154,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)

 #define pte_offset_map(dir, address)	\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)	\
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))

 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 1be4b0fa30da..f00896087dda 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -257,10 +257,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address)	\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)	\
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))

 /*
  * Initialize a new pgd / pmd table with invalid pointers.
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index 16d88577f3e0..b049a8bd1577 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -457,9 +457,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)

 #define pte_offset_map(dir, address) \
	((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte)		do {} while (0)
-#define pte_unmap_nested(pte)	do {} while (0)

 /*
  * The MN10300 has external MMU info in the form of a TLB: this is adapted from
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 01c15035e783..865f37a8a881 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -397,9 +397,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(pmd, address) \
	((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)

 #define pte_unmap(pte) do { } while (0)
 #define pte_unmap_nested(pte) do { } while (0)
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index a7db96f2b5c3..47edde8c3556 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -308,12 +308,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 #define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)	\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
-#define pte_offset_map_nested(dir, addr)	\
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
-
-#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte)		kunmap_atomic(pte)

 /*
  * Encode and decode a swap entry.
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 49865045d56f..2b09cd522d33 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -193,9 +193,7 @@
  (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
 #define pte_unmap(pte)			do { } while(0)
-#define pte_unmap_nested(pte)		do { } while(0)

 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 986dc9476c21..02ace3491c51 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1094,9 +1094,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
 #define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)

 /*
  * 31 bit swap entry format:
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index ccf38f06c57d..2fd469807683 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -88,10 +88,7 @@ static inline void pmd_clear(pmd_t *pmdp)

 #define pte_offset_map(dir, address)	\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address)	\
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_unmap(pte) ((void)(pte))
-#define pte_unmap_nested(pte) ((void)(pte))

 /*
  * Bits 9(_PAGE_PRESENT) and 10(_PAGE_FILE)are taken,
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index e172d696e52b..69fdfbf14ea5 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -429,10 +429,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address)	pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address)	pte_offset_kernel(dir, address)
-
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)

 #ifdef CONFIG_X2TLB
 #define pte_ERROR(e) \
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index 0ee46776dad6..10a48111226d 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -84,9 +84,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
	((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

 #define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel(dir, addr)
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)

 #ifndef __ASSEMBLY__
 #define IOBASE_VADDR	0xff000000
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 0ece77f47753..303bd4dc8292 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -304,10 +304,7 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
  * and sun4c is guaranteed to have no highmem anyway.
  */
 #define pte_offset_map(d, a)		pte_offset_kernel(d,a)
-#define pte_offset_map_nested(d, a)	pte_offset_kernel(d,a)
-
 #define pte_unmap(pte)		do{}while(0)
-#define pte_unmap_nested(pte)	do{}while(0)

 /* Certain architectures need to do special things when pte's
  * within a page table are directly modified.  Thus, the following
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index f5b5fa76c02d..f8dddb7045bb 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -652,9 +652,7 @@ static inline int pte_special(pte_t pte)
	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 #define pte_offset_kernel		pte_index
 #define pte_offset_map			pte_index
-#define pte_offset_map_nested		pte_index
 #define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)

 /* Actual page table PTE updates.  */
 extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index b3367379d537..dc4ccdd855bc 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -347,15 +347,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
 #define pte_offset_map(dir, address) \
	_pte_offset_map(dir, address, KM_PTE0)
-#define pte_offset_map_nested(dir, address) \
-	_pte_offset_map(dir, address, KM_PTE1)
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif

 /* Clear a non-executable kernel PTE and flush it from the TLB. */
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index a9f7251b4a8d..41474fb5eee7 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -338,9 +338,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)

 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 8abde9ec90bf..0c92113c4cb6 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -49,24 +49,14 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 #endif

 #if defined(CONFIG_HIGHPTE)
-#define __KM_PTE			\
-	(in_nmi() ? KM_NMI_PTE :	\
-	 in_irq() ? KM_IRQ_PTE :	\
-	 KM_PTE0)
 #define pte_offset_map(dir, address)	\
-	((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) +	\
+	((pte_t *)kmap_atomic(pmd_page(*(dir))) +	\
	 pte_index((address)))
-#define pte_offset_map_nested(dir, address)	\
-	((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) +	\
-	 pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
-#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
+#define pte_unmap(pte) kunmap_atomic((pte))
 #else
 #define pte_offset_map(dir, address)	\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
-#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
 #endif

 /* Clear a kernel PTE and flush it from the TLB */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index f96ac9bedf75..f86da20347f2 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -127,9 +127,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }

 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
 #define pte_unmap(pte) ((void)(pte))/* NOP */
-#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */

 #define update_mmu_cache(vma, address, ptep) do { } while (0)

diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 76bf35554117..b03c043ce75b 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -324,10 +324,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 #define pte_offset_kernel(dir,addr)	\
	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
-
 #define pte_unmap(pte)		do { } while (0)
-#define pte_unmap_nested(pte)	do { } while (0)

 /*
diff --git a/mm/memory.c b/mm/memory.c
index af82741caaa4..92cc54e94137 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -736,7 +736,7 @@ again:
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
-	src_pte = pte_offset_map_nested(src_pmd, addr);
+	src_pte = pte_offset_map(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
@@ -767,7 +767,7 @@ again:

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
-	pte_unmap_nested(orig_src_pte);
+	pte_unmap(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();
diff --git a/mm/mremap.c b/mm/mremap.c
index cde56ee51ef7..563fbdd6293a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -101,7 +101,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
-	new_pte = pte_offset_map_nested(new_pmd, new_addr);
+	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -119,7 +119,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
-	pte_unmap_nested(new_pte - 1);
+	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);
-- 
2.30.2
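
For readers coming to this patch without the rest of the series: the _nested
variants existed only so that a second PTE page could be mapped while another
was already kmap_atomic()'d under CONFIG_HIGHPTE, each mapping pinned to its
own KM slot (KM_PTE0 for the plain calls, KM_PTE1 for the _nested ones, as the
removed definitions above show). Since kmap_atomic()/kunmap_atomic() no longer
take a KM_type argument, both mappings can go through plain pte_offset_map()
and pte_unmap(). A caller-side sketch of the change, mirroring the mm/memory.c
hunk above (illustrative only, not part of the patch):

	/* Old interface: the second, simultaneous mapping had to be "nested". */
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);	/* first map */
	src_pte = pte_offset_map_nested(src_pmd, addr);			/* second map */
	/* ... copy page table entries ... */
	pte_unmap_nested(src_pte);
	pte_unmap_unlock(dst_pte, dst_ptl);

	/* New interface: no KM slot to choose, so both mappings use the same
	 * calls; the call sites still unmap in the reverse of the map order.
	 */
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	src_pte = pte_offset_map(src_pmd, addr);
	/* ... copy page table entries ... */
	pte_unmap(src_pte);
	pte_unmap_unlock(dst_pte, dst_ptl);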