spin_unlock(&mm->page_table_lock);
}
-static void __init xen_pagetable_setup_start(pgd_t *base)
-{
-}
+static void xen_post_allocator_init(void);
- static void __init xen_pagetable_init(void)
- {
- paging_init();
- xen_setup_shared_info();
- xen_post_allocator_init();
- }
-
static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
{
/* reserve the range used */
}
-static void xen_post_allocator_init(void);
-
+ #ifdef CONFIG_X86_64
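+ /* Clear the PMD entries in level2_kernel_pgt that map the __ka range
+ * [vaddr, vaddr_end], skipping anything that covers the kernel image
+ * itself (_text up to the PMD-rounded _brk_end). */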
+ static void __init xen_cleanhighmap(unsigned long vaddr,
+ unsigned long vaddr_end)
+ {
+ unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
+ pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
+
+ /* NOTE: The loop is more greedy than the cleanup_highmap variant.
+ * We include the PMD passed in on _both_ boundaries. */
+ for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
+ pmd++, vaddr += PMD_SIZE) {
+ if (pmd_none(*pmd))
+ continue;
+ if (vaddr < (unsigned long) _text || vaddr > kernel_end)
+ set_pmd(pmd, __pmd(0));
+ }
+ /* In case we did something silly, we should crash in this function
+ * instead of somewhere later, where it would be confusing. */
+ xen_mc_flush();
+ }
+ #endif
-static void __init xen_pagetable_setup_done(pgd_t *base)
++static void __init xen_pagetable_init(void)
+ {
+ #ifdef CONFIG_X86_64
+ unsigned long size;
+ unsigned long addr;
+ #endif
-
++ paging_init();
+ xen_setup_shared_info();
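+
+ /* 64-bit only: reclaim the __ka mappings left over from early boot.
+ * If xen_revector_p2m_tree() handed back a relocated copy of the P2M
+ * list, switch mfn_list over to it, free the old array and clear its
+ * __ka mapping, then clear the __ka mapping of the initial page tables
+ * and revector pt_base to its __va alias. */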
+ #ifdef CONFIG_X86_64
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ unsigned long new_mfn_list;
+
+ size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+
+ /* On 32-bit, we get zero so this never gets executed. */
+ new_mfn_list = xen_revector_p2m_tree();
+ if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
+ /* still using the __ka address; fill the old list with INVALID_P2M_ENTRY */
+ memset((void *)xen_start_info->mfn_list, 0xff, size);
+
+ /* We should be in __ka space. */
+ BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
+ addr = xen_start_info->mfn_list;
+ /* We round up to the PMD, which means that if anybody at this stage is
+ * using the __ka address of xen_start_info or xen_start_info->shared_info
+ * they are going to crash. Fortunately we have already revectored
+ * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
+ size = roundup(size, PMD_SIZE);
+ xen_cleanhighmap(addr, addr + size);
+
+ size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+ memblock_free(__pa(xen_start_info->mfn_list), size);
+ /* And revector! Bye bye old array */
+ xen_start_info->mfn_list = new_mfn_list;
+ } else
+ goto skip;
+ }
+ /* At this stage, cleanup_highmap has already cleaned __ka space
+ * from _brk_limit way up to max_pfn_mapped (which is the end of
+ * the ramdisk). We continue on, erasing the PMD entries that map the
+ * initial page tables - note that those are still accessible at this
+ * stage via __va. For good measure we also round up to the PMD, which
+ * means that if anybody is still using the __ka address of the initial
+ * boot stack and tries to use it, they are going to crash. The
+ * xen_start_info has already been taken care of in
+ * xen_setup_kernel_pagetable. */
+ addr = xen_start_info->pt_base;
+ size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
+
+ xen_cleanhighmap(addr, addr + size);
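+ /* pt_base is still a __ka address; now that its __ka mapping is gone,
+ * switch it to the matching __va (direct map) alias. */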
+ xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
+ #ifdef DEBUG
+ /* This is superfluous and not really necessary, but let's do it
+ * anyway. The MODULES_VADDR -> MODULES_END range should be clear of
+ * anything at this stage. */
+ xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
+ #endif
+ skip:
+ #endif
+ xen_post_allocator_init();
+ }
-
static void xen_write_cr2(unsigned long cr2)
{
this_cpu_read(xen_vcpu)->arch.cr2 = cr2;