return 8;
}
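+
+/*
+ * 32-bit ARM has no 52-bit physical addressing, so the VTTBR takes
+ * the address unchanged.
+ */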
+#define kvm_phys_to_vttbr(addr) (addr)
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
#endif
.endm
+/*
+ * Arrange a physical address in a TTBR register, taking care of 52-bit
+ * addresses.
+ *
+ * phys: physical address, preserved
+ * ttbr: returns the TTBR value
+ */
+ .macro phys_to_ttbr, phys, ttbr
+#ifdef CONFIG_ARM64_PA_BITS_52
+ orr \ttbr, \phys, \phys, lsr #46 // move PA[51:48] down into bits [5:2]
+ and \ttbr, \ttbr, #TTBR_BADDR_MASK_52 // keep only the BADDR field, bits [47:2]
+#else
+ mov \ttbr, \phys
+#endif
+ .endm
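+
+/*
+ * For example, with CONFIG_ARM64_PA_BITS_52 a table at physical address
+ * 0x0009_0000_4000_0000 (an illustrative value) encodes as the TTBR value
+ * 0x0000_0000_4000_0024: PA[51:48] (here 0x9) lands in TTBR[5:2], while
+ * PA[47:6] stays in place.
+ */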
+
#endif /* __ASM_ASSEMBLER_H */
return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
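+
+/* Encode a stage-2 pgd physical address in the VTTBR BADDR format. */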
+#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
*/
static inline void cpu_set_reserved_ttbr0(void)
{
- unsigned long ttbr = __pa_symbol(empty_zero_page);
+ unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
write_sysreg(ttbr, ttbr0_el1);
isb();
#ifndef __ASM_PGTABLE_HWDEF_H
#define __ASM_PGTABLE_HWDEF_H
+#include <asm/memory.h>
+
/*
* Number of page-table levels required to address 'va_bits' wide
* address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
+/*
+ * TTBR.
+ */
+#ifdef CONFIG_ARM64_PA_BITS_52
+/*
+ * This should be GENMASK_ULL(47, 2), i.e. 0x0000_FFFF_FFFF_FFFC.
+ * TTBR_ELx[1] is RES0 in this configuration.
+ */
+#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2)
+#endif
+
#endif
#define kc_vaddr_to_offset(v) ((v) & ~VA_START)
#define kc_offset_to_vaddr(o) ((o) | VA_START)
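+
+/*
+ * C counterpart of the phys_to_ttbr assembler macro: with 52-bit
+ * physical addresses, fold PA[51:48] into TTBR bits [5:2]; otherwise
+ * the address is used as-is.
+ */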
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
+#else
+#define phys_to_ttbr(addr) (addr)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
update_early_cpu_boot_status 0, x1, x2
adrp x1, idmap_pg_dir
adrp x2, swapper_pg_dir
- msr ttbr0_el1, x1 // load TTBR0
- msr ttbr1_el1, x2 // load TTBR1
+ phys_to_ttbr x1, x3
+ phys_to_ttbr x2, x4
+ msr ttbr0_el1, x3 // load TTBR0
+ msr ttbr1_el1, x4 // load TTBR1
isb
msr sctlr_el1, x0
isb
* Even switching to our copied tables will cause a changed output address at
* each stage of the walk.
*/
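+/*
+ * zero_page and page_table are physical addresses; tmp is a scratch
+ * register clobbered while building the TTBR values.
+ */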
-.macro break_before_make_ttbr_switch zero_page, page_table
- msr ttbr1_el1, \zero_page
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+ phys_to_ttbr \zero_page, \tmp
+ msr ttbr1_el1, \tmp
isb
tlbi vmalle1
dsb nsh
- msr ttbr1_el1, \page_table
+ phys_to_ttbr \page_table, \tmp
+ msr ttbr1_el1, \tmp
isb
.endm
* We execute from ttbr0, change ttbr1 to our copied linear map tables
* with a break-before-make via the zero page
*/
- break_before_make_ttbr_switch x5, x0
+ break_before_make_ttbr_switch x5, x0, x6
mov x21, x1
mov x30, x2
dsb ish /* wait for PoU cleaning to finish */
/* switch to the restored kernels page tables */
- break_before_make_ttbr_switch x25, x21
+ break_before_make_ttbr_switch x25, x21, x6
ic ialluis
dsb ish
*/
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
- write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+ write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
isb();
*phys_dst_addr = virt_to_phys((void *)dst);
cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc
- msr ttbr0_el2, x0
+ phys_to_ttbr x0, x4
+ msr ttbr0_el2, x4
mrs x4, tcr_el1
ldr x5, =TCR_EL2_MASK
if (PGD_SIZE == PAGE_SIZE)
return;
+#ifdef CONFIG_ARM64_PA_BITS_52
+ /*
+ * With 52-bit physical addresses, the architecture requires the
+ * top-level table to be aligned to at least 64 bytes, because TTBR
+ * bits [5:2] are repurposed to hold bits [51:48] of the table address.
+ */
+ BUILD_BUG_ON(PGD_SIZE < 64);
+#endif
+
/*
* Naturally aligned pgds required by the architecture.
*/
* - pgd_phys - physical address of new TTB
*/
ENTRY(cpu_do_switch_mm)
- pre_ttbr0_update_workaround x0, x2, x3
+ phys_to_ttbr x0, x2
+ pre_ttbr0_update_workaround x2, x3, x4
mmid x1, x1 // get mm->context.id
- bfi x0, x1, #48, #16 // set the ASID
- msr ttbr0_el1, x0 // set TTBR0
+ bfi x2, x1, #48, #16 // set the ASID
+ msr ttbr0_el1, x2 // set TTBR0
isb
post_ttbr0_update_workaround
ret
save_and_disable_daif flags=x2
adrp x1, empty_zero_page
- msr ttbr1_el1, x1
+ phys_to_ttbr x1, x3
+ msr ttbr1_el1, x3
isb
tlbi vmalle1
dsb nsh
isb
- msr ttbr1_el1, x0
+ phys_to_ttbr x0, x3
+ msr ttbr1_el1, x3
isb
restore_daif x2
pgd_phys = virt_to_phys(kvm->arch.pgd);
BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
- kvm->arch.vttbr = pgd_phys | vmid;
+ kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
spin_unlock(&kvm_vmid_lock);
}