On arm64 there is optional support for a 52-bit virtual address space.
To exploit this, one has to be running with a 64KB page size on
hardware that supports it.
For an arm64 kernel supporting a 48-bit VA with a 64KB page size,
some changes are needed to support a 52-bit userspace:
* TCR_EL1.T0SZ needs to be 12 instead of 16,
* TASK_SIZE needs to reflect the new size.
This patch implements the above when support for 52-bit VAs is
detected at early boot time.
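For reference, TCR_ELx.TxSZ encodes the number of unused top address
bits, i.e. 64 minus the VA size; hence 64 - 52 = 12 for a 52-bit
userspace and 64 - 48 = 16 otherwise. A minimal, standalone C sketch of
just that arithmetic (illustrative only, not kernel code):

  #include <stdio.h>

  int main(void)
  {
          /* T0SZ = 64 - VA bits: prints 16 for 48-bit, 12 for 52-bit */
          for (int va_bits = 48; va_bits <= 52; va_bits += 4)
                  printf("%d-bit VA -> T0SZ = %d\n", va_bits, 64 - va_bits);
          return 0;
  }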
On arm64, userspace address translation is controlled by TTBR0_EL1. As
well as userspace, TTBR0_EL1 controls:
* The identity mapping,
* EFI runtime code.
It is possible to run a kernel with an identity mapping that has a
larger VA size than userspace (and for this case __cpu_set_tcr_t0sz()
would set TCR_EL1.T0SZ as appropriate). However, when the conditions
for 52-bit userspace are met, it is possible to keep TCR_EL1.T0SZ fixed
at 12. Thus in this patch, the TCR_EL1.T0SZ size-changing logic is
disabled.
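To make that concrete: the T0SZ update in mmu_context.h funnels through
__cpu_uses_extended_idmap(), so forcing that predicate to false (as the
hunk below does) leaves T0SZ untouched. A simplified sketch of the
resulting control flow, with the TCR_EL1 register rewrite elided:

  static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
  {
          /*
           * With CONFIG_ARM64_52BIT_VA, __cpu_uses_extended_idmap()
           * is always false, so TCR_EL1.T0SZ stays fixed at 12.
           */
          if (!__cpu_uses_extended_idmap())
                  return;
          /* ... otherwise rewrite TCR_EL1.T0SZ to cover the ID map ... */
  }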
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
endchoice
+config ARM64_52BIT_VA
+ def_bool y
+ depends on ARM64_VA_BITS_48 && ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+
config ARM64_PA_BITS
int
default 48 if ARM64_PA_BITS_48
.endm
/*
- * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
*/
- .macro tcr_set_idmap_t0sz, valreg, tmpreg
- ldr_l \tmpreg, idmap_t0sz
- bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+ .macro tcr_set_t0sz, valreg, t0sz
+ bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
.endm
/*
static inline bool __cpu_uses_extended_idmap(void)
{
+ if (IS_ENABLED(CONFIG_ARM64_52BIT_VA))
+ return false;
+
return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H
-#define TASK_SIZE_64 (UL(1) << VA_BITS)
-
-#define KERNEL_DS UL(-1)
-#define USER_DS (TASK_SIZE_64 - 1)
+#define KERNEL_DS UL(-1)
+#ifdef CONFIG_ARM64_52BIT_VA
+#define USER_DS ((UL(1) << 52) - 1)
+#else
+#define USER_DS ((UL(1) << VA_BITS) - 1)
+#endif /* CONFIG_ARM64_52BIT_VA */
/*
* On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS)
+extern u64 vabits_user;
+#define TASK_SIZE_64 (UL(1) << vabits_user)
+
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 UL(0x100000000)
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
adrp x0, idmap_pg_dir
adrp x3, __idmap_text_start // __pa(__idmap_text_start)
+#ifdef CONFIG_ARM64_52BIT_VA
+ mrs_s x6, SYS_ID_AA64MMFR2_EL1
+ and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ mov x5, #52
+ cbnz x6, 1f // hardware supports 52-bit VA (LVA)?
+#endif
+ mov x5, #VA_BITS
+1:
+ adr_l x6, vabits_user
+ str x5, [x6]
+ dmb sy
+ dc ivac, x6 // Invalidate potentially stale cache line
+
/*
* VA_BITS may be too small to allow for an ID mapping to be created
* that covers system RAM if that is located sufficiently high in the
pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
- VA_BITS, mm->pgd);
+ mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd);
pgdp = pgd_offset(mm, addr);
pgd = READ_ONCE(*pgdp);
pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
+u64 vabits_user __ro_after_init;
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1
- tcr_set_idmap_t0sz x10, x9
+
+#ifdef CONFIG_ARM64_52BIT_VA
+ ldr_l x9, vabits_user
+ sub x9, xzr, x9
+ add x9, x9, #64 // x9 = 64 - vabits_user = T0SZ
+#else
+ ldr_l x9, idmap_t0sz
+#endif
+ tcr_set_t0sz x10, x9
/*
* Set the IPS bits in TCR_EL1.