unsigned long numpages;
int flags;
unsigned long pfn;
- unsigned force_split : 1;
+ unsigned force_split : 1,
+ force_static_prot : 1;
int curpage;
struct page **pages;
};
enum cpa_warn {
+ CPA_CONFLICT,
CPA_PROTECT,
CPA_DETECT,
};
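For reference, a minimal stand-alone sketch of the enum-indexed message-table pattern that cpa_warn() relies on, so the new CPA_CONFLICT level only needs a matching designated initializer. The names here (warn_level, warn_txt, warn) are illustrative, not kernel code:

#include <stdio.h>

enum warn_level { WARN_CONFLICT, WARN_PROTECT, WARN_DETECT };

/* Designated initializers keep the strings aligned with the enumerators. */
static const char *warn_txt[] = {
	[WARN_CONFLICT] = "conflict",
	[WARN_PROTECT]  = "protect",
	[WARN_DETECT]   = "detect",
};

static void warn(enum warn_level lvl, unsigned long addr)
{
	printf("CPA %s: address 0x%lx\n", warn_txt[lvl], addr);
}

int main(void)
{
	warn(WARN_CONFLICT, 0x200000UL);
	return 0;
}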
unsigned long pfn, const char *txt)
{
static const char *lvltxt[] = {
+ [CPA_CONFLICT] = "conflict",
[CPA_PROTECT] = "protect",
[CPA_DETECT] = "detect",
};
struct cpa_data *cpa)
{
unsigned long numpages, pmask, psize, lpaddr, addr, pfn, old_pfn;
- pgprot_t old_prot, new_prot, req_prot;
+ pgprot_t old_prot, new_prot, req_prot, chk_prot;
pte_t new_pte, old_pte, *tmp;
enum pg_level level;
int i;
lpaddr = address & pmask;
numpages = psize >> PAGE_SHIFT;
+ /*
+ * Sanity check that the existing mapping is correct versus the static
+ * protections. static_protections() guards against !PRESENT, so no
+ * extra conditional required here.
+ */
+ chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
+ CPA_CONFLICT);
+
+ if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
+ /*
+ * Split the large page and tell the split code to
+ * enforce static protections.
+ */
+ cpa->force_static_prot = 1;
+ return 1;
+ }
+
/*
* Make sure that the requested pgprot does not violate the static
* protections. Check the full large page whether one of the pages
new_prot = static_protections(req_prot, address, pfn, 1, CPA_DETECT);
pfn = old_pfn;
for (i = 0, addr = lpaddr; i < numpages; i++, addr += PAGE_SIZE, pfn++) {
- pgprot_t chk_prot = static_protections(req_prot, addr, pfn, 1,
- CPA_DETECT);
+ chk_prot = static_protections(req_prot, addr, pfn, 1,
+ CPA_DETECT);
cpa_inc_4k_checked();
if (pgprot_val(chk_prot) != pgprot_val(new_prot))
return 1;
return do_split;
}
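To make the two checks above easier to follow, here is a small self-contained model of the decision made in this function; the names (model_static_prot, model_should_split, PROT_*) are made up for illustration and the "static protections" are reduced to clearing one forbidden bit over a range, not kernel code. The first check asks whether the existing large mapping already conflicts with the static protections (forcing a split and setting the enforcement flag, mirroring cpa->force_static_prot); the second asks whether the requested protection would differ for any 4K page inside the large page (also forcing a split, but without enforcement):

#include <stdbool.h>
#include <stdio.h>

#define PROT_RW   0x1
#define PROT_EXEC 0x2

struct range { unsigned long start, end; };

/* Pages inside 'noexec' must never be executable. */
static unsigned long model_static_prot(unsigned long prot, unsigned long addr,
				       struct range noexec)
{
	if (addr >= noexec.start && addr < noexec.end)
		prot &= ~PROT_EXEC;
	return prot;
}

/*
 * Returns 0 to keep the large page, 1 to split it. *enforce is set when the
 * existing mapping already violates the static protections.
 */
static int model_should_split(unsigned long old_prot, unsigned long req_prot,
			      unsigned long lpaddr, unsigned long npages,
			      struct range noexec, bool *enforce)
{
	unsigned long addr, first, chk;

	*enforce = false;

	/* Conflict check: is the current large mapping itself inconsistent? */
	for (addr = lpaddr; addr < lpaddr + npages * 4096; addr += 4096) {
		if (model_static_prot(old_prot, addr, noexec) != old_prot) {
			*enforce = true;
			return 1;
		}
	}

	/* Detect check: does any 4K page need a different requested prot? */
	first = model_static_prot(req_prot, lpaddr, noexec);
	for (addr = lpaddr; addr < lpaddr + npages * 4096; addr += 4096) {
		chk = model_static_prot(req_prot, addr, noexec);
		if (chk != first)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct range noexec = { 0x200000, 0x201000 }; /* one protected 4K page */
	bool enforce;
	int split = model_should_split(PROT_RW | PROT_EXEC, PROT_RW,
				       0x200000, 512, noexec, &enforce);

	printf("split=%d enforce=%d\n", split, enforce);
	return 0;
}

Running the model prints split=1 enforce=1 because the existing mapping is executable over a range that must not be; if noexec did not overlap the large page, it would print split=0 enforce=0 and the large mapping would be preserved.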
+static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
+ pgprot_t ref_prot, unsigned long address,
+ unsigned long size)
+{
+ unsigned int npg = PFN_DOWN(size);
+ pgprot_t prot;
+
+ /*
+ * If should_split_large_page() discovered an inconsistent mapping,
+ * remove the invalid protection in the split mapping.
+ */
+ if (!cpa->force_static_prot)
+ goto set;
+
+ prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);
+
+ if (pgprot_val(prot) == pgprot_val(ref_prot))
+ goto set;
+
+ /*
+ * If this is splitting a PMD, fix it up. PUD splits cannot be
+ * fixed trivially as that would require rescanning the newly
+ * installed PMD mappings after returning from split_large_page()
+ * so that an eventual further split can allocate the necessary
+ * PTE pages. Warn for now and revisit it in case this actually
+ * happens.
+ */
+ if (size == PAGE_SIZE)
+ ref_prot = prot;
+ else
+ pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
+set:
+ set_pte(pte, pfn_pte(pfn, ref_prot));
+}
+
static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
struct page *base)
{
+ unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
pte_t *pbase = (pte_t *)page_address(base);
- unsigned long ref_pfn, pfn, pfninc = 1;
unsigned int i, level;
- pte_t *tmp;
pgprot_t ref_prot;
+ pte_t *tmp;
spin_lock(&pgd_lock);
/*
* PAT bit to correct position.
*/
ref_prot = pgprot_large_2_4k(ref_prot);
-
ref_pfn = pmd_pfn(*(pmd_t *)kpte);
+ lpaddr = address & PMD_MASK;
+ lpinc = PAGE_SIZE;
break;
case PG_LEVEL_1G:
ref_prot = pud_pgprot(*(pud_t *)kpte);
ref_pfn = pud_pfn(*(pud_t *)kpte);
pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
-
+ lpaddr = address & PUD_MASK;
+ lpinc = PMD_SIZE;
/*
* Clear the PSE flags if the PRESENT flag is not set
* otherwise pmd_present/pmd_huge will return true
* Get the target pfn from the original entry:
*/
pfn = ref_pfn;
- for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
- set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
+ for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
+ split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
if (virt_addr_valid(address)) {
unsigned long pfn = PFN_DOWN(__pa(address));
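The new lpaddr/lpinc bookkeeping is what lets split_set_pte() re-check the static protections for exactly the chunk each new entry maps. A small stand-alone sketch of how the loop advances per split level, with made-up pfn/address values rather than kernel code: a PMD split steps one pfn and PAGE_SIZE of address per PTE, a PUD split steps PMD_SIZE >> PAGE_SHIFT pfns and PMD_SIZE of address per PMD entry:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << 21)

/* Print the first few (pfn, address, chunk size) triples a split produces. */
static void show_split(const char *what, unsigned long ref_pfn,
		       unsigned long lpaddr, unsigned long pfninc,
		       unsigned long lpinc)
{
	unsigned long pfn = ref_pfn, addr = lpaddr;
	int i;

	printf("%s split:\n", what);
	for (i = 0; i < 3; i++, pfn += pfninc, addr += lpinc)
		printf("  entry %d: pfn=%#lx addr=%#lx covers %#lx bytes\n",
		       i, pfn, addr, lpinc);
}

int main(void)
{
	/* 2M (PMD) split: 512 PTEs, one pfn and 4K of address per entry. */
	show_split("PMD", 0x200, 0x200000, 1, PAGE_SIZE);

	/* 1G (PUD) split: 512 PMDs, 512 pfns and 2M of address per entry. */
	show_split("PUD", 0x40000, 0x40000000, PMD_SIZE >> PAGE_SHIFT, PMD_SIZE);
	return 0;
}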