#include <asm/processor.h>
#include <asm/cache.h>
-extern spinlock_t pa_tlb_lock;
+static inline spinlock_t *pgd_spinlock(pgd_t *);
/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel memory
*/
#define kern_addr_valid(addr) (1)
-/* Purge data and instruction TLB entries. Must be called holding
- * the pa_tlb_lock. The TLB purge instructions are slow on SMP
- * machines since the purge must be broadcast to all CPUs.
+/* This is for the serialization of PxTLB broadcasts. At least on the N class
+ * systems, only one PxTLB inter-processor broadcast can be active at any one
+ * time on the Merced bus.
+ *
+ * PTE updates are protected by locks in the PMD.
+ */
+extern spinlock_t pa_tlb_flush_lock;
+extern spinlock_t pa_swapper_pg_lock;
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+extern int pa_serialize_tlb_flushes;
+#else
+#define pa_serialize_tlb_flushes (0)
+#endif
+
+#define purge_tlb_start(flags) do { \
+ if (pa_serialize_tlb_flushes) \
+ spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
+ else \
+ local_irq_save(flags); \
+ } while (0)
+#define purge_tlb_end(flags) do { \
+ if (pa_serialize_tlb_flushes) \
+ spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
+ else \
+ local_irq_restore(flags); \
+ } while (0)
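
How pa_serialize_tlb_flushes gets set is outside this hunk: conceptually, arch
setup code enables serialization only on the 64-bit SMP machines that actually
sit on a Merced bus; everywhere else the #define above compiles the check away
to 0. A minimal sketch, assuming a hypothetical machine_is_merced_bus() probe
(the helper name and its placement are assumptions, not part of this patch):

	#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	int pa_serialize_tlb_flushes __read_mostly;

	static void __init check_tlb_serialization(void)
	{
		/* machine_is_merced_bus() is a hypothetical probe */
		if (machine_is_merced_bus())
			pa_serialize_tlb_flushes = 1;
	}
	#endif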
+
+/* Purge data and instruction TLB entries. The TLB purge instructions
+ * are slow on SMP machines since the purge must be broadcast to all CPUs.
*/
static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
+ unsigned long flags;
+
+ purge_tlb_start(flags);
mtsp(mm->context, 1);
pdtlb(addr);
pitlb(addr);
+ purge_tlb_end(flags);
}
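
Note the resulting lock nesting: the PTE helpers below take the per-pgd lock
first, and purge_tlb_entries() then nests purge_tlb_start()/purge_tlb_end()
(the flush lock, or a plain IRQ disable) inside it. Illustratively (this
comment is editorial, not part of the patch):

	/*
	 * spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);   <- PTE update
	 *     purge_tlb_start(flags2);                       <- TLB purge
	 *         mtsp(mm->context, 1); pdtlb(addr); pitlb(addr);
	 *     purge_tlb_end(flags2);
	 * spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
	 *
	 * The two locks are always acquired in this order, which keeps
	 * the pairing deadlock-free.
	 */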
/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte_at(mm, addr, ptep, pteval) \
do { \
pte_t old_pte; \
unsigned long flags; \
- spin_lock_irqsave(&pa_tlb_lock, flags); \
+ spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
old_pte = *ptep; \
set_pte(ptep, pteval); \
purge_tlb_entries(mm, addr); \
- spin_unlock_irqrestore(&pa_tlb_lock, flags); \
+ spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
} while (0)
#endif /* !__ASSEMBLY__ */
#if CONFIG_PGTABLE_LEVELS == 3
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PMD_ORDER 1 /* Number of pages per pmd */
-#define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */
+#define PGD_ALLOC_ORDER (2 + 1) /* first pgd contains pmd; +1 for pgd spinlock */
#else
#define PGD_ORDER 1 /* Number of pages per pgd */
-#define PGD_ALLOC_ORDER PGD_ORDER
+#define PGD_ALLOC_ORDER (PGD_ORDER + 1) /* +1 for pgd spinlock */
#endif
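
Raising the allocation order by one doubles the pgd allocation, and the extra
half is where the per-pgd spinlock lives (see pgd_spinlock() below). The
allocation side has to initialize that lock; the matching pgd_alloc() change
lives in pgalloc.h and is not part of this excerpt. A rough sketch of its
shape (the pmd splicing the real 3-level function also does is elided here):

	static inline pgd_t *pgd_alloc(struct mm_struct *mm)
	{
		pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);

		if (likely(pgd != NULL)) {
			memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
			/* the lock sits in the upper half of the allocation */
			spin_lock_init(pgd_spinlock(pgd));
		}
		return pgd;
	}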
/* Definitions for 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean shift that has to be
 * done to get usable bits out of the PTE) */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
+{
+ if (unlikely(pgd == swapper_pg_dir))
+ return &pa_swapper_pg_lock;
+ return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
+}
+
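
The offset arithmetic places the lock at the first byte past what the old,
one-order-smaller allocation covered, i.e. at the start of the upper half of
the new allocation. A worked example with 4 KiB pages (illustration only):

	/*
	 * 2-level: PGD_ALLOC_ORDER = PGD_ORDER + 1 = 2 -> 4 pages allocated;
	 *          lock offset = PAGE_SIZE << 1 = 8 KiB, right after the
	 *          2-page pgd that the old order-1 allocation held.
	 *
	 * 3-level: PGD_ALLOC_ORDER = 2 + 1 = 3 -> 8 pages allocated;
	 *          lock offset = PAGE_SIZE << 2 = 16 KiB, right after the
	 *          pgd+pmd pages that the old order-2 allocation held.
	 *
	 * swapper_pg_dir is allocated statically without the extra room,
	 * hence the dedicated pa_swapper_pg_lock special case above.
	 */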
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
 pte_t pte;
 unsigned long flags;

if (!pte_young(*ptep))
return 0;
- spin_lock_irqsave(&pa_tlb_lock, flags);
+ spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
pte = *ptep;
if (!pte_young(pte)) {
- spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
return 0;
}
set_pte(ptep, pte_mkold(pte));
purge_tlb_entries(vma->vm_mm, addr);
- spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
return 1;
}
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
 pte_t old_pte;
unsigned long flags;
- spin_lock_irqsave(&pa_tlb_lock, flags);
+ spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
old_pte = *ptep;
set_pte(ptep, __pte(0));
purge_tlb_entries(mm, addr);
- spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
unsigned long flags;
- spin_lock_irqsave(&pa_tlb_lock, flags);
+ spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
- spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
#include <linux/sched.h>
#include <asm/mmu_context.h>
-
-/* This is for the serialisation of PxTLB broadcasts. At least on the
- * N class systems, only one PxTLB inter processor broadcast can be
- * active at any one time on the Merced bus. This tlb purge
- * synchronisation is fairly lightweight and harmless so we activate
- * it on all systems not just the N class.
-
- * It is also used to ensure PTE updates are atomic and consistent
- * with the TLB.
- */
-extern spinlock_t pa_tlb_lock;
-
-#define purge_tlb_start(flags) spin_lock_irqsave(&pa_tlb_lock, flags)
-#define purge_tlb_end(flags) spin_unlock_irqrestore(&pa_tlb_lock, flags)
-
extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
- unsigned long flags, sid;
-
- sid = vma->vm_mm->context;
- purge_tlb_start(flags);
- mtsp(sid, 1);
- pdtlb(addr);
- pitlb(addr);
- purge_tlb_end(flags);
+ purge_tlb_entries(vma->vm_mm, addr);
}
#endif