#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H
+#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
struct mmu_notifier;
struct mmu_notifier_ops;
+/* mmu_notifier_ops flags */
+#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)
+
#ifdef CONFIG_MMU_NOTIFIER
/*
};
struct mmu_notifier_ops {
+ /*
+ * Flags to specify behavior of callbacks for this MMU notifier.
+ * Used to determine in which context an operation may be called.
+ *
+ * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not
+ * block
+ */
+ int flags;
+
/*
* Called either by mmu_notifier_unregister or when the mm is
* being destroyed by exit_mmap, always before all pages are
* page. Pages will no longer be referenced by the linux
* address space but may still be referenced by sptes until
* the last refcount is dropped.
+ *
+ * If neither of these callbacks can block, and invalidate_range
+ * cannot block either, mmu_notifier_ops.flags should have
+ * MMU_INVALIDATE_DOES_NOT_BLOCK set.
*/
void (*invalidate_range_start)(struct mmu_notifier *mn,
struct mm_struct *mm,
* external TLB range needs to be flushed. For more in depth
* discussion on this see Documentation/vm/mmu_notifier.txt
*
- * The invalidate_range() function is called under the ptl
- * spin-lock and not allowed to sleep.
- *
* Note that this function might be called with just a sub-range
* of what was passed to invalidate_range_start()/end(), if
* called between those functions.
+ *
+ * If this callback cannot block, and invalidate_range_{start,end}
+ * cannot block, mmu_notifier_ops.flags should have
+ * MMU_INVALIDATE_DOES_NOT_BLOCK set.
*/
void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
unsigned long start, unsigned long end);
bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
unsigned long start, unsigned long end);
+extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
static inline void mmu_notifier_release(struct mm_struct *mm)
{
{
}
+static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
+{
+ return false;
+}
+
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}
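
The new ops->flags field is meant to be filled in by the notifier user. As a
minimal sketch (the example_* names are illustrative and not part of this
patch), a hypothetical driver whose invalidate callbacks only ever take
spinlocks would advertise that with MMU_INVALIDATE_DOES_NOT_BLOCK:

static void example_invalidate_range(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	/* tear down secondary-MMU mappings for [start, end) without sleeping */
}

static const struct mmu_notifier_ops example_mmu_notifier_ops = {
	.flags			= MMU_INVALIDATE_DOES_NOT_BLOCK,
	.invalidate_range	= example_invalidate_range,
};
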
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
+/*
+ * Must be called while holding mm->mmap_sem for either read or write.
+ * The result is guaranteed to be valid until mm->mmap_sem is dropped.
+ */
+bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
+{
+ struct mmu_notifier *mn;
+ int id;
+ bool ret = false;
+
+ WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+
+ if (!mm_has_notifiers(mm))
+ return ret;
+
+ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
+ if (!mn->ops->invalidate_range &&
+ !mn->ops->invalidate_range_start &&
+ !mn->ops->invalidate_range_end)
+ continue;
+
+ if (!(mn->ops->flags & MMU_INVALIDATE_DOES_NOT_BLOCK)) {
+ ret = true;
+ break;
+ }
+ }
+ srcu_read_unlock(&srcu, id);
+ return ret;
+}
+
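
For callers, a hedged usage sketch (the example_* helper is hypothetical, not
part of this patch): a path that must not sleep, such as the OOM reaper, can
use the new helper to skip an mm whose notifiers may block, holding mmap_sem
for read as required by the comment above:

static bool example_can_reap(struct mm_struct *mm)
{
	bool ret = true;

	if (!down_read_trylock(&mm->mmap_sem))
		return false;
	if (mm_has_blockable_invalidate_notifiers(mm))
		ret = false;	/* a registered notifier might sleep */
	up_read(&mm->mmap_sem);
	return ret;
}
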
static int do_mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm,
int take_mmap_sem)
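
Registration itself is unchanged by this patch. For completeness, a brief
sketch (again with illustrative example_* names, reusing the ops structure
sketched earlier) of attaching such a notifier to an mm:

static struct mmu_notifier example_mn = {
	.ops = &example_mmu_notifier_ops,
};

static int example_attach(struct mm_struct *mm)
{
	/* mmu_notifier_register() takes mmap_sem via do_mmu_notifier_register() */
	return mmu_notifier_register(&example_mn, mm);
}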