Then, remove bad_hva() and inline kvm_is_error_hva().
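
Callers keep checking the hva returned by gfn_to_hva() with
kvm_is_error_hva(); a minimal sketch of the expected pattern (not part
of this patch, shown only for illustration):

	unsigned long hva;

	hva = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(hva))
		return -EFAULT;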
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
}
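+/*
+ * PAGE_OFFSET is never a valid userspace address, so it can serve as
+ * the error hva, just as bad_hva() did.
+ */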
+#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+ return addr == PAGE_OFFSET;
+}
+
#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
static inline bool is_error_page(struct page *page)
return slot;
}
-int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc);
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);
-static inline unsigned long bad_hva(void)
-{
- return PAGE_OFFSET;
-}
-
-int kvm_is_error_hva(unsigned long addr)
-{
- return addr == bad_hva();
-}
-EXPORT_SYMBOL_GPL(kvm_is_error_hva);
-
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
return __gfn_to_memslot(kvm_memslots(kvm), gfn);
gfn_t *nr_pages)
{
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
- return bad_hva();
+ return KVM_HVA_ERR_BAD;
if (nr_pages)
*nr_pages = slot->npages - (gfn - slot->base_gfn);