Using the ksys_fadvise64_64() helper allows us to avoid the in-kernel
calls to the sys_fadvise64_64() syscall. The ksys_ prefix denotes that
this function is meant as a drop-in replacement for the syscall. In
particular, it uses the same calling convention as sys_fadvise64_64().
Some compat stubs called sys_fadvise64(), which then just passed through
the arguments to sys_fadvise64_64(). Get rid of this indirection, and call
ksys_fadvise64_64() directly.
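
As a rough illustration of the pattern (using a hypothetical sys_frob()
syscall, not part of this patch): the ksys_ helper carries the
implementation, the syscall becomes a thin wrapper around it, and
in-kernel and compat callers use the helper instead of the syscall
entry point:

	/* helper: holds the actual implementation */
	int ksys_frob(int fd, loff_t offset, loff_t len, int advice)
	{
		/* ... do the work ... */
		return 0;
	}

	/* syscall: thin wrapper around the helper */
	SYSCALL_DEFINE4(frob, int, fd, loff_t, offset, loff_t, len, int, advice)
	{
		return ksys_frob(fd, offset, len, advice);
	}

	/* arch/compat stub: call the helper, not sys_frob() */
	asmlinkage long arch_frob_wrapper(int fd, int advice, loff_t offset, loff_t len)
	{
		return ksys_frob(fd, offset, len, advice);
	}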
This patch is part of a series which removes in-kernel calls to syscalls.
On this basis, the syscall entry path can be streamlined. For details, see
http://lkml.kernel.org/r/20180325162527.GA17492@light.dominikbrodowski.net
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
loff_t offset, loff_t len)
{
- return sys_fadvise64_64(fd, offset, len, advice);
+ return ksys_fadvise64_64(fd, offset, len, advice);
}
unsigned long a4, unsigned long a5,
int flags)
{
- return sys_fadvise64_64(fd,
+ return ksys_fadvise64_64(fd,
merge_64(a2, a3), merge_64(a4, a5),
flags);
}
unsigned int high_off, unsigned int low_off,
unsigned int high_len, unsigned int low_len, int advice)
{
- return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
+ return ksys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
(loff_t)high_len << 32 | low_len, advice);
}
long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
size_t len, int advice)
{
- return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low, len,
- advice);
+ return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low, len,
+ advice);
}
asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low)
{
- return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
- (u64)len_high << 32 | len_low, advice);
+ return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low,
+ (u64)len_high << 32 | len_low, advice);
}
long sys_switch_endian(void)
advise = POSIX_FADV_DONTNEED;
else if (advise == 5)
advise = POSIX_FADV_NOREUSE;
- return sys_fadvise64(fd, (unsigned long)high << 32 | low, len, advise);
+ return ksys_fadvise64_64(fd, (unsigned long)high << 32 | low, len,
+ advise);
}
struct fadvise64_64_args {
a.advice = POSIX_FADV_DONTNEED;
else if (a.advice == 5)
a.advice = POSIX_FADV_NOREUSE;
- return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
+ return ksys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}
COMPAT_SYSCALL_DEFINE6(s390_sync_file_range, int, fd, u32, offhigh, u32, offlow,
u32 len0, u32 len1, int advice)
{
#ifdef __LITTLE_ENDIAN__
- return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
- (u64)len1 << 32 | len0, advice);
+ return ksys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
+ (u64)len1 << 32 | len0, advice);
#else
- return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
- (u64)len0 << 32 | len1, advice);
+ return ksys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
+ (u64)len0 << 32 | len1, advice);
#endif
}
unsigned long offlo,
compat_size_t len, int advice)
{
- return sys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice);
+ return ksys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice);
}
long compat_sys_fadvise64_64(int fd,
unsigned long lenhi, unsigned long lenlo,
int advice)
{
- return sys_fadvise64_64(fd,
- (offhi << 32) | offlo,
- (lenhi << 32) | lenlo,
- advice);
+ return ksys_fadvise64_64(fd,
+ (offhi << 32) | offlo,
+ (lenhi << 32) | lenlo,
+ advice);
}
long sys32_sync_file_range(unsigned int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, unsigned int flags)
__u32, offset_high, __u32, len_low, __u32, len_high,
int, advice)
{
- return sys_fadvise64_64(fd,
- (((u64)offset_high)<<32) | offset_low,
- (((u64)len_high)<<32) | len_low,
- advice);
+ return ksys_fadvise64_64(fd,
+ (((u64)offset_high)<<32) | offset_low,
+ (((u64)len_high)<<32) | len_low,
+ advice);
}
COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo,
COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo,
unsigned int, offset_hi, size_t, len, int, advice)
{
- return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
- len, advice);
+ return ksys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
+ len, advice);
}
COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
asmlinkage long xtensa_fadvise64_64(int fd, int advice,
unsigned long long offset, unsigned long long len)
{
- return sys_fadvise64_64(fd, offset, len, advice);
+ return ksys_fadvise64_64(fd, offset, len, advice);
}
#ifdef CONFIG_MMU
ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
size_t count, loff_t pos);
int ksys_fallocate(int fd, int mode, loff_t offset, loff_t len);
+#ifdef CONFIG_ADVISE_SYSCALLS
+int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
+#else
+static inline int ksys_fadvise64_64(int fd, loff_t offset, loff_t len,
+ int advice)
+{
+ return -EINVAL;
+}
+#endif
/*
* The following kernel syscall equivalents are just wrappers to fs-internal
* POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
* deactivate the pages and clear PG_Referenced.
*/
-SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
+
+int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
{
struct fd f = fdget(fd);
struct inode *inode;
return ret;
}
+SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
+{
+ return ksys_fadvise64_64(fd, offset, len, advice);
+}
+
#ifdef __ARCH_WANT_SYS_FADVISE64
SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
{
- return sys_fadvise64_64(fd, offset, len, advice);
+ return ksys_fadvise64_64(fd, offset, len, advice);
}
#endif