unsigned int max_vq, vq;
u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
+ if (!vcpu_has_sve(vcpu))
+ return -ENOENT;
+
if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
return -EINVAL;
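
The vqs[] array above is a bitmap with one bit per vector quantum (vq), covering SVE_VQ_MIN through SVE_VQ_MAX packed into u64 words; DIV_ROUND_UP sizes it so a final partial word is still included. A minimal sketch of how one vq lands in that bitmap, assuming illustrative helper names (EX_VQ_WORD/EX_VQ_MASK are not the kernel's own) and using sve_vq_available() as the arm64 query for whether a given vq is usable:

    #define EX_VQ_WORD(vq)  (((vq) - SVE_VQ_MIN) / 64)             /* which u64 */
    #define EX_VQ_MASK(vq)  ((u64)1 << (((vq) - SVE_VQ_MIN) % 64)) /* which bit */

    /* Advertise every vector length the vcpu supports: */
    for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
        if (sve_vq_available(vq))
            vqs[EX_VQ_WORD(vq)] |= EX_VQ_MASK(vq);
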
unsigned int max_vq, vq;
u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];
+ if (!vcpu_has_sve(vcpu))
+ return -ENOENT;
+
if (kvm_arm_vcpu_sve_finalized(vcpu))
return -EPERM; /* too late! */
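
With both hunks above applied, get_sve_vls() and set_sve_vls() fail with -ENOENT before anything else when the vcpu has no SVE, so the KVM_REG_ARM64_SVE_VLS pseudo-register doubles as a cheap SVE probe, while set_sve_vls() keeps returning -EPERM once the vcpu has been finalized. A minimal userspace sketch of the probe, assuming an open vcpu file descriptor vcpu_fd plus <sys/ioctl.h>, <errno.h> and <linux/kvm.h>, with fuller error handling omitted:

    __u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
    struct kvm_one_reg reg = {
        .id   = KVM_REG_ARM64_SVE_VLS,
        .addr = (__u64)(unsigned long)vqs,
    };

    if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0) {
        /* vcpu has SVE; vqs[] now holds its supported lengths */
    } else if (errno == ENOENT) {
        /* no SVE on this vcpu */
    }
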
unsigned int upad; /* extra trailing padding in user memory */
};
-/* Get sanitised bounds for user/kernel SVE register copy */
+/*
+ * Validate SVE register ID and get sanitised bounds for user/kernel SVE
+ * register copy
+ */
static int sve_reg_to_region(struct sve_state_reg_region *region,
struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
/* Verify that we match the UAPI header: */
BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
- if ((reg->id & SVE_REG_SLICE_MASK) > 0)
- return -ENOENT;
-
- vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
-
reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
+ if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+ return -ENOENT;
+
+ vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
SVE_SIG_REGS_OFFSET;
reqlen = KVM_SVE_ZREG_SIZE;
maxlen = SVE_SIG_ZREG_SIZE(vq);
} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
+ if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+ return -ENOENT;
+
+ vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
SVE_SIG_REGS_OFFSET;
reqlen = KVM_SVE_PREG_SIZE;
maxlen = SVE_SIG_PREG_SIZE(vq);
} else {
- return -ENOENT;
+ return -EINVAL;
}
sve_state_size = vcpu_sve_state_size(vcpu);
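
The checks above give sve_reg_to_region() a three-way contract: -ENOENT for a well-formed Z- or P-register ID that this vcpu cannot expose (no SVE, or a nonzero slice index when only slice 0 is currently defined), -EINVAL for an ID that decodes to no SVE register at all, and, in the callers below, -EPERM for access before finalization. The rest of the function clamps the request against the live state, plausibly along these lines (a sketch of the idea, not the verbatim kernel code):

    region->koffset = reqoffset;
    region->klen = min(maxlen, reqlen);     /* bytes backed by kernel state */
    region->upad = reqlen - region->klen;   /* zero-padded tail for userspace */

As an illustrative calculation: a vcpu with a 256-bit maximum vector length has vq = sve_vq_from_vl(32) = 2, so a Z-register request has reqlen = KVM_SVE_ZREG_SIZE = 256 bytes (the fixed 2048-bit user view) but maxlen = SVE_SIG_ZREG_SIZE(2) = 32 bytes, leaving a 224-byte upad tail.
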
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
+ int ret;
struct sve_state_reg_region region;
char __user *uptr = (char __user *)reg->addr;
- if (!vcpu_has_sve(vcpu))
- return -ENOENT;
-
/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
if (reg->id == KVM_REG_ARM64_SVE_VLS)
return get_sve_vls(vcpu, reg);
- /* Otherwise, reg is an architectural SVE register... */
+ /* Try to interpret reg ID as an architectural SVE register... */
+ ret = sve_reg_to_region(&region, vcpu, reg);
+ if (ret)
+ return ret;
if (!kvm_arm_vcpu_sve_finalized(vcpu))
return -EPERM;
- if (sve_reg_to_region(&region, vcpu, reg))
- return -ENOENT;
-
if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
region.klen) ||
clear_user(uptr + region.klen, region.upad))
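
Note the reordering here: sve_reg_to_region() now runs before the kvm_arm_vcpu_sve_finalized() check and its return code is propagated, where the old code flattened every failure to -ENOENT, so a malformed ID earns -EINVAL even on an unfinalized vcpu. On reads, userspace always gets the full presented register size, with the bytes past the live vector length cleared by clear_user(). A minimal userspace sketch reading Z0 (slice 0), with the same vcpu_fd assumption as above and err() from <err.h>:

    unsigned char z0[2048 / 8];   /* Z regs are presented as 2048-bit */
    struct kvm_one_reg reg = {
        .id   = KVM_REG_ARM64_SVE_ZREG(0, 0),
        .addr = (__u64)(unsigned long)z0,
    };

    if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
        err(1, "KVM_GET_ONE_REG");
    /* bytes beyond the vcpu's vector length arrive as zeroes */
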
static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
+ int ret;
struct sve_state_reg_region region;
const char __user *uptr = (const char __user *)reg->addr;
- if (!vcpu_has_sve(vcpu))
- return -ENOENT;
-
/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
if (reg->id == KVM_REG_ARM64_SVE_VLS)
return set_sve_vls(vcpu, reg);
- /* Otherwise, reg is an architectural SVE register... */
+ /* Try to interpret reg ID as an architectural SVE register... */
+ ret = sve_reg_to_region(&region, vcpu, reg);
+ if (ret)
+ return ret;
if (!kvm_arm_vcpu_sve_finalized(vcpu))
return -EPERM;
- if (sve_reg_to_region(&region, vcpu, reg))
- return -ENOENT;
-
if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
region.klen))
return -EFAULT;
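
The write path mirrors the read path but needs no padding step: copy_from_user() takes only region.klen bytes, the part actually backed by vcpu state, and trailing bytes in the user buffer are ignored. A matching userspace sketch setting P0 (slice 0), under the same vcpu_fd/err() assumptions, with an arbitrary example payload:

    unsigned char p0[256 / 8];    /* P regs are presented as 256-bit */
    struct kvm_one_reg reg = {
        .id   = KVM_REG_ARM64_SVE_PREG(0, 0),
        .addr = (__u64)(unsigned long)p0,
    };

    memset(p0, 0xff, sizeof(p0)); /* all-true predicate pattern */
    if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
        err(1, "KVM_SET_ONE_REG");
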