*/
static inline void fpregs_deactivate(struct fpu *fpu)
{
- fpu->fpregs_active = 0;
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
trace_x86_fpu_regs_deactivated(fpu);
}
static inline void fpregs_activate(struct fpu *fpu)
{
- fpu->fpregs_active = 1;
this_cpu_write(fpu_fpregs_owner_ctx, fpu);
trace_x86_fpu_regs_activated(fpu);
}
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- WARN_ON_FPU(old_fpu->fpregs_active != old_fpu->fpstate_active);
-
if (old_fpu->fpstate_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else
old_fpu->last_cpu = cpu;
/* But leave fpu_fpregs_owner_ctx! */
- old_fpu->fpregs_active = 0;
trace_x86_fpu_regs_deactivated(old_fpu);
} else
old_fpu->last_cpu = -1;
*/
unsigned char fpstate_active;
- /*
- * @fpregs_active:
- *
- * This flag determines whether a given context is actively
- * loaded into the FPU's registers and that those registers
- * represent the task's current FPU state.
- *
- * Note the interaction with fpstate_active:
- *
- * # task does not use the FPU:
- * fpstate_active == 0
- *
- * # task uses the FPU and regs are active:
- * fpstate_active == 1 && fpregs_active == 1
- *
- * # the regs are inactive but still match fpstate:
- * fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
- *
- * The third state is what we use for the lazy restore optimization
- * on lazy-switching CPUs.
- */
- unsigned char fpregs_active;
-
/*
* @state:
*
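For reference only (not part of the patch): a minimal sketch of how the three states documented in the removed comment above could be told apart before this change, assuming the pre-patch fpstate_active/fpregs_active fields and the fpu_fpregs_owner_ctx per-CPU pointer; the fpu_context_state() helper below is hypothetical and exists purely for illustration.

/*
 * Illustrative sketch, not kernel code: classify the FPU context states
 * described in the @fpregs_active comment. Assumes the pre-patch fields.
 */
static inline const char *fpu_context_state(struct fpu *fpu)
{
	if (!fpu->fpstate_active)
		return "no FPU state";			/* task does not use the FPU */
	if (fpu->fpregs_active)
		return "regs live";			/* registers hold this task's state */
	if (this_cpu_read(fpu_fpregs_owner_ctx) == fpu)
		return "regs inactive, fpstate still matches";	/* lazy-restore candidate */
	return "regs inactive and stale";		/* full restore needed */
}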
TP_STRUCT__entry(
__field(struct fpu *, fpu)
- __field(bool, fpregs_active)
__field(bool, fpstate_active)
__field(u64, xfeatures)
__field(u64, xcomp_bv)
TP_fast_assign(
__entry->fpu = fpu;
- __entry->fpregs_active = fpu->fpregs_active;
__entry->fpstate_active = fpu->fpstate_active;
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
__entry->xfeatures = fpu->state.xsave.header.xfeatures;
__entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
}
),
- TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
+ TP_printk("x86/fpu: %p fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
__entry->fpu,
- __entry->fpregs_active,
__entry->fpstate_active,
__entry->xfeatures,
__entry->xcomp_bv
WARN_ON_FPU(fpu != &current->thread.fpu);
preempt_disable();
- WARN_ON_FPU(fpu->fpstate_active != fpu->fpregs_active);
-
trace_x86_fpu_before_save(fpu);
if (fpu->fpstate_active) {
if (!copy_fpregs_to_fpstate(fpu)) {
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
*/
void fpu__activate_fpstate_read(struct fpu *fpu)
{
- WARN_ON_FPU(fpu->fpstate_active != fpu->fpregs_active);
/*
* If fpregs are active (in the current CPU), then
* copy them to the fpstate:
{
struct fpu *fpu = &current->thread.fpu;
- WARN_ON_FPU(fpu->fpstate_active != fpu->fpregs_active);
/*
* 'fpu' now has an updated copy of the state, but the
* registers may still be out of date. Update them with
preempt_disable();
if (fpu == &current->thread.fpu) {
- WARN_ON_FPU(fpu->fpstate_active != fpu->fpregs_active);
-
if (fpu->fpstate_active) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
_ASM_EXTABLE(1b, 2b));
fpregs_deactivate(fpu);
}
- } else {
- WARN_ON_FPU(fpu->fpregs_active);
}
fpu->fpstate_active = 0;
sizeof(struct user_i387_ia32_struct), NULL,
(struct _fpstate_32 __user *) buf) ? -1 : 1;
- WARN_ON_FPU(fpu->fpstate_active != fpu->fpregs_active);
-
if (fpu->fpstate_active || using_compacted_format()) {
/* Save the live register state to the user directly. */
if (copy_fpregs_to_sigframe(buf_fx))