OFFSET(pbe_next, pbe, next);
/* Offset from the sysenter stack to tss.esp0 */
- DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, esp0) -
+ DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
sizeof(struct tss_struct));
DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
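A note on this constant: it is negative by construction. MSR_IA32_SYSENTER_ESP is programmed with an address just past the end of the TSS (see the esp1 setup in the sysenter hunk below), and the entry code applies this negative displacement to the stack pointer it receives in order to locate esp0. A minimal userspace sketch of the arithmetic, using mock types rather than the real kernel structs:

/* Mock types for illustration only -- not the kernel's definitions. */
#include <stddef.h>
#include <stdio.h>

struct mock_hw_tss {
	unsigned short back_link, pad;
	unsigned long esp0;
};

struct mock_tss {
	struct mock_hw_tss x86_tss;
	unsigned long io_bitmap[4];	/* software state follows the hw part */
};

int main(void)
{
	/* Same shape as the DEFINE above: member offset minus total size
	 * gives the (negative) displacement from the end of the struct. */
	long off = (long)offsetof(struct mock_tss, x86_tss.esp0)
			- (long)sizeof(struct mock_tss);
	printf("end-of-TSS to esp0: %ld\n", off);
	return 0;
}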
printk("double fault, tss at %08lx\n", tss);
if (ptr_ok(tss)) {
- struct tss_struct *t = (struct tss_struct *)tss;
+ struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
printk("eip = %08lx, esp = %08lx\n", t->eip, t->esp);
}
struct tss_struct doublefault_tss __cacheline_aligned = {
- .esp0 = STACK_START,
- .ss0 = __KERNEL_DS,
- .ldt = 0,
- .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+ .x86_tss = {
+ .esp0 = STACK_START,
+ .ss0 = __KERNEL_DS,
+ .ldt = 0,
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
- .eip = (unsigned long) doublefault_fn,
- .eflags = X86_EFLAGS_SF | 0x2, /* 0x2 bit is always set */
- .esp = STACK_START,
- .es = __USER_DS,
- .cs = __KERNEL_CS,
- .ss = __KERNEL_DS,
- .ds = __USER_DS,
+ .eip = (unsigned long) doublefault_fn,
+ /* 0x2 bit is always set */
+ .eflags = X86_EFLAGS_SF | 0x2,
+ .esp = STACK_START,
+ .es = __USER_DS,
+ .cs = __KERNEL_CS,
+ .ss = __KERNEL_DS,
+ .ds = __USER_DS,
- .__cr3 = __pa(swapper_pg_dir)
+ .__cr3 = __pa(swapper_pg_dir)
+ }
};
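The conversion above is mechanical: every hardware-visible field keeps its name and value, and simply gains one level of nesting under .x86_tss. The same pattern in miniature, with hypothetical struct names:

/* Hypothetical illustration of the wrapping pattern -- not kernel code. */
struct hw_part {
	int a, b;
};

struct wrapped {
	struct hw_part x86_tss;	/* hardware-defined layout, now nested */
	int soft;		/* software-only state stays at the top */
};

static struct wrapped w = {
	.x86_tss = {
		.a = 1,
		.b = 2,
	},
	.soft = 3,
};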
* Reset the owner so that a process switch will not set
* tss->io_bitmap_base to IO_BITMAP_OFFSET.
*/
- tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+ tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
tss->io_bitmap_owner = NULL;
put_cpu();
t->io_bitmap_max = 0;
tss->io_bitmap_owner = NULL;
tss->io_bitmap_max = 0;
- tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+ tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
put_cpu();
}
}
* Disable the bitmap via an invalid offset. We still cache
* the previous bitmap owner and the IO bitmap contents:
*/
- tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+ tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
return;
}
* matches the next task, we don't have to do anything but
* to set a valid offset in the TSS:
*/
- tss->io_bitmap_base = IO_BITMAP_OFFSET;
+ tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
return;
}
/*
* redundant copies when the currently switched task does not
* perform any I/O during its timeslice.
*/
- tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+ tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}
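All of these hunks touch the same field: io_bitmap_base now lives in the nested hardware struct, but the lazy scheme around it is unchanged. It is a tri-state: a valid offset, an invalid offset (no bitmap at all), or an invalid-lazy offset that defers the copy until the first faulting I/O instruction. A condensed sketch of the context-switch decision (the enum and helper names are illustrative, not the kernel's):

/* Illustrative only -- names and values do not match the kernel's. */
enum io_bitmap_state {
	BITMAP_VALID,	/* IO_BITMAP_OFFSET: bitmap in the TSS is live */
	BITMAP_NONE,	/* INVALID_IO_BITMAP_OFFSET: any I/O insn faults */
	BITMAP_LAZY,	/* INVALID_IO_BITMAP_OFFSET_LAZY: copy on first fault */
};

static enum io_bitmap_state
pick_state(int next_has_bitmap, int tss_caches_next)
{
	if (!next_has_bitmap)
		return BITMAP_NONE;	/* a faulting I/O insn kills the task */
	if (tss_caches_next)
		return BITMAP_VALID;	/* cached copy already matches next */
	return BITMAP_LAZY;		/* defer memcpy to the first #GP */
}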
/*
return;
}
- tss->ss1 = __KERNEL_CS;
- tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+ tss->x86_tss.ss1 = __KERNEL_CS;
+ tss->x86_tss.esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
- wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
+ wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
put_cpu();
}
* and we set the offset field correctly. Then we let the CPU
* restart the faulting instruction.
*/
- if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
+ if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
thread->io_bitmap_ptr) {
memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
thread->io_bitmap_max);
memset((char *) tss->io_bitmap +
thread->io_bitmap_max, 0xff,
tss->io_bitmap_max - thread->io_bitmap_max);
tss->io_bitmap_max = thread->io_bitmap_max;
- tss->io_bitmap_base = IO_BITMAP_OFFSET;
+ tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
tss->io_bitmap_owner = thread;
put_cpu();
return;
static void vmi_load_esp0(struct tss_struct *tss,
struct thread_struct *thread)
{
- tss->esp0 = thread->esp0;
+ tss->x86_tss.esp0 = thread->esp0;
/* This can only happen when SEP is enabled, no need to test "SEP"arately */
- if (unlikely(tss->ss1 != thread->sysenter_cs)) {
- tss->ss1 = thread->sysenter_cs;
+ if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+ tss->x86_tss.ss1 = thread->sysenter_cs;
wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
- vmi_ops.set_kernel_stack(__KERNEL_DS, tss->esp0);
+ vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.esp0);
}
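vmi_load_esp0() intentionally mirrors native_load_esp0() (quoted at the end of this patch) and then forwards the new stack to the hypervisor through the vmi_ops table. The shape of that indirection, reduced to a hypothetical sketch (not the real vmi_ops API):

/* Hypothetical sketch of the hook-table shape -- not the real vmi_ops. */
struct stack_ops {
	void (*set_kernel_stack)(unsigned int sel, unsigned long esp0);
};

static void load_esp0_via(struct stack_ops *ops, unsigned long new_esp0)
{
	/* keep the in-memory TSS coherent exactly as the native path does,
	 * then additionally notify the backend of the new kernel stack */
	ops->set_kernel_stack(0x68 /* illustrative selector */, new_esp0);
}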
static void vmi_flush_tlb_user(void)
struct thread_struct;
-struct tss_struct {
+/* This is the TSS defined by the hardware. */
+struct i386_hw_tss {
unsigned short back_link,__blh;
unsigned long esp0;
unsigned short ss0,__ss0h;
unsigned short gs, __gsh;
unsigned short ldt, __ldth;
unsigned short trace, io_bitmap_base;
+} __attribute__((packed));
+
+struct tss_struct {
+ struct i386_hw_tss x86_tss;
+
/*
* The extra 1 is there because the CPU will access an
* additional byte beyond the end of the IO permission
* bitmap. The last byte must be all 1 bits, and must
* be within the limit.
*/
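Splitting the struct makes it easier to perturb the hardware half by accident, which is why i386_hw_tss is marked packed: the CPU reads these fields at fixed offsets. A compile-time guard in the same spirit (the macro and mock struct below are illustrative, not part of this patch):

/* Illustrative compile-time layout guard -- not part of this patch. */
#define LAYOUT_CHECK(cond) ((void)sizeof(char[1 - 2 * !(cond)]))

struct demo_hw_tss {
	unsigned short back_link, pad;
	unsigned long esp0;
} __attribute__((packed));

static inline void demo_layout_check(void)
{
	/* back_link plus padding occupy 4 bytes, so esp0 must sit at 4 */
	LAYOUT_CHECK(__builtin_offsetof(struct demo_hw_tss, esp0) == 4);
}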
#define INIT_TSS { \
- .esp0 = sizeof(init_stack) + (long)&init_stack, \
- .ss0 = __KERNEL_DS, \
- .ss1 = __KERNEL_CS, \
- .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+ .x86_tss = { \
+ .esp0 = sizeof(init_stack) + (long)&init_stack, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+ }, \
.io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
}
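For context, INIT_TSS is expanded once per CPU where the per-CPU TSS is defined. Quoted from memory for this era of the tree, so the exact attributes may differ:

/* From memory -- the era-appropriate consumer in init_task.c: */
DEFINE_PER_CPU(struct tss_struct, init_tss)
	____cacheline_internodealigned_in_smp = INIT_TSS;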
static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
- tss->esp0 = thread->esp0;
+ tss->x86_tss.esp0 = thread->esp0;
/* This can only happen when SEP is enabled, no need to test "SEP"arately */
- if (unlikely(tss->ss1 != thread->sysenter_cs)) {
- tss->ss1 = thread->sysenter_cs;
+ if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+ tss->x86_tss.ss1 = thread->sysenter_cs;
wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
}
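Both the native and VMI variants are reached through the load_esp0() hook on every context switch, roughly as follows. This is heavily simplified from memory; the real __switch_to() does considerably more:

/* Heavily simplified from memory -- illustrative control flow only. */
struct task_struct *__switch_to(struct task_struct *prev_p,
				struct task_struct *next_p)
{
	struct thread_struct *next = &next_p->thread;
	struct tss_struct *tss = &per_cpu(init_tss, smp_processor_id());

	load_esp0(tss, next);	/* native_load_esp0 or vmi_load_esp0 */
	/* ... segment, debug-register and I/O bitmap handling ... */
	return prev_p;
}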