d.p = 1; /* Present */
d.d = 1; /* 32-bit */
- write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
+ write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPU_NUMBER, &d, DESCTYPE_S);
}
static int vgetcpu_online(unsigned int cpu)
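For context, the payload this descriptor carries lives in its segment limit. Below is a minimal sketch of the packing, under the assumption (matching the "CPU (and node) number" wording later in the patch) that the CPU number occupies the low 12 bits of the 20-bit limit and the node number sits in the bits above it; limit0/limit1 are the split limit fields of struct desc_struct. This is illustrative only, not part of the patch:

	/*
	 * Illustrative sketch, not from the patch: packing the CPU and
	 * node numbers into the segment limit before write_gdt_entry().
	 * Assumes CPU number in limit bits 0-11, node number above that.
	 */
	unsigned long node = cpu_to_node(cpu);

	d.limit0 = cpu | ((node & 0xf) << 12);	/* low 16 bits of the limit */
	d.limit1 = node >> 4;			/* high 4 bits of the limit */
	d.dpl	 = 3;				/* readable at CPL 3, i.e. from user space */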
#define GDT_ENTRY_TLS_MIN 12
#define GDT_ENTRY_TLS_MAX 14
-/* Abused to load per CPU data from limit */
-#define GDT_ENTRY_PER_CPU 15
+#define GDT_ENTRY_CPU_NUMBER 15
/*
* Number of entries in the GDT table:
#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
#define __USER32_DS __USER_DS
#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
-#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU*8 + 3)
+#define __CPU_NUMBER_SEG (GDT_ENTRY_CPU_NUMBER*8 + 3)
#endif
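For reference, the selector arithmetic follows the standard x86 selector layout, (index << 3) | TI | RPL: with GDT entry 15, table indicator 0 (GDT) and requested privilege level 3, __CPU_NUMBER_SEG works out to 15*8 + 3 = 123 (0x7b), a selector that user-space code such as the vDSO can hand to LSL.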
unsigned int p;
/*
- * Load per CPU data from GDT. LSL is faster than RDTSCP and
- * works on all CPUs. This is volatile so that it orders
- * correctly wrt barrier() and to keep gcc from cleverly
+ * Load CPU (and node) number from GDT. LSL is faster than RDTSCP
+ * and works on all CPUs. This is volatile so that it orders
+ * correctly with respect to barrier() and to keep GCC from cleverly
* hoisting it out of the calling function.
*
* If RDPID is available, use it.
alternative_io ("lsl %[seg],%[p]",
".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
X86_FEATURE_RDPID,
- [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
+ [p] "=a" (p), [seg] "r" (__CPU_NUMBER_SEG));
return p;
}
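For completeness, the value returned above still has to be split into CPU and node numbers by the caller. A minimal sketch, assuming the same encoding as in the packing sketch earlier (CPU number in the low 12 bits, node number in the remaining bits); the helper name is illustrative, not from the patch:

	/* Illustrative sketch, not from the patch. */
	static inline void getcpu_decode(unsigned int p, unsigned int *cpu,
					 unsigned int *node)
	{
		if (cpu)
			*cpu = p & 0xfff;	/* low 12 bits: CPU number */
		if (node)
			*node = p >> 12;	/* remaining bits: node number */
	}

When the alternative selects RDPID instead of LSL, the same decode is assumed to apply, on the expectation that the kernel programs IA32_TSC_AUX with the identical CPU/node encoding.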