typedef struct thread_struct thread_struct;
-/*
- * General size of a stack
- */
-#define STACK_ORDER 2
-#define STACK_SIZE (PAGE_SIZE << STACK_ORDER)
-#define STACK_INIT_OFFSET \
- (STACK_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
-
/*
* Stack layout of a C stack frame.
*/
#include <linux/const.h>
/*
- * Size of kernel stack for each process
+ * General size of kernel stacks
*/
#define THREAD_SIZE_ORDER 2
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#include <asm/page.h>
#include <asm/processor.h>
+#define STACK_INIT_OFFSET \
+ (THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
+
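For reference, the new definitions work out as follows on s390, which uses 4 KiB pages and a 160-byte ABI register save area for STACK_FRAME_OVERHEAD. The snippet below is a stand-alone userspace sketch of the arithmetic only; the pt_regs size is a placeholder, not the real sizeof(struct pt_regs):

#include <stdio.h>

/* Stand-alone sketch of the kernel-stack layout arithmetic above.
 * PAGE_SHIFT matches s390; the pt_regs size is a placeholder.
 */
#define PAGE_SHIFT        12
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define THREAD_SIZE_ORDER 2
#define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)   /* 16 KiB */

#define STACK_FRAME_OVERHEAD  160   /* s390 ABI register save area */
#define SIZEOF_PT_REGS        336   /* placeholder, NOT the real sizeof */

#define STACK_INIT_OFFSET \
	(THREAD_SIZE - STACK_FRAME_OVERHEAD - SIZEOF_PT_REGS)

int main(void)
{
	/* The initial stack pointer sits STACK_INIT_OFFSET bytes above the
	 * stack base, leaving room for one pt_regs and one call frame at
	 * the very top of the THREAD_SIZE area.
	 */
	printf("THREAD_SIZE       = %lu\n", THREAD_SIZE);
	printf("STACK_INIT_OFFSET = %lu\n", (unsigned long)STACK_INIT_OFFSET);
	return 0;
}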
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
sp = __dump_trace(func, data, sp,
- S390_lowcore.nodat_stack + frame_size - STACK_SIZE,
+ S390_lowcore.nodat_stack + frame_size - THREAD_SIZE,
S390_lowcore.nodat_stack + frame_size);
#endif
sp = __dump_trace(func, data, sp,
- S390_lowcore.async_stack + frame_size - STACK_SIZE,
+ S390_lowcore.async_stack + frame_size - THREAD_SIZE,
S390_lowcore.async_stack + frame_size);
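The bounds passed to __dump_trace look odd at first glance: the lowcore slots store base + STACK_INIT_OFFSET, i.e. base + THREAD_SIZE - frame_size, not the base itself, so adding frame_size - THREAD_SIZE recovers the stack base and adding frame_size recovers the top. A stand-alone sketch of that recovery, with hypothetical values:

#include <assert.h>

/* Sketch: recover stack [base, base + THREAD_SIZE) from the lowcore value,
 * which stores base + STACK_INIT_OFFSET = base + THREAD_SIZE - frame_size.
 * All values are hypothetical, for illustration only.
 */
int main(void)
{
	unsigned long thread_size = 16384;
	unsigned long frame_size  = 160 + 336;  /* overhead + pt_regs, placeholder */
	unsigned long base        = 0x100000;   /* hypothetical allocation */
	unsigned long lowcore_val = base + thread_size - frame_size;

	assert(lowcore_val + frame_size - thread_size == base);         /* low bound  */
	assert(lowcore_val + frame_size == base + thread_size);         /* high bound */
	return 0;
}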
task = task ?: current;
__dump_trace(func, data, sp,
old = current_stack_pointer();
/* Check against async. stack address range. */
new = S390_lowcore.async_stack;
- if (((new - old) >> (PAGE_SHIFT + STACK_ORDER)) != 0) {
+ if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
CALL_ON_STACK(__do_softirq, new, 0);
} else {
/* We are already on the async stack. */
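The shift-based test works because PAGE_SHIFT + THREAD_SIZE_ORDER is log2(THREAD_SIZE): the result is zero exactly when the unsigned distance between the current stack pointer and the async stack pointer is below THREAD_SIZE, i.e. when we are already running on the async stack. A minimal sketch, with hypothetical addresses:

#include <assert.h>

/* Sketch of the "already on the async stack?" test above. */
#define PAGE_SHIFT        12
#define THREAD_SIZE_ORDER 2
#define THREAD_SIZE       (1UL << (PAGE_SHIFT + THREAD_SIZE_ORDER))

static int on_async_stack(unsigned long old_sp, unsigned long async_top)
{
	/* Zero iff async_top - old_sp < THREAD_SIZE (unsigned). */
	return ((async_top - old_sp) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) == 0;
}

int main(void)
{
	unsigned long top = 0x200000;  /* hypothetical async stack pointer */

	assert(on_async_stack(top - 0x100, top));            /* inside the stack */
	assert(!on_async_stack(top - THREAD_SIZE - 8, top)); /* on another stack */
	return 0;
}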
{
#ifdef CONFIG_VMAP_STACK
return (unsigned long)
- __vmalloc_node_range(STACK_SIZE, STACK_SIZE,
+ __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
VMALLOC_START, VMALLOC_END,
THREADINFO_GFP,
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
#else
- return __get_free_pages(GFP_KERNEL, STACK_ORDER);
+ return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}
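Both paths return THREAD_SIZE bytes: the vmap path additionally gets guard pages and THREAD_SIZE alignment from the vmalloc area, while the buddy path hands THREAD_SIZE_ORDER to __get_free_pages because that allocator takes log2 of the page count. A userspace sketch of the common contract, with aligned_alloc standing in for __vmalloc_node_range and the constants assumed as above:

#include <stdlib.h>

#define PAGE_SIZE         4096UL
#define THREAD_SIZE_ORDER 2
#define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)

int main(void)
{
	/* vmap-like path: THREAD_SIZE bytes, THREAD_SIZE-aligned */
	void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);

	/* buddy-like path would be 2^THREAD_SIZE_ORDER pages, which is the
	 * same THREAD_SIZE bytes; that is why the order argument changes in
	 * lockstep with the size macro.
	 */
	if (!stack)
		return 1;
	free(stack);
	return 0;
}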
#ifdef CONFIG_VMAP_STACK
vfree((void *) stack);
#else
- free_pages(stack, STACK_ORDER);
+ free_pages(stack, THREAD_SIZE_ORDER);
#endif
}
{
unsigned long stack;
- stack = __get_free_pages(GFP_KERNEL, STACK_ORDER);
+ stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
if (!stack)
panic("Couldn't allocate async stack");
S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
if (!new)
panic("Couldn't allocate async stack");
S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
- free_pages(old, STACK_ORDER);
+ free_pages(old, THREAD_SIZE_ORDER);
return 0;
}
early_initcall(async_stack_realloc);
* Allocate the global restart stack which is the same for
* all CPUs in case *one* of them does a PSW restart.
*/
- restart_stack = memblock_virt_alloc(STACK_SIZE, STACK_SIZE);
+ restart_stack = memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE);
restart_stack += STACK_INIT_OFFSET;
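Here memblock_virt_alloc(size, align) requests THREAD_SIZE bytes aligned to THREAD_SIZE, and the stored value is pre-advanced by STACK_INIT_OFFSET so a PSW restart can load it directly as a stack pointer. A userspace sketch of that setup, with aligned_alloc standing in for memblock_virt_alloc and a placeholder pt_regs size as before:

#include <stdlib.h>

#define PAGE_SIZE         4096UL
#define THREAD_SIZE       (PAGE_SIZE << 2)
#define STACK_INIT_OFFSET (THREAD_SIZE - 160 - 336)  /* placeholder pt_regs size */

int main(void)
{
	unsigned long base = (unsigned long)aligned_alloc(THREAD_SIZE, THREAD_SIZE);
	unsigned long restart_stack;

	if (!base)
		return 1;
	restart_stack = base + STACK_INIT_OFFSET;  /* ready-to-use SP near the top */
	(void)restart_stack;
	free((void *)base);
	return 0;
}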
/*
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
- nodat_stack = __get_free_pages(GFP_KERNEL, STACK_ORDER);
+ nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
if (!pcpu->lowcore || !nodat_stack)
goto out;
} else {
stack_free(async_stack);
out:
if (pcpu != &pcpu_devices[0]) {
- free_pages(nodat_stack, STACK_ORDER);
+ free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}
return -ENOMEM;
stack_free(async_stack);
if (pcpu == &pcpu_devices[0])
return;
- free_pages(nodat_stack, STACK_ORDER);
+ free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages(lowcore, LC_ORDER);
}