Commit 0e1dbccd authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Two families of fixes:

   - Fix an FPU context related boot crash on newer x86 hardware with
     larger context sizes than what most people test.  To fix this
     without ugly kludges or extensive reverts we had to touch core task
     allocator, to allow x86 to determine the task size dynamically, at
     boot time.

     I've tested it on a number of x86 platforms, and I cross-built it
     to a handful of architectures:

                                        (warns)               (warns)
       testing     x86-64:  -git:  pass (    0),  -tip:  pass (    0)
       testing     x86-32:  -git:  pass (    0),  -tip:  pass (    0)
       testing        arm:  -git:  pass ( 1359),  -tip:  pass ( 1359)
       testing       cris:  -git:  pass ( 1031),  -tip:  pass ( 1031)
       testing       m32r:  -git:  pass ( 1135),  -tip:  pass ( 1135)
       testing       m68k:  -git:  pass ( 1471),  -tip:  pass ( 1471)
       testing       mips:  -git:  pass ( 1162),  -tip:  pass ( 1162)
       testing    mn10300:  -git:  pass ( 1058),  -tip:  pass ( 1058)
       testing     parisc:  -git:  pass ( 1846),  -tip:  pass ( 1846)
       testing      sparc:  -git:  pass ( 1185),  -tip:  pass ( 1185)

     ... so I hope the cross-arch impact is 'none', as intended.

     (by Dave Hansen)

   - Fix various NMI handling related bugs unearthed by the big asm code
     rewrite and generally make the NMI code more robust and more
     maintainable while at it.  These changes are a bit late in the
     cycle, I hope they are still acceptable.

     (by Andy Lutomirski)"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu, sched: Introduce CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT and use it on x86
  x86/fpu, sched: Dynamically allocate 'struct fpu'
  x86/entry/64, x86/nmi/64: Add CONFIG_DEBUG_ENTRY NMI testing code
  x86/nmi/64: Make the "NMI executing" variable more consistent
  x86/nmi/64: Minor asm simplification
  x86/nmi/64: Use DF to avoid userspace RSP confusing nested NMI detection
  x86/nmi/64: Reorder nested NMI checks
  x86/nmi/64: Improve nested NMI comments
  x86/nmi/64: Switch stacks on userspace NMI entry
  x86/nmi/64: Remove asm code that saves CR2
  x86/nmi: Enable nested do_nmi() handling for 64-bit kernels
parents dae57fb6 5aaeb5c0
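As a rough illustration of what the first group of fixes does (a sketch only, with simplified, made-up identifiers rather than the kernel's own code): the architecture computes the real size of the trailing FPU register state at boot, publishes it through arch_task_struct_size, and the core allocator sizes task_struct objects from that value instead of sizeof(struct task_struct).

/*
 * Minimal user-space sketch of the dynamic task_struct sizing idea.
 * 'fpu_sketch', 'task_struct_sketch' and 'arch_init_task_struct_size'
 * are invented for this example.
 */
#include <stddef.h>
#include <stdio.h>

struct fpu_sketch {
	int last_cpu;
	unsigned char state[1];		/* placeholder; really sized at boot */
};

struct task_struct_sketch {
	long pid;
	struct fpu_sketch fpu;		/* must stay last: dynamically sized */
};

static size_t arch_task_struct_size;	/* what the arch publishes */

static void arch_init_task_struct_size(size_t xstate_size)
{
	size_t size = sizeof(struct task_struct_sketch);

	/* Drop the static placeholder, add the probed register-state size. */
	size -= sizeof(((struct task_struct_sketch *)0)->fpu.state);
	size += xstate_size;

	arch_task_struct_size = size;
}

int main(void)
{
	arch_init_task_struct_size(832);	/* e.g. an XSAVE area probed at boot */
	/* The core allocator would size its task_struct slab from this value. */
	printf("task struct object size: %zu\n", arch_task_struct_size);
	return 0;
}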
@@ -221,6 +221,10 @@ config ARCH_TASK_STRUCT_ALLOCATOR
config ARCH_THREAD_INFO_ALLOCATOR
bool
# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
config ARCH_WANTS_DYNAMIC_TASK_STRUCT
bool
config HAVE_REGS_AND_STACK_ACCESS_API
bool
help
......
@@ -41,6 +41,7 @@ config X86
select ARCH_USE_CMPXCHG_LOCKREF if X86_64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION if X86_32
select ARCH_WANT_OPTIONAL_GPIOLIB
......
@@ -297,6 +297,18 @@ config OPTIMIZE_INLINING
If unsure, say N.
config DEBUG_ENTRY
bool "Debug low-level entry code"
depends on DEBUG_KERNEL
---help---
This option enables sanity checks in x86's low-level entry code.
Some of these sanity checks may slow down kernel entries and
exits or otherwise impact performance.
This is currently used to help test NMI code.
If unsure, say N.
config DEBUG_NMI_SELFTEST
bool "NMI Selftest"
depends on DEBUG_KERNEL && X86_LOCAL_APIC
......
This diff is collapsed.
@@ -189,6 +189,7 @@ union fpregs_state {
struct fxregs_state fxsave;
struct swregs_state soft;
struct xregs_state xsave;
u8 __padding[PAGE_SIZE];
};
/*
@@ -197,40 +198,6 @@ union fpregs_state {
* state fields:
*/
struct fpu {
/*
* @state:
*
* In-memory copy of all FPU registers that we save/restore
* over context switches. If the task is using the FPU then
* the registers in the FPU are more recent than this state
* copy. If the task context-switches away then they get
* saved here and represent the FPU state.
*
* After context switches there may be a (short) time period
* during which the in-FPU hardware registers are unchanged
* and still perfectly match this state, if the tasks
* scheduled afterwards are not using the FPU.
*
* This is the 'lazy restore' window of optimization, which
* we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
*
* We detect whether a subsequent task uses the FPU via setting
* CR0::TS to 1, which causes any FPU use to raise a #NM fault.
*
* During this window, if the task gets scheduled again, we
* might be able to skip having to do a restore from this
* memory buffer to the hardware registers - at the cost of
* incurring the overhead of #NM fault traps.
*
* Note that on modern CPUs that support the XSAVEOPT (or other
* optimized XSAVE instructions), we don't use #NM traps anymore,
* as the hardware can track whether FPU registers need saving
* or not. On such CPUs we activate the non-lazy ('eagerfpu')
* logic, which unconditionally saves/restores all FPU state
* across context switches. (if FPU state exists.)
*/
union fpregs_state state;
/*
* @last_cpu:
*
@@ -288,6 +255,43 @@ struct fpu {
* deal with bursty apps that only use the FPU for a short time:
*/
unsigned char counter;
/*
* @state:
*
* In-memory copy of all FPU registers that we save/restore
* over context switches. If the task is using the FPU then
* the registers in the FPU are more recent than this state
* copy. If the task context-switches away then they get
* saved here and represent the FPU state.
*
* After context switches there may be a (short) time period
* during which the in-FPU hardware registers are unchanged
* and still perfectly match this state, if the tasks
* scheduled afterwards are not using the FPU.
*
* This is the 'lazy restore' window of optimization, which
* we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
*
* We detect whether a subsequent task uses the FPU via setting
* CR0::TS to 1, which causes any FPU use to raise a #NM fault.
*
* During this window, if the task gets scheduled again, we
* might be able to skip having to do a restore from this
* memory buffer to the hardware registers - at the cost of
* incurring the overhead of #NM fault traps.
*
* Note that on modern CPUs that support the XSAVEOPT (or other
* optimized XSAVE instructions), we don't use #NM traps anymore,
* as the hardware can track whether FPU registers need saving
* or not. On such CPUs we activate the non-lazy ('eagerfpu')
* logic, which unconditionally saves/restores all FPU state
* across context switches. (if FPU state exists.)
*/
union fpregs_state state;
/*
* WARNING: 'state' is dynamically-sized. Do not put
* anything after it here.
*/
};
#endif /* _ASM_X86_FPU_H */
@@ -390,9 +390,6 @@ struct thread_struct {
#endif
unsigned long gs;
/* Floating point and extended processor state */
struct fpu fpu;
/* Save middle states of ptrace breakpoints */
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
@@ -418,6 +415,13 @@ struct thread_struct {
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
/* Floating point and extended processor state */
struct fpu fpu;
/*
* WARNING: 'fpu' is dynamically-sized. It *MUST* be at
* the end.
*/
};
/*
......
@@ -4,6 +4,8 @@
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
#include <linux/sched.h>
/*
* Initialize the TS bit in CR0 according to the style of context-switches
* we are using:
@@ -136,6 +138,43 @@ static void __init fpu__init_system_generic(void)
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);
/* Enforce that 'MEMBER' is the last field of 'TYPE': */
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
BUILD_BUG_ON(sizeof(TYPE) != offsetofend(TYPE, MEMBER))
/*
* We append the 'struct fpu' to the task_struct:
*/
static void __init fpu__init_task_struct_size(void)
{
int task_size = sizeof(struct task_struct);
/*
* Subtract off the static size of the register state.
* It potentially has a bunch of padding.
*/
task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);
/*
* Add back the dynamically-calculated register state
* size.
*/
task_size += xstate_size;
/*
* We dynamically size 'struct fpu', so we require that
* it be at the end of 'thread_struct' and that
* 'thread_struct' be at the end of 'task_struct'. If
* you hit a compile error here, check the structure to
* see if something got added to the end.
*/
CHECK_MEMBER_AT_END_OF(struct fpu, state);
CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
arch_task_struct_size = task_size;
}
/*
* Set up the xstate_size based on the legacy FPU context size.
*
@@ -287,6 +326,7 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
fpu__init_system_generic();
fpu__init_system_xstate_size_legacy();
fpu__init_system_xstate();
fpu__init_task_struct_size();
fpu__init_system_ctx_switch();
}
......
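The CHECK_MEMBER_AT_END_OF() checks above build on offsetofend(). A stand-alone way to see the same "this member must be last" invariant (a sketch with hypothetical names, not kernel code) is a C11 static assertion:

#include <stddef.h>

/* offsetofend() analogue: offset of MEMBER plus its own size. */
#define my_offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct demo {
	int a;
	long tail;	/* meant to be the final, dynamically-sized member */
};

/* Breaks the build if anyone ever adds a field after 'tail'. */
_Static_assert(sizeof(struct demo) == my_offsetofend(struct demo, tail),
	       "'tail' must be the last member of struct demo");

The BUILD_BUG_ON() form above enforces the same thing at compile time, which is what lets fpu__init_task_struct_size() safely treat the nominal sizeof() as "everything except the trailing register state".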
@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
NOKPROBE_SYMBOL(default_do_nmi);
/*
-* NMIs can hit breakpoints which will cause it to lose its
-* NMI context with the CPU when the breakpoint does an iret.
-*/
-#ifdef CONFIG_X86_32
-/*
-* For i386, NMIs use the same stack as the kernel, and we can
-* add a workaround to the iret problem in C (preventing nested
-* NMIs if an NMI takes a trap). Simply have 3 states the NMI
-* can be in:
+* NMIs can page fault or hit breakpoints which will cause it to lose
+* its NMI context with the CPU when the breakpoint or page fault does an IRET.
+*
+* As a result, NMIs can nest if NMIs get unmasked due an IRET during
+* NMI processing. On x86_64, the asm glue protects us from nested NMIs
+* if the outer NMI came from kernel mode, but we can still nest if the
+* outer NMI came from user mode.
+*
+* To handle these nested NMIs, we have three states:
*
* 1) not running
* 2) executing
@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
* (Note, the latch is binary, thus multiple NMIs triggering,
* when one is running, are ignored. Only one NMI is restarted.)
*
-* If an NMI hits a breakpoint that executes an iret, another
-* NMI can preempt it. We do not want to allow this new NMI
-* to run, but we want to execute it when the first one finishes.
-* We set the state to "latched", and the exit of the first NMI will
-* perform a dec_return, if the result is zero (NOT_RUNNING), then
-* it will simply exit the NMI handler. If not, the dec_return
-* would have set the state to NMI_EXECUTING (what we want it to
-* be when we are running). In this case, we simply jump back
-* to rerun the NMI handler again, and restart the 'latched' NMI.
+* If an NMI executes an iret, another NMI can preempt it. We do not
+* want to allow this new NMI to run, but we want to execute it when the
+* first one finishes. We set the state to "latched", and the exit of
+* the first NMI will perform a dec_return, if the result is zero
+* (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
+* dec_return would have set the state to NMI_EXECUTING (what we want it
+* to be when we are running). In this case, we simply jump back to
+* rerun the NMI handler again, and restart the 'latched' NMI.
*
* No trap (breakpoint or page fault) should be hit before nmi_restart,
* thus there is no race between the first check of state for NOT_RUNNING
@@ -461,49 +460,36 @@ enum nmi_states {
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
-#define nmi_nesting_preprocess(regs) \
-do { \
-if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
-this_cpu_write(nmi_state, NMI_LATCHED); \
-return; \
-} \
-this_cpu_write(nmi_state, NMI_EXECUTING); \
-this_cpu_write(nmi_cr2, read_cr2()); \
-} while (0); \
-nmi_restart:
-#define nmi_nesting_postprocess() \
-do { \
-if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
-write_cr2(this_cpu_read(nmi_cr2)); \
-if (this_cpu_dec_return(nmi_state)) \
-goto nmi_restart; \
-} while (0)
-#else /* x86_64 */
+#ifdef CONFIG_X86_64
/*
-* In x86_64 things are a bit more difficult. This has the same problem
-* where an NMI hitting a breakpoint that calls iret will remove the
-* NMI context, allowing a nested NMI to enter. What makes this more
-* difficult is that both NMIs and breakpoints have their own stack.
-* When a new NMI or breakpoint is executed, the stack is set to a fixed
-* point. If an NMI is nested, it will have its stack set at that same
-* fixed address that the first NMI had, and will start corrupting the
-* stack. This is handled in entry_64.S, but the same problem exists with
-* the breakpoint stack.
+* In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
+* some care, the inner breakpoint will clobber the outer breakpoint's
+* stack.
*
-* If a breakpoint is being processed, and the debug stack is being used,
-* if an NMI comes in and also hits a breakpoint, the stack pointer
-* will be set to the same fixed address as the breakpoint that was
-* interrupted, causing that stack to be corrupted. To handle this case,
-* check if the stack that was interrupted is the debug stack, and if
-* so, change the IDT so that new breakpoints will use the current stack
-* and not switch to the fixed address. On return of the NMI, switch back
-* to the original IDT.
+* If a breakpoint is being processed, and the debug stack is being
+* used, if an NMI comes in and also hits a breakpoint, the stack
+* pointer will be set to the same fixed address as the breakpoint that
+* was interrupted, causing that stack to be corrupted. To handle this
+* case, check if the stack that was interrupted is the debug stack, and
+* if so, change the IDT so that new breakpoints will use the current
+* stack and not switch to the fixed address. On return of the NMI,
+* switch back to the original IDT.
*/
static DEFINE_PER_CPU(int, update_debug_stack);
-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
+#endif
+dotraplinkage notrace void
+do_nmi(struct pt_regs *regs, long error_code)
{
+if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
+this_cpu_write(nmi_state, NMI_LATCHED);
+return;
+}
+this_cpu_write(nmi_state, NMI_EXECUTING);
+this_cpu_write(nmi_cr2, read_cr2());
+nmi_restart:
+#ifdef CONFIG_X86_64
/*
* If we interrupted a breakpoint, it is possible that
* the nmi handler will have breakpoints too. We need to
@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
debug_stack_set_zero();
this_cpu_write(update_debug_stack, 1);
}
-}
-static inline void nmi_nesting_postprocess(void)
-{
-if (unlikely(this_cpu_read(update_debug_stack))) {
-debug_stack_reset();
-this_cpu_write(update_debug_stack, 0);
-}
-}
#endif
-dotraplinkage notrace void
-do_nmi(struct pt_regs *regs, long error_code)
-{
-nmi_nesting_preprocess(regs);
nmi_enter();
inc_irq_stat(__nmi_count);
@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
nmi_exit();
-/* On i386, may loop back to preprocess */
-nmi_nesting_postprocess();
+#ifdef CONFIG_X86_64
+if (unlikely(this_cpu_read(update_debug_stack))) {
+debug_stack_reset();
+this_cpu_write(update_debug_stack, 0);
+}
+#endif
+if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
+write_cr2(this_cpu_read(nmi_cr2));
+if (this_cpu_dec_return(nmi_state))
+goto nmi_restart;
}
NOKPROBE_SYMBOL(do_nmi);
......
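The restructured do_nmi() above is the three-state latch described in the comment block near the top of the hunk. As a toy, user-space model of that state machine (an illustration only: single-threaded, plain globals instead of per-CPU variables, and the "nested" NMI is simulated by hand), the control flow is roughly:

/*
 * Toy model of the NOT_RUNNING/EXECUTING/LATCHED nesting latch.
 * Not kernel code; the real handler uses per-CPU state and runs
 * in NMI context.
 */
#include <stdio.h>

enum { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED };

static int nmi_state;

static void handle_one_nmi(void)
{
	printf("handling one NMI, state=%d\n", nmi_state);
}

static void toy_do_nmi(void)
{
	if (nmi_state != NMI_NOT_RUNNING) {
		/* A handler is already running: remember at most one more. */
		nmi_state = NMI_LATCHED;
		return;
	}
	nmi_state = NMI_EXECUTING;

restart:
	handle_one_nmi();

	/*
	 * dec_return analogue: EXECUTING (1) drops to 0 and we are done;
	 * LATCHED (2) drops to EXECUTING (1) and we rerun the handler once.
	 */
	if (--nmi_state)
		goto restart;
}

int main(void)
{
	toy_do_nmi();			/* runs the handler once */
	nmi_state = NMI_EXECUTING;	/* pretend a handler is in progress... */
	toy_do_nmi();			/* ...so this one only latches */
	printf("final state=%d\n", nmi_state);
	return 0;
}

The real handler additionally saves and restores CR2 around this window, since a page fault taken inside the NMI handler would otherwise clobber the CR2 value of whatever fault the NMI interrupted.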
@@ -81,7 +81,7 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
-*dst = *src;
+memcpy(dst, src, arch_task_struct_size);
return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
......
@@ -92,7 +92,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
roundup(sizeof(CORE_STR), 4)) +
roundup(sizeof(struct elf_prstatus), 4) +
roundup(sizeof(struct elf_prpsinfo), 4) +
-roundup(sizeof(struct task_struct), 4);
+roundup(arch_task_struct_size, 4);
*elf_buflen = PAGE_ALIGN(*elf_buflen);
return size + *elf_buflen;
}
@@ -415,7 +415,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
/* set up the task structure */
notes[2].name = CORE_STR;
notes[2].type = NT_TASKSTRUCT;
-notes[2].datasz = sizeof(struct task_struct);
+notes[2].datasz = arch_task_struct_size;
notes[2].data = current;
nhdr->p_filesz += notesize(&notes[2]);
......
@@ -1522,8 +1522,6 @@ struct task_struct {
/* hung task detection */
unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
/* filesystem information */
struct fs_struct *fs;
/* open file information */
@@ -1778,8 +1776,22 @@ struct task_struct {
unsigned long task_state_change;
#endif
int pagefault_disabled;
/* CPU-specific state of this task */
struct thread_struct thread;
/*
* WARNING: on x86, 'thread_struct' contains a variable-sized
* structure. It *MUST* be at the end of 'task_struct'.
*
* Do not put anything below here!
*/
};
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
......
@@ -287,6 +287,11 @@ static void set_max_threads(unsigned int max_threads_suggested)
max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif
void __init fork_init(void)
{
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
@@ -295,7 +300,7 @@ void __init fork_init(void)
#endif
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct", sizeof(struct task_struct), kmem_cache_create("task_struct", arch_task_struct_size,
ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif
......