Commit 3224bf99 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] swsusp: shuffle cpu.c to make it usable for smp suspend

From: Pavel Machek <pavel@ucw.cz>

This moves fix_processor_context() so that an additional prototype is not
needed, and adds a struct saved_context * argument to the processor state
saving and restoring functions, so that they can be used on SMP.  It should
have been done this way from the beginning.
Signed-off-by: Pavel Machek <pavel@suse.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 97b6dbb2
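
In outline, the patch splits each entry point into a worker that takes an
explicit struct saved_context * and a thin wrapper that passes the single
static context, so later SMP suspend code can keep one saved context per CPU.
A minimal sketch of how an SMP caller might use the new interface
(hypothetical; the per-CPU array and helper names below are invented for
illustration and are not part of this patch):

	/* hypothetical per-CPU contexts; the patch itself keeps only the
	 * single static saved_context used by the UP wrappers */
	static struct saved_context percpu_context[NR_CPUS];

	static void save_this_cpu_state(void)
	{
		/* each CPU saves into its own slot */
		__save_processor_state(&percpu_context[smp_processor_id()]);
	}

	static void restore_this_cpu_state(void)
	{
		__restore_processor_state(&percpu_context[smp_processor_id()]);
	}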
@@ -27,7 +27,6 @@
 #include <asm/tlbflush.h>
 
 static struct saved_context saved_context;
-static void fix_processor_context(void);
 
 unsigned long saved_context_eax, saved_context_ebx;
 unsigned long saved_context_ecx, saved_context_edx;
@@ -37,33 +36,38 @@ unsigned long saved_context_eflags;
 
 extern void enable_sep_cpu(void *);
 
-void save_processor_state(void)
+void __save_processor_state(struct saved_context *ctxt)
 {
 	kernel_fpu_begin();
 
 	/*
 	 * descriptor tables
 	 */
-	asm volatile ("sgdt %0" : "=m" (saved_context.gdt_limit));
-	asm volatile ("sidt %0" : "=m" (saved_context.idt_limit));
-	asm volatile ("sldt %0" : "=m" (saved_context.ldt));
-	asm volatile ("str %0" : "=m" (saved_context.tr));
+	asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
+	asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
+	asm volatile ("sldt %0" : "=m" (ctxt->ldt));
+	asm volatile ("str %0" : "=m" (ctxt->tr));
 
 	/*
 	 * segment registers
 	 */
-	asm volatile ("movw %%es, %0" : "=m" (saved_context.es));
-	asm volatile ("movw %%fs, %0" : "=m" (saved_context.fs));
-	asm volatile ("movw %%gs, %0" : "=m" (saved_context.gs));
-	asm volatile ("movw %%ss, %0" : "=m" (saved_context.ss));
+	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
+	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
+	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
+	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
 
 	/*
 	 * control registers
 	 */
-	asm volatile ("movl %%cr0, %0" : "=r" (saved_context.cr0));
-	asm volatile ("movl %%cr2, %0" : "=r" (saved_context.cr2));
-	asm volatile ("movl %%cr3, %0" : "=r" (saved_context.cr3));
-	asm volatile ("movl %%cr4, %0" : "=r" (saved_context.cr4));
+	asm volatile ("movl %%cr0, %0" : "=r" (ctxt->cr0));
+	asm volatile ("movl %%cr2, %0" : "=r" (ctxt->cr2));
+	asm volatile ("movl %%cr3, %0" : "=r" (ctxt->cr3));
+	asm volatile ("movl %%cr4, %0" : "=r" (ctxt->cr4));
+}
+
+void save_processor_state(void)
+{
+	__save_processor_state(&saved_context);
 }
 
 static void
@@ -75,32 +79,59 @@ do_fpu_end(void)
 	mxcsr_feature_mask_init();
 }
 
-void restore_processor_state(void)
+static void fix_processor_context(void)
+{
+	int cpu = smp_processor_id();
+	struct tss_struct * t = init_tss + cpu;
+
+	set_tss_desc(cpu,t);	/* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
+	cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
+
+	load_TR_desc();	/* This does ltr */
+	load_LDT(&current->active_mm->context);	/* This does lldt */
+
+	/*
+	 * Now maybe reload the debug registers
+	 */
+	if (current->thread.debugreg[7]){
+		loaddebug(&current->thread, 0);
+		loaddebug(&current->thread, 1);
+		loaddebug(&current->thread, 2);
+		loaddebug(&current->thread, 3);
+		/* no 4 and 5 */
+		loaddebug(&current->thread, 6);
+		loaddebug(&current->thread, 7);
+	}
+}
+
+void __restore_processor_state(struct saved_context *ctxt)
 {
 	/*
 	 * control registers
 	 */
-	asm volatile ("movl %0, %%cr4" :: "r" (saved_context.cr4));
-	asm volatile ("movl %0, %%cr3" :: "r" (saved_context.cr3));
-	asm volatile ("movl %0, %%cr2" :: "r" (saved_context.cr2));
-	asm volatile ("movl %0, %%cr0" :: "r" (saved_context.cr0));
+	asm volatile ("movl %0, %%cr4" :: "r" (ctxt->cr4));
+	asm volatile ("movl %0, %%cr3" :: "r" (ctxt->cr3));
+	asm volatile ("movl %0, %%cr2" :: "r" (ctxt->cr2));
+	asm volatile ("movl %0, %%cr0" :: "r" (ctxt->cr0));
 
 	/*
 	 * segment registers
 	 */
-	asm volatile ("movw %0, %%es" :: "r" (saved_context.es));
-	asm volatile ("movw %0, %%fs" :: "r" (saved_context.fs));
-	asm volatile ("movw %0, %%gs" :: "r" (saved_context.gs));
-	asm volatile ("movw %0, %%ss" :: "r" (saved_context.ss));
+	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
+	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
+	asm volatile ("movw %0, %%gs" :: "r" (ctxt->gs));
+	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
 
 	/*
 	 * now restore the descriptor tables to their proper values
 	 * ltr is done in fix_processor_context().
 	 */
-	asm volatile ("lgdt %0" :: "m" (saved_context.gdt_limit));
-	asm volatile ("lidt %0" :: "m" (saved_context.idt_limit));
-	asm volatile ("lldt %0" :: "m" (saved_context.ldt));
+	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
+	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
+	asm volatile ("lldt %0" :: "m" (ctxt->ldt));
 
 	/*
 	 * sysenter MSRs
@@ -112,31 +143,11 @@ void restore_processor_state(void)
 	do_fpu_end();
 }
 
-static void fix_processor_context(void)
+void restore_processor_state(void)
 {
-	int cpu = smp_processor_id();
-	struct tss_struct * t = init_tss + cpu;
-
-	set_tss_desc(cpu,t);	/* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
-	cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
-
-	load_TR_desc();	/* This does ltr */
-	load_LDT(&current->active_mm->context);	/* This does lldt */
-
-	/*
-	 * Now maybe reload the debug registers
-	 */
-	if (current->thread.debugreg[7]){
-		loaddebug(&current->thread, 0);
-		loaddebug(&current->thread, 1);
-		loaddebug(&current->thread, 2);
-		loaddebug(&current->thread, 3);
-		/* no 4 and 5 */
-		loaddebug(&current->thread, 6);
-		loaddebug(&current->thread, 7);
-	}
+	__restore_processor_state(&saved_context);
 }
 
 EXPORT_SYMBOL(save_processor_state);
 EXPORT_SYMBOL(restore_processor_state);