Commit 07bf731e authored by Bodo Stroesser, committed by Linus Torvalds

[PATCH] uml: skas0 stubs now check system call return values

Change the syscall stub's data to include an "expected retval".

The stub now checks each syscall's return value and aborts execution of the
syscall list if the return value does not match the expected one.

run_syscall_stub prints the data of the failed syscall, using the data pointer
and the return value that the stub writes to the beginning of its stack.

one_syscall_stub is removed to simplify the code, since it only saved a few
instructions, not a host syscall.

Using the stub with additional data (modify_ldt via the stub) is also
prepared.
Signed-off-by: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 8b51304e
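
The stub data layout that the reworked batch_syscall_stub consumes (see the
stub.S hunks at the end of the diff) can be sketched as follows. This is an
illustrative sketch only, not code from the patch: the helper name
append_stub_syscall and its calling convention are invented, and the real list
is built by run_syscall_stub()/syscall_stub_data(), whose prototypes are added
below. The page begins with two longs that the stub writes back (the last
return value and a pointer to the failing record, or 0), followed by
variable-length records; a zero length word ends the list.

/*
 * Illustrative sketch, not from the patch: append one syscall record to a
 * stub data page laid out the way the new batch_syscall_stub expects it.
 * "pos" points into the operation list, which starts two longs past the
 * beginning of the page (those two longs are reserved for the stub's
 * result: last return value and a pointer to the failing record).
 */
static unsigned long *append_stub_syscall(unsigned long *pos, long syscall,
                                          const unsigned long args[6],
                                          long expected)
{
        int i;

        *pos++ = sizeof(long);  /* length word: no additional data here;
                                 * extra data (e.g. an LDT for modify_ldt)
                                 * would be counted in and placed after it */
        *pos++ = syscall;       /* syscall number, popped into %eax / %rax */
        for (i = 0; i < 6; i++) /* six arguments, popped into the arg regs */
                *pos++ = args[i];
        *pos++ = expected;      /* the stub compares the real return value
                                 * with this and stops on a mismatch */
        *pos = 0;               /* zero length word terminates the list;
                                 * the next append overwrites it */
        return pos;
}

On a mismatch the stub stops immediately, leaving the offending record's
address in the second header long, which is what lets run_syscall_stub print
the failed syscall's data.
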
@@ -38,9 +38,9 @@ extern void mprotect_kernel_vm(int w);
 extern void force_flush_all(void);
 extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                              unsigned long end_addr, int force,
-                             void *(*do_ops)(union mm_context *,
-                                             struct host_vm_op *, int, int,
-                                             void *));
+                             int (*do_ops)(union mm_context *,
+                                           struct host_vm_op *, int, int,
+                                           void **));
 extern int flush_tlb_kernel_range_common(unsigned long start,
                                          unsigned long end);
...
@@ -24,14 +24,14 @@ extern void new_thread_proc(void *stack, void (*handler)(int sig));
 extern void remove_sigstack(void);
 extern void new_thread_handler(int sig);
 extern void handle_syscall(union uml_pt_regs *regs);
-extern void *map(struct mm_id * mm_idp, unsigned long virt,
-                 unsigned long len, int r, int w, int x, int phys_fd,
-                 unsigned long long offset, int done, void *data);
-extern void *unmap(struct mm_id * mm_idp, void *addr,
-                   unsigned long len, int done, void *data);
-extern void *protect(struct mm_id * mm_idp, unsigned long addr,
-                     unsigned long len, int r, int w, int x, int done,
-                     void *data);
+extern int map(struct mm_id * mm_idp, unsigned long virt,
+               unsigned long len, int r, int w, int x, int phys_fd,
+               unsigned long long offset, int done, void **data);
+extern int unmap(struct mm_id * mm_idp, void *addr, unsigned long len,
+                 int done, void **data);
+extern int protect(struct mm_id * mm_idp, unsigned long addr,
+                   unsigned long len, int r, int w, int x, int done,
+                   void **data);
 extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
 extern int new_mm(int from, unsigned long stack);
 extern int start_userspace(unsigned long stub_stack);
@@ -39,16 +39,11 @@ extern int copy_context_skas0(unsigned long stack, int pid);
 extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
 extern long execute_syscall_skas(void *r);
 extern unsigned long current_stub_stack(void);
+extern long run_syscall_stub(struct mm_id * mm_idp,
+                             int syscall, unsigned long *args, long expected,
+                             void **addr, int done);
+extern long syscall_stub_data(struct mm_id * mm_idp,
+                              unsigned long *data, int data_count,
+                              void **addr, void **stub_addr);
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
... (one file's diff is collapsed and not shown) ...
@@ -18,30 +18,31 @@
 #include "os.h"
 #include "tlb.h"
 
-static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
-                    int finished, void *flush)
+static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
+                  int finished, void **flush)
 {
         struct host_vm_op *op;
-        int i;
+        int i, ret = 0;
 
-        for(i = 0; i <= last; i++){
+        for(i = 0; i <= last && !ret; i++){
                 op = &ops[i];
                 switch(op->type){
                 case MMAP:
-                        flush = map(&mmu->skas.id, op->u.mmap.addr,
+                        ret = map(&mmu->skas.id, op->u.mmap.addr,
                                     op->u.mmap.len, op->u.mmap.r, op->u.mmap.w,
                                     op->u.mmap.x, op->u.mmap.fd,
                                     op->u.mmap.offset, finished, flush);
                         break;
                 case MUNMAP:
-                        flush = unmap(&mmu->skas.id, (void *) op->u.munmap.addr,
-                                      op->u.munmap.len, finished, flush);
+                        ret = unmap(&mmu->skas.id,
+                                    (void *) op->u.munmap.addr,
+                                    op->u.munmap.len, finished, flush);
                         break;
                 case MPROTECT:
-                        flush = protect(&mmu->skas.id, op->u.mprotect.addr,
+                        ret = protect(&mmu->skas.id, op->u.mprotect.addr,
                                         op->u.mprotect.len, op->u.mprotect.r,
                                         op->u.mprotect.w, op->u.mprotect.x,
                                         finished, flush);
                         break;
                 default:
                         printk("Unknown op type %d in do_ops\n", op->type);
@@ -49,7 +50,7 @@ static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                 }
         }
 
-        return flush;
+        return ret;
 }
 
 extern int proc_mm;
...
@@ -16,115 +16,117 @@
 #include "os.h"
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
-                    int r, int w, int x, struct host_vm_op *ops, int index,
+                    int r, int w, int x, struct host_vm_op *ops, int *index,
                     int last_filled, union mm_context *mmu, void **flush,
-                    void *(*do_ops)(union mm_context *, struct host_vm_op *,
-                                    int, int, void *))
+                    int (*do_ops)(union mm_context *, struct host_vm_op *,
+                                  int, int, void **))
 {
         __u64 offset;
         struct host_vm_op *last;
-        int fd;
+        int fd, ret = 0;
 
         fd = phys_mapping(phys, &offset);
-        if(index != -1){
-                last = &ops[index];
+        if(*index != -1){
+                last = &ops[*index];
                 if((last->type == MMAP) &&
                    (last->u.mmap.addr + last->u.mmap.len == virt) &&
                    (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
                    (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
                    (last->u.mmap.offset + last->u.mmap.len == offset)){
                         last->u.mmap.len += len;
-                        return index;
+                        return 0;
                 }
         }
 
-        if(index == last_filled){
-                *flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
-                index = -1;
+        if(*index == last_filled){
+                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
+                *index = -1;
         }
 
-        ops[++index] = ((struct host_vm_op) { .type   = MMAP,
+        ops[++*index] = ((struct host_vm_op) { .type   = MMAP,
                                               .u = { .mmap = {
                                                       .addr   = virt,
                                                       .len    = len,
                                                       .r      = r,
                                                       .w      = w,
                                                       .x      = x,
                                                       .fd     = fd,
                                                       .offset = offset }
                                               } });
-        return index;
+        return ret;
 }
 
 static int add_munmap(unsigned long addr, unsigned long len,
-                      struct host_vm_op *ops, int index, int last_filled,
+                      struct host_vm_op *ops, int *index, int last_filled,
                       union mm_context *mmu, void **flush,
-                      void *(*do_ops)(union mm_context *, struct host_vm_op *,
-                                      int, int, void *))
+                      int (*do_ops)(union mm_context *, struct host_vm_op *,
+                                    int, int, void **))
 {
         struct host_vm_op *last;
+        int ret = 0;
 
-        if(index != -1){
-                last = &ops[index];
+        if(*index != -1){
+                last = &ops[*index];
                 if((last->type == MUNMAP) &&
                    (last->u.munmap.addr + last->u.mmap.len == addr)){
                         last->u.munmap.len += len;
-                        return index;
+                        return 0;
                 }
         }
 
-        if(index == last_filled){
-                *flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
-                index = -1;
+        if(*index == last_filled){
+                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
+                *index = -1;
         }
 
-        ops[++index] = ((struct host_vm_op) { .type = MUNMAP,
+        ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
                                               .u = { .munmap = {
                                                       .addr = addr,
                                                       .len  = len } } });
-        return index;
+        return ret;
 }
 
 static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
-                        int x, struct host_vm_op *ops, int index,
+                        int x, struct host_vm_op *ops, int *index,
                         int last_filled, union mm_context *mmu, void **flush,
-                        void *(*do_ops)(union mm_context *,
-                                        struct host_vm_op *, int, int, void *))
+                        int (*do_ops)(union mm_context *, struct host_vm_op *,
+                                      int, int, void **))
 {
         struct host_vm_op *last;
+        int ret = 0;
 
-        if(index != -1){
-                last = &ops[index];
+        if(*index != -1){
+                last = &ops[*index];
                 if((last->type == MPROTECT) &&
                    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                    (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
                    (last->u.mprotect.x == x)){
                         last->u.mprotect.len += len;
-                        return index;
+                        return 0;
                 }
         }
 
-        if(index == last_filled){
-                *flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
-                index = -1;
+        if(*index == last_filled){
+                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
+                *index = -1;
         }
 
-        ops[++index] = ((struct host_vm_op) { .type = MPROTECT,
+        ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
                                               .u = { .mprotect = {
                                                       .addr = addr,
                                                       .len  = len,
                                                       .r    = r,
                                                       .w    = w,
                                                       .x    = x } } });
-        return index;
+        return ret;
 }
 
 #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
 
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                       unsigned long end_addr, int force,
-                      void *(*do_ops)(union mm_context *, struct host_vm_op *,
-                                      int, int, void *))
+                      int (*do_ops)(union mm_context *, struct host_vm_op *,
+                                    int, int, void **))
 {
         pgd_t *npgd;
         pud_t *npud;
@@ -136,20 +138,21 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
         struct host_vm_op ops[1];
         void *flush = NULL;
         int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;
+        int ret = 0;
 
         if(mm == NULL) return;
 
         ops[0].type = NONE;
-        for(addr = start_addr; addr < end_addr;){
+        for(addr = start_addr; addr < end_addr && !ret;){
                 npgd = pgd_offset(mm, addr);
                 if(!pgd_present(*npgd)){
                         end = ADD_ROUND(addr, PGDIR_SIZE);
                         if(end > end_addr)
                                 end = end_addr;
                         if(force || pgd_newpage(*npgd)){
-                                op_index = add_munmap(addr, end - addr, ops,
-                                                      op_index, last_op, mmu,
+                                ret = add_munmap(addr, end - addr, ops,
+                                                 &op_index, last_op, mmu,
                                                       &flush, do_ops);
                                 pgd_mkuptodate(*npgd);
                         }
                         addr = end;
@@ -162,9 +165,9 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                         if(end > end_addr)
                                 end = end_addr;
                         if(force || pud_newpage(*npud)){
-                                op_index = add_munmap(addr, end - addr, ops,
-                                                      op_index, last_op, mmu,
+                                ret = add_munmap(addr, end - addr, ops,
+                                                 &op_index, last_op, mmu,
                                                       &flush, do_ops);
                                 pud_mkuptodate(*npud);
                         }
                         addr = end;
@@ -177,9 +180,9 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                         if(end > end_addr)
                                 end = end_addr;
                         if(force || pmd_newpage(*npmd)){
-                                op_index = add_munmap(addr, end - addr, ops,
-                                                      op_index, last_op, mmu,
+                                ret = add_munmap(addr, end - addr, ops,
+                                                 &op_index, last_op, mmu,
                                                       &flush, do_ops);
                                 pmd_mkuptodate(*npmd);
                         }
                         addr = end;
@@ -198,24 +201,32 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                         }
                         if(force || pte_newpage(*npte)){
                                 if(pte_present(*npte))
-                                        op_index = add_mmap(addr,
+                                        ret = add_mmap(addr,
                                                             pte_val(*npte) & PAGE_MASK,
                                                             PAGE_SIZE, r, w, x, ops,
-                                                            op_index, last_op, mmu,
+                                                            &op_index, last_op, mmu,
                                                             &flush, do_ops);
-                                else op_index = add_munmap(addr, PAGE_SIZE, ops,
-                                                           op_index, last_op, mmu,
+                                else ret = add_munmap(addr, PAGE_SIZE, ops,
+                                                      &op_index, last_op, mmu,
                                                            &flush, do_ops);
                         }
                         else if(pte_newprot(*npte))
-                                op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
-                                                        op_index, last_op, mmu,
+                                ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
+                                                   &op_index, last_op, mmu,
                                                         &flush, do_ops);
                         *npte = pte_mkuptodate(*npte);
                         addr += PAGE_SIZE;
                 }
-        flush = (*do_ops)(mmu, ops, op_index, 1, flush);
+
+        if(!ret)
+                ret = (*do_ops)(mmu, ops, op_index, 1, &flush);
+
+        /* This is not an else because ret is modified above */
+        if(ret) {
+                printk("fix_range_common: failed, killing current process\n");
+                force_sig(SIGKILL, current);
+        }
 }
 
 int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
...
@@ -17,26 +17,31 @@
 #include "os.h"
 #include "tlb.h"
 
-static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
-                    int finished, void *flush)
+static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
+                  int finished, void **flush)
 {
         struct host_vm_op *op;
-        int i;
+        int i, ret=0;
 
-        for(i = 0; i <= last; i++){
+        for(i = 0; i <= last && !ret; i++){
                 op = &ops[i];
                 switch(op->type){
                 case MMAP:
-                        os_map_memory((void *) op->u.mmap.addr, op->u.mmap.fd,
-                                      op->u.mmap.offset, op->u.mmap.len,
-                                      op->u.mmap.r, op->u.mmap.w,
-                                      op->u.mmap.x);
+                        ret = os_map_memory((void *) op->u.mmap.addr,
+                                            op->u.mmap.fd, op->u.mmap.offset,
+                                            op->u.mmap.len, op->u.mmap.r,
+                                            op->u.mmap.w, op->u.mmap.x);
                         break;
                 case MUNMAP:
-                        os_unmap_memory((void *) op->u.munmap.addr,
+                        ret = os_unmap_memory((void *) op->u.munmap.addr,
                                         op->u.munmap.len);
                         break;
                 case MPROTECT:
+                        ret = protect_memory(op->u.mprotect.addr,
+                                             op->u.munmap.len,
+                                             op->u.mprotect.r,
+                                             op->u.mprotect.w,
+                                             op->u.mprotect.x, 1);
                         protect_memory(op->u.mprotect.addr, op->u.munmap.len,
                                        op->u.mprotect.r, op->u.mprotect.w,
                                        op->u.mprotect.x, 1);
@@ -47,7 +52,7 @@ static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                 }
         }
 
-        return NULL;
+        return ret;
 }
 
 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
...
@@ -2,24 +2,50 @@
 	.globl syscall_stub
 .section .__syscall_stub, "x"
-syscall_stub:
-	int	$0x80
-	mov	%eax, UML_CONFIG_STUB_DATA
-	int3
 
 	.globl batch_syscall_stub
 batch_syscall_stub:
-	mov	$UML_CONFIG_STUB_DATA, %esp
-again:	pop	%eax
+	/* load pointer to first operation */
+	mov	$(UML_CONFIG_STUB_DATA+8), %esp
+
+again:
+	/* load length of additional data */
+	mov	0x0(%esp), %eax
+
+	/* if(length == 0) : end of list */
+	/* write possible 0 to header */
+	mov	%eax, UML_CONFIG_STUB_DATA+4
 	cmpl	$0, %eax
 	jz	done
+
+	/* save current pointer */
+	mov	%esp, UML_CONFIG_STUB_DATA+4
+
+	/* skip additional data */
+	add	%eax, %esp
+
+	/* load syscall-# */
+	pop	%eax
+
+	/* load syscall params */
 	pop	%ebx
 	pop	%ecx
 	pop	%edx
 	pop	%esi
 	pop	%edi
 	pop	%ebp
+
+	/* execute syscall */
 	int	$0x80
+
+	/* check return value */
+	pop	%ebx
+	cmp	%ebx, %eax
+	je	again
+
+done:
+	/* save return value */
 	mov	%eax, UML_CONFIG_STUB_DATA
-	jmp	again
-done:	int3
+
+	/* stop */
+	int3
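
The i386 hunk above is where the header words come from: the stub stores the
current record pointer at UML_CONFIG_STUB_DATA+4 before issuing each syscall
(or 0 when it reaches the end of the list) and the last return value at
UML_CONFIG_STUB_DATA when it stops. Below is a hedged sketch of how a caller
might interpret those two words, roughly what the commit message describes
run_syscall_stub doing; the function name and message format are invented for
illustration.

/*
 * Hedged sketch, not the patch's run_syscall_stub(): interpret the stub
 * data header after batch_syscall_stub has trapped back with int3.
 */
static long check_stub_result(unsigned long *stub_data)
{
        long ret = (long) stub_data[0];         /* retval of last syscall */
        unsigned long *failed = (unsigned long *) stub_data[1];
        unsigned long *rec;

        if (failed == NULL)                     /* every retval matched */
                return 0;

        /* "failed" points at the length word of the offending record; the
         * syscall number, six arguments and expected value follow the
         * additional data. */
        rec = failed + failed[0] / sizeof(long);
        printk("stub syscall %lu failed, return value = 0x%lx, "
               "expected = 0x%lx\n", rec[0], ret, rec[7]);
        return ret;
}
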
@@ -16,21 +16,51 @@ syscall_stub:
 
 	.globl batch_syscall_stub
 batch_syscall_stub:
-	movq	$(UML_CONFIG_STUB_DATA >> 32), %rbx
-	salq	$32, %rbx
-	movq	$(UML_CONFIG_STUB_DATA & 0xffffffff), %rcx
-	or	%rcx, %rbx
-	movq	%rbx, %rsp
-again:	pop	%rax
-	cmpq	$0, %rax
-	jz	done
+	mov	$(UML_CONFIG_STUB_DATA >> 32), %rbx
+	sal	$32, %rbx
+	mov	$(UML_CONFIG_STUB_DATA & 0xffffffff), %rax
+	or	%rax, %rbx
+
+	/* load pointer to first operation */
+	mov	%rbx, %rsp
+	add	$0x10, %rsp
+again:
+	/* load length of additional data */
+	mov	0x0(%rsp), %rax
+
+	/* if(length == 0) : end of list */
+	/* write possible 0 to header */
+	mov	%rax, 8(%rbx)
+	cmp	$0, %rax
+	jz	done
+
+	/* save current pointer */
+	mov	%rsp, 8(%rbx)
+
+	/* skip additional data */
+	add	%rax, %rsp
+
+	/* load syscall-# */
+	pop	%rax
+
+	/* load syscall params */
 	pop	%rdi
 	pop	%rsi
 	pop	%rdx
 	pop	%r10
 	pop	%r8
 	pop	%r9
+
+	/* execute syscall */
 	syscall
+
+	/* check return value */
+	pop	%rcx
+	cmp	%rcx, %rax
+	je	again
+
+done:
+	/* save return value */
 	mov	%rax, (%rbx)
-	jmp	again
-done:	int3
+
+	/* stop */
+	int3