Commit 07bf731e authored by Bodo Stroesser's avatar Bodo Stroesser Committed by Linus Torvalds

[PATCH] uml: skas0 stubs now check system call return values

Change the syscall stub's data to include an "expected retval".

The stub now checks each syscall's retval and aborts execution of the syscall
list if retval != expected retval.

run_syscall_stub prints the data of the failed syscall, using the data pointer
and retval written by the stub to the beginning of the stack.

one_syscall_stub is removed to simplify the code, since it saved only a few
instructions, not a host syscall.

Support for using the stub with additional data (modify_ldt via the stub)
is also prepared.
Signed-off-by: default avatarBodo Stroesser <bstroesser@fujitsu-siemens.com>
Signed-off-by: default avatarJeff Dike <jdike@addtoit.com>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent 8b51304e
......@@ -38,9 +38,9 @@ extern void mprotect_kernel_vm(int w);
extern void force_flush_all(void);
extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force,
void *(*do_ops)(union mm_context *,
int (*do_ops)(union mm_context *,
struct host_vm_op *, int, int,
void *));
void **));
extern int flush_tlb_kernel_range_common(unsigned long start,
unsigned long end);
......
......@@ -24,14 +24,14 @@ extern void new_thread_proc(void *stack, void (*handler)(int sig));
extern void remove_sigstack(void);
extern void new_thread_handler(int sig);
extern void handle_syscall(union uml_pt_regs *regs);
extern void *map(struct mm_id * mm_idp, unsigned long virt,
extern int map(struct mm_id * mm_idp, unsigned long virt,
unsigned long len, int r, int w, int x, int phys_fd,
unsigned long long offset, int done, void *data);
extern void *unmap(struct mm_id * mm_idp, void *addr,
unsigned long len, int done, void *data);
extern void *protect(struct mm_id * mm_idp, unsigned long addr,
unsigned long long offset, int done, void **data);
extern int unmap(struct mm_id * mm_idp, void *addr, unsigned long len,
int done, void **data);
extern int protect(struct mm_id * mm_idp, unsigned long addr,
unsigned long len, int r, int w, int x, int done,
void *data);
void **data);
extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
extern int new_mm(int from, unsigned long stack);
extern int start_userspace(unsigned long stub_stack);
......@@ -39,16 +39,11 @@ extern int copy_context_skas0(unsigned long stack, int pid);
extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
extern long execute_syscall_skas(void *r);
extern unsigned long current_stub_stack(void);
extern long run_syscall_stub(struct mm_id * mm_idp,
int syscall, unsigned long *args, long expected,
void **addr, int done);
extern long syscall_stub_data(struct mm_id * mm_idp,
unsigned long *data, int data_count,
void **addr, void **stub_addr);
#endif
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-file-style: "linux"
* End:
*/
This diff is collapsed.
......@@ -18,27 +18,28 @@
#include "os.h"
#include "tlb.h"
static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
int finished, void *flush)
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
int finished, void **flush)
{
struct host_vm_op *op;
int i;
int i, ret = 0;
for(i = 0; i <= last; i++){
for(i = 0; i <= last && !ret; i++){
op = &ops[i];
switch(op->type){
case MMAP:
flush = map(&mmu->skas.id, op->u.mmap.addr,
ret = map(&mmu->skas.id, op->u.mmap.addr,
op->u.mmap.len, op->u.mmap.r, op->u.mmap.w,
op->u.mmap.x, op->u.mmap.fd,
op->u.mmap.offset, finished, flush);
break;
case MUNMAP:
flush = unmap(&mmu->skas.id, (void *) op->u.munmap.addr,
ret = unmap(&mmu->skas.id,
(void *) op->u.munmap.addr,
op->u.munmap.len, finished, flush);
break;
case MPROTECT:
flush = protect(&mmu->skas.id, op->u.mprotect.addr,
ret = protect(&mmu->skas.id, op->u.mprotect.addr,
op->u.mprotect.len, op->u.mprotect.r,
op->u.mprotect.w, op->u.mprotect.x,
finished, flush);
......@@ -49,7 +50,7 @@ static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
}
}
return flush;
return ret;
}
extern int proc_mm;
......
......@@ -16,34 +16,34 @@
#include "os.h"
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
int r, int w, int x, struct host_vm_op *ops, int index,
int r, int w, int x, struct host_vm_op *ops, int *index,
int last_filled, union mm_context *mmu, void **flush,
void *(*do_ops)(union mm_context *, struct host_vm_op *,
int, int, void *))
int (*do_ops)(union mm_context *, struct host_vm_op *,
int, int, void **))
{
__u64 offset;
struct host_vm_op *last;
int fd;
int fd, ret = 0;
fd = phys_mapping(phys, &offset);
if(index != -1){
last = &ops[index];
if(*index != -1){
last = &ops[*index];
if((last->type == MMAP) &&
(last->u.mmap.addr + last->u.mmap.len == virt) &&
(last->u.mmap.r == r) && (last->u.mmap.w == w) &&
(last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
(last->u.mmap.offset + last->u.mmap.len == offset)){
last->u.mmap.len += len;
return index;
return 0;
}
}
if(index == last_filled){
*flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
index = -1;
if(*index == last_filled){
ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
*index = -1;
}
ops[++index] = ((struct host_vm_op) { .type = MMAP,
ops[++*index] = ((struct host_vm_op) { .type = MMAP,
.u = { .mmap = {
.addr = virt,
.len = len,
......@@ -53,78 +53,80 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
.fd = fd,
.offset = offset }
} });
return index;
return ret;
}
static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *ops, int index, int last_filled,
struct host_vm_op *ops, int *index, int last_filled,
union mm_context *mmu, void **flush,
void *(*do_ops)(union mm_context *, struct host_vm_op *,
int, int, void *))
int (*do_ops)(union mm_context *, struct host_vm_op *,
int, int, void **))
{
struct host_vm_op *last;
int ret = 0;
if(index != -1){
last = &ops[index];
if(*index != -1){
last = &ops[*index];
if((last->type == MUNMAP) &&
(last->u.munmap.addr + last->u.mmap.len == addr)){
last->u.munmap.len += len;
return index;
return 0;
}
}
if(index == last_filled){
*flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
index = -1;
if(*index == last_filled){
ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
*index = -1;
}
ops[++index] = ((struct host_vm_op) { .type = MUNMAP,
ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
.u = { .munmap = {
.addr = addr,
.len = len } } });
return index;
return ret;
}
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
int x, struct host_vm_op *ops, int index,
int x, struct host_vm_op *ops, int *index,
int last_filled, union mm_context *mmu, void **flush,
void *(*do_ops)(union mm_context *,
struct host_vm_op *, int, int, void *))
int (*do_ops)(union mm_context *, struct host_vm_op *,
int, int, void **))
{
struct host_vm_op *last;
int ret = 0;
if(index != -1){
last = &ops[index];
if(*index != -1){
last = &ops[*index];
if((last->type == MPROTECT) &&
(last->u.mprotect.addr + last->u.mprotect.len == addr) &&
(last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
(last->u.mprotect.x == x)){
last->u.mprotect.len += len;
return index;
return 0;
}
}
if(index == last_filled){
*flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
index = -1;
if(*index == last_filled){
ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
*index = -1;
}
ops[++index] = ((struct host_vm_op) { .type = MPROTECT,
ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
.u = { .mprotect = {
.addr = addr,
.len = len,
.r = r,
.w = w,
.x = x } } });
return index;
return ret;
}
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force,
void *(*do_ops)(union mm_context *, struct host_vm_op *,
int, int, void *))
int (*do_ops)(union mm_context *, struct host_vm_op *,
int, int, void **))
{
pgd_t *npgd;
pud_t *npud;
......@@ -136,19 +138,20 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
struct host_vm_op ops[1];
void *flush = NULL;
int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;
int ret = 0;
if(mm == NULL) return;
ops[0].type = NONE;
for(addr = start_addr; addr < end_addr;){
for(addr = start_addr; addr < end_addr && !ret;){
npgd = pgd_offset(mm, addr);
if(!pgd_present(*npgd)){
end = ADD_ROUND(addr, PGDIR_SIZE);
if(end > end_addr)
end = end_addr;
if(force || pgd_newpage(*npgd)){
op_index = add_munmap(addr, end - addr, ops,
op_index, last_op, mmu,
ret = add_munmap(addr, end - addr, ops,
&op_index, last_op, mmu,
&flush, do_ops);
pgd_mkuptodate(*npgd);
}
......@@ -162,8 +165,8 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
if(end > end_addr)
end = end_addr;
if(force || pud_newpage(*npud)){
op_index = add_munmap(addr, end - addr, ops,
op_index, last_op, mmu,
ret = add_munmap(addr, end - addr, ops,
&op_index, last_op, mmu,
&flush, do_ops);
pud_mkuptodate(*npud);
}
......@@ -177,8 +180,8 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
if(end > end_addr)
end = end_addr;
if(force || pmd_newpage(*npmd)){
op_index = add_munmap(addr, end - addr, ops,
op_index, last_op, mmu,
ret = add_munmap(addr, end - addr, ops,
&op_index, last_op, mmu,
&flush, do_ops);
pmd_mkuptodate(*npmd);
}
......@@ -198,24 +201,32 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
}
if(force || pte_newpage(*npte)){
if(pte_present(*npte))
op_index = add_mmap(addr,
ret = add_mmap(addr,
pte_val(*npte) & PAGE_MASK,
PAGE_SIZE, r, w, x, ops,
op_index, last_op, mmu,
&op_index, last_op, mmu,
&flush, do_ops);
else op_index = add_munmap(addr, PAGE_SIZE, ops,
op_index, last_op, mmu,
else ret = add_munmap(addr, PAGE_SIZE, ops,
&op_index, last_op, mmu,
&flush, do_ops);
}
else if(pte_newprot(*npte))
op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
op_index, last_op, mmu,
ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
&op_index, last_op, mmu,
&flush, do_ops);
*npte = pte_mkuptodate(*npte);
addr += PAGE_SIZE;
}
flush = (*do_ops)(mmu, ops, op_index, 1, flush);
if(!ret)
ret = (*do_ops)(mmu, ops, op_index, 1, &flush);
/* This is not an else because ret is modified above */
if(ret) {
printk("fix_range_common: failed, killing current process\n");
force_sig(SIGKILL, current);
}
}
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
......
......@@ -17,26 +17,31 @@
#include "os.h"
#include "tlb.h"
static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
int finished, void *flush)
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
int finished, void **flush)
{
struct host_vm_op *op;
int i;
int i, ret=0;
for(i = 0; i <= last; i++){
for(i = 0; i <= last && !ret; i++){
op = &ops[i];
switch(op->type){
case MMAP:
os_map_memory((void *) op->u.mmap.addr, op->u.mmap.fd,
op->u.mmap.offset, op->u.mmap.len,
op->u.mmap.r, op->u.mmap.w,
op->u.mmap.x);
ret = os_map_memory((void *) op->u.mmap.addr,
op->u.mmap.fd, op->u.mmap.offset,
op->u.mmap.len, op->u.mmap.r,
op->u.mmap.w, op->u.mmap.x);
break;
case MUNMAP:
os_unmap_memory((void *) op->u.munmap.addr,
ret = os_unmap_memory((void *) op->u.munmap.addr,
op->u.munmap.len);
break;
case MPROTECT:
ret = protect_memory(op->u.mprotect.addr,
op->u.munmap.len,
op->u.mprotect.r,
op->u.mprotect.w,
op->u.mprotect.x, 1);
protect_memory(op->u.mprotect.addr, op->u.munmap.len,
op->u.mprotect.r, op->u.mprotect.w,
op->u.mprotect.x, 1);
......@@ -47,7 +52,7 @@ static void *do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
}
}
return NULL;
return ret;
}
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
......
......@@ -2,24 +2,50 @@
.globl syscall_stub
.section .__syscall_stub, "x"
syscall_stub:
int $0x80
mov %eax, UML_CONFIG_STUB_DATA
int3
.globl batch_syscall_stub
batch_syscall_stub:
mov $UML_CONFIG_STUB_DATA, %esp
again: pop %eax
/* load pointer to first operation */
mov $(UML_CONFIG_STUB_DATA+8), %esp
again:
/* load length of additional data */
mov 0x0(%esp), %eax
/* if(length == 0) : end of list */
/* write possible 0 to header */
mov %eax, UML_CONFIG_STUB_DATA+4
cmpl $0, %eax
jz done
/* save current pointer */
mov %esp, UML_CONFIG_STUB_DATA+4
/* skip additional data */
add %eax, %esp
/* load syscall-# */
pop %eax
/* load syscall params */
pop %ebx
pop %ecx
pop %edx
pop %esi
pop %edi
pop %ebp
/* execute syscall */
int $0x80
/* check return value */
pop %ebx
cmp %ebx, %eax
je again
done:
/* save return value */
mov %eax, UML_CONFIG_STUB_DATA
jmp again
done: int3
/* stop */
int3
......@@ -16,21 +16,51 @@ syscall_stub:
.globl batch_syscall_stub
batch_syscall_stub:
movq $(UML_CONFIG_STUB_DATA >> 32), %rbx
salq $32, %rbx
movq $(UML_CONFIG_STUB_DATA & 0xffffffff), %rcx
or %rcx, %rbx
movq %rbx, %rsp
again: pop %rax
cmpq $0, %rax
jz done
mov $(UML_CONFIG_STUB_DATA >> 32), %rbx
sal $32, %rbx
mov $(UML_CONFIG_STUB_DATA & 0xffffffff), %rax
or %rax, %rbx
/* load pointer to first operation */
mov %rbx, %rsp
add $0x10, %rsp
again:
/* load length of additional data */
mov 0x0(%rsp), %rax
/* if(length == 0) : end of list */
/* write possible 0 to header */
mov %rax, 8(%rbx)
cmp $0, %rax
jz done
/* save current pointer */
mov %rsp, 8(%rbx)
/* skip additional data */
add %rax, %rsp
/* load syscall-# */
pop %rax
/* load syscall params */
pop %rdi
pop %rsi
pop %rdx
pop %r10
pop %r8
pop %r9
/* execute syscall */
syscall
/* check return value */
pop %rcx
cmp %rcx, %rax
je again
done:
/* save return value */
mov %rax, (%rbx)
jmp again
done: int3
/* stop */
int3
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment