Commit 46356545 authored by Linus Torvalds

Merge UML update

parents 0789ed0a 1171e981
......@@ -28,8 +28,13 @@ tristate 'Host filesystem' CONFIG_HOSTFS
bool 'Management console' CONFIG_MCONSOLE
dep_bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ $CONFIG_MCONSOLE
bool '2G/2G host address space split' CONFIG_HOST_2G_2G
bool 'Symmetric multi-processing support' CONFIG_UML_SMP
define_bool CONFIG_SMP $CONFIG_UML_SMP
if [ "$CONFIG_SMP" = "y" ]; then
int 'Maximum number of CPUs (2-32)' CONFIG_NR_CPUS 32
fi
int 'Nesting level' CONFIG_NEST_LEVEL 0
int 'Kernel address space size (in .5G units)' CONFIG_KERNEL_HALF_GIGS 1
bool 'Highmem support' CONFIG_HIGHMEM
......
......@@ -155,6 +155,8 @@ static void tracer_winch_handler(int sig)
errno);
}
/* Called only by the tracing thread during initialization */
void setup_tracer_winch(void)
{
int err;
......
......@@ -51,8 +51,8 @@
MODULE_LICENSE("GPL");
/* Locked by the BKL in harddog_open and harddog_release */
static int timer_alive;
static int harddog_in_fd = -1;
static int harddog_out_fd = -1;
......@@ -67,6 +67,7 @@ static int harddog_open(struct inode *inode, struct file *file)
int err;
char *sock = NULL;
lock_kernel();
if(timer_alive)
return -EBUSY;
#ifdef CONFIG_HARDDOG_NOWAYOUT
......@@ -80,6 +81,7 @@ static int harddog_open(struct inode *inode, struct file *file)
if(err) return(err);
timer_alive = 1;
unlock_kernel();
return 0;
}
......
......@@ -15,6 +15,7 @@
#include "init.h"
#include "hostaudio.h"
/* Only changed from linux_main at boot time */
char *dsp = HOSTAUDIO_DEV_DSP;
char *mixer = HOSTAUDIO_DEV_MIXER;
......
......@@ -99,19 +99,27 @@ int line_write(struct line *lines, struct tty_struct *tty, const char *buf,
i = minor(tty->device) - tty->driver.minor_start;
line = &lines[i];
down(&line->sem);
if(line->head != line->tail){
local_irq_save(flags);
buffer_data(line, buf, len);
err = flush_buffer(line);
local_irq_restore(flags);
if(err <= 0) return(len);
if(err <= 0)
goto out;
}
else {
n = write_chan(&line->chan_list, buf, len,
line->driver->write_irq);
if(n < 0) return(n);
if(n < len) buffer_data(line, buf + n, len - n);
if(n < 0){
len = n;
goto out;
}
if(n < len)
buffer_data(line, buf + n, len - n);
}
out:
up(&line->sem);
return(len);
}
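A sketch, not code from the patch: this hunk folds line_write()'s early returns into a single exit label so the down()/up() pair on line->sem always balances. The idiom in isolation, with invented names (struct my_line, example_io):

struct my_line {
	struct semaphore sem;
	/* ... */
};

extern int example_io(struct my_line *line, const char *buf, int len);

static int example_write(struct my_line *line, const char *buf, int len)
{
	int ret;

	down(&line->sem);
	ret = example_io(line, buf, len);
	if(ret < 0)
		goto out;		/* error path falls through to up() */
	ret = len;
 out:
	up(&line->sem);			/* released on every exit path */
	return(ret);
}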
......@@ -249,6 +257,7 @@ void line_close(struct line *lines, struct tty_struct *tty)
else n = minor(tty->device) - tty->driver.minor_start;
line = &lines[n];
down(&line->sem);
line->count--;
/* I don't like this, but I can't think of anything better. What's
......@@ -261,6 +270,7 @@ void line_close(struct line *lines, struct tty_struct *tty)
line->tty = NULL;
if(line->count == 0)
line_disable(line, -1);
up(&line->sem);
}
void close_lines(struct line *lines, int nlines)
......@@ -406,16 +416,18 @@ void winch_interrupt(int irq, void *data, struct pt_regs *unused)
reactivate_fd(winch->fd, WINCH_IRQ);
}
DECLARE_MUTEX(winch_handler_sem);
LIST_HEAD(winch_handlers);
void register_winch_irq(int fd, int tty_fd, int pid, void *line)
{
struct winch *winch;
down(&winch_handler_sem);
winch = kmalloc(sizeof(*winch), GFP_KERNEL);
if(winch == NULL){
printk("register_winch_irq - kmalloc failed\n");
return;
goto out;
}
*winch = ((struct winch) { list : LIST_HEAD_INIT(winch->list),
fd : fd,
......@@ -427,6 +439,8 @@ void register_winch_irq(int fd, int tty_fd, int pid, void *line)
SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
"winch", winch) < 0)
printk("register_winch_irq - failed to register IRQ\n");
out:
up(&winch_handler_sem);
}
static void winch_cleanup(void)
......
......@@ -40,6 +40,11 @@ static struct notifier_block reboot_notifier = {
priority: 0,
};
/* Safe without explicit locking for now. Tasklets provide their own
* locking, and the interrupt handler is safe because it can't interrupt
* itself and it can only happen on CPU 0.
*/
LIST_HEAD(mc_requests);
void mc_work_proc(void *unused)
......@@ -49,12 +54,12 @@ void mc_work_proc(void *unused)
int done;
do {
save_flags(flags);
local_save_flags(flags);
req = list_entry(mc_requests.next, struct mconsole_entry,
list);
list_del(&req->list);
done = list_empty(&mc_requests);
restore_flags(flags);
local_irq_restore(flags);
req->request.cmd->handler(&req->request);
kfree(req);
} while(!done);
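The hunk above trades the old global save_flags()/restore_flags() for per-CPU flag handling while draining mc_requests, which the new comment says is touched only by this work function and by the interrupt handler on CPU 0. A sketch of that drain pattern with invented names (struct example_entry, example_handle); it uses local_irq_save() for clarity where the patch pairs local_save_flags() with local_irq_restore(), and like the original it assumes the list is non-empty when called:

struct example_entry {
	struct list_head list;
	/* payload omitted */
};

static LIST_HEAD(example_list);

extern void example_handle(struct example_entry *entry);

static void example_drain(void)
{
	struct example_entry *entry;
	unsigned long flags;
	int done;

	do {
		local_irq_save(flags);	/* keep the IRQ handler off the list */
		entry = list_entry(example_list.next, struct example_entry,
				   list);
		list_del(&entry->list);
		done = list_empty(&example_list);
		local_irq_restore(flags);

		example_handle(entry);	/* work done with interrupts back on */
		kfree(entry);
	} while(!done);
}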
......@@ -152,6 +157,8 @@ void mconsole_stop(struct mc_request *req)
mconsole_reply(req, "", 0, 0);
}
/* This list is populated by __initcall routines. */
LIST_HEAD(mconsole_devices);
void mconsole_register_dev(struct mc_device *new)
......@@ -224,6 +231,9 @@ void mconsole_sysrq(struct mc_request *req)
}
#endif
/* Changed by mconsole_setup, which is __setup, and called before SMP is
* active.
*/
static char *notify_socket = NULL;
int mconsole_init(void)
......@@ -301,6 +311,18 @@ static int create_proc_mconsole(void)
return(0);
}
static spinlock_t notify_spinlock = SPIN_LOCK_UNLOCKED;
void lock_notify(void)
{
spin_lock(&notify_spinlock);
}
void unlock_notify(void)
{
spin_unlock(&notify_spinlock);
}
__initcall(create_proc_mconsole);
#define NOTIFY "=notify:"
......
......@@ -30,6 +30,7 @@ static struct mconsole_command commands[] = {
{ "go", mconsole_go, 1 },
};
/* Initialized in mconsole_init, which is an initcall */
char mconsole_socket_name[256];
int mconsole_reply_v0(struct mc_request *req, char *reply)
......@@ -162,16 +163,21 @@ int mconsole_notify(char *sock_name, int type, const void *data, int len)
{
struct sockaddr_un target;
struct mconsole_notify packet;
int n, err;
int n, err = 0;
lock_notify();
if(notify_sock < 0){
notify_sock = socket(PF_UNIX, SOCK_DGRAM, 0);
if(notify_sock < 0){
printk("mconsole_notify - socket failed, errno = %d\n",
errno);
return(-errno);
err = -errno;
}
}
unlock_notify();
if(err)
return(err);
target.sun_family = AF_UNIX;
strcpy(target.sun_path, sock_name);
......
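mconsole_notify() above now creates its notification socket lazily under lock_notify(), records any errno into err while still holding the lock, and only reports the failure after unlock_notify(). A stripped-down sketch of that shape, assuming the same libc calls the file already uses; example_fd, example_lock()/example_unlock() are invented stand-ins:

static int example_fd = -1;

extern void example_lock(void);
extern void example_unlock(void);

static int example_get_socket(void)
{
	int err = 0;

	example_lock();
	if(example_fd < 0){
		example_fd = socket(PF_UNIX, SOCK_DGRAM, 0);
		if(example_fd < 0)
			err = -errno;	/* remember the failure under the lock */
	}
	example_unlock();

	if(err)				/* report it only after dropping the lock */
		return(err);
	return(example_fd);
}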
......@@ -15,13 +15,14 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/smplock.h>
#include <asm/pgtable.h>
#include "mem_user.h"
#include "user_util.h"
/* These are set in mmapper_init, which is called at boot time */
static unsigned long mmapper_size;
static unsigned long p_buf = 0;
static char *v_buf = NULL;
......
......@@ -27,6 +27,7 @@
#include "init.h"
#include "irq_user.h"
static spinlock_t opened_lock = SPIN_LOCK_UNLOCKED;
LIST_HEAD(opened);
static int uml_net_rx(struct net_device *dev)
......@@ -118,7 +119,9 @@ static int uml_net_open(struct net_device *dev)
lp->tl.data = (unsigned long) &lp->user;
netif_start_queue(dev);
spin_lock(&opened_lock);
list_add(&lp->list, &opened);
spin_unlock(&opened_lock);
MOD_INC_USE_COUNT;
out:
spin_unlock(&lp->lock);
......@@ -135,7 +138,9 @@ static int uml_net_close(struct net_device *dev)
free_irq(dev->irq, dev);
if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
lp->fd = -1;
spin_lock(&opened_lock);
list_del(&lp->list);
spin_unlock(&opened_lock);
MOD_DEC_USE_COUNT;
spin_unlock(&lp->lock);
......@@ -245,6 +250,7 @@ void uml_net_user_timer_expire(unsigned long _conn)
#endif
}
static spinlock_t devices_lock = SPIN_LOCK_UNLOCKED;
static struct list_head devices = LIST_HEAD_INIT(devices);
static int eth_configure(int n, void *init, char *mac,
......@@ -261,7 +267,10 @@ static int eth_configure(int n, void *init, char *mac,
return(1);
}
spin_lock(&devices_lock);
list_add(&device->list, &devices);
spin_unlock(&devices_lock);
device->index = n;
size = transport->private_size + sizeof(struct uml_net_private) +
......@@ -373,12 +382,16 @@ static struct uml_net *find_device(int n)
struct uml_net *device;
struct list_head *ele;
spin_lock(&devices_lock);
list_for_each(ele, &devices){
device = list_entry(ele, struct uml_net, list);
if(device->index == n)
return(device);
goto out;
}
return(NULL);
device = NULL;
out:
spin_unlock(&devices_lock);
return(device);
}
static int eth_parse(char *str, int *index_out, char **str_out)
......@@ -418,8 +431,12 @@ struct eth_init {
int index;
};
/* Filled in at boot time. Will need locking if the transports become
* modular.
*/
struct list_head transports = LIST_HEAD_INIT(transports);
/* Filled in during early boot */
struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);
static int check_transport(struct transport *transport, char *eth, int n,
......
......@@ -62,8 +62,6 @@ static void pipe_interrupt(int irq, void *data, struct pt_regs *regs)
up(&conn->port->sem);
}
struct list_head ports = LIST_HEAD_INIT(ports);
static void port_interrupt(int irq, void *data, struct pt_regs *regs)
{
struct port_list *port = data;
......@@ -107,6 +105,9 @@ static void port_interrupt(int irq, void *data, struct pt_regs *regs)
reactivate_fd(port->fd, ACCEPT_IRQ);
}
DECLARE_MUTEX(ports_sem);
struct list_head ports = LIST_HEAD_INIT(ports);
void *port_data(int port_num)
{
struct list_head *ele;
......@@ -114,6 +115,7 @@ void *port_data(int port_num)
struct port_dev *dev;
int fd;
down(&ports_sem);
list_for_each(ele, &ports){
port = list_entry(ele, struct port_list, list);
if(port->port == port_num) goto found;
......@@ -121,7 +123,7 @@ void *port_data(int port_num)
port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
if(port == NULL){
printk(KERN_ERR "Allocation of port list failed\n");
return(NULL);
goto out;
}
fd = port_listen_fd(port_num);
......@@ -151,18 +153,21 @@ void *port_data(int port_num)
dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
if(dev == NULL){
printk(KERN_ERR "Allocation of port device entry failed\n");
return(NULL);
goto out;
}
*dev = ((struct port_dev) { port : port,
fd : -1,
helper_pid : -1 });
up(&ports_sem);
return(dev);
out_free:
kfree(port);
out_close:
os_close_file(fd);
out:
up(&ports_sem);
return(NULL);
}
......
......@@ -61,6 +61,9 @@ static struct line_driver driver = {
symlink_to : "tts",
};
/* The array is initialized by line_init, which is an initcall. The
* individual elements are protected by individual semaphores.
*/
static struct line serial_lines[NR_PORTS] =
{ [0 ... NR_PORTS - 1] = LINE_INIT(CONFIG_SSL_CHAN, &driver) };
......
......@@ -32,8 +32,14 @@
#define MAX_TTYS (8)
/* Referenced only by tty_driver below - presumably it's locked correctly
* by the tty driver.
*/
static struct tty_driver console_driver;
static int console_refcount = 0;
static struct chan_ops init_console_ops = {
init : NULL,
open : NULL,
......@@ -88,6 +94,9 @@ static struct line_driver driver = {
static struct lines console_lines = LINES_INIT(MAX_TTYS);
/* The array is initialized by line_init, which is an initcall. The
* individual elements are protected by individual semaphores.
*/
struct line vts[MAX_TTYS] = { LINE_INIT(CONFIG_CON_ZERO_CHAN, &driver),
[ 1 ... MAX_TTYS - 1 ] =
LINE_INIT(CONFIG_CON_CHAN, &driver) };
......
......@@ -25,6 +25,7 @@
#include "linux/vmalloc.h"
#include "linux/blkpg.h"
#include "linux/genhd.h"
#include "linux/spinlock.h"
#include "asm/segment.h"
#include "asm/uaccess.h"
#include "asm/irq.h"
......@@ -41,7 +42,9 @@
#include "2_5compat.h"
#include "os.h"
static spinlock_t ubd_lock;
static spinlock_t ubd_io_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t ubd_lock = SPIN_LOCK_UNLOCKED;
static void (*do_ubd)(void);
static int ubd_open(struct inode * inode, struct file * filp);
......@@ -62,9 +65,12 @@ static struct block_device_operations ubd_blops = {
.revalidate = ubd_revalidate,
};
/* Protected by the queue_lock */
static request_queue_t *ubd_queue;
/* Protected by ubd_lock */
static int fake_major = 0;
static struct gendisk *ubd_gendisk[MAX_DEV];
static struct gendisk *fake_gendisk[MAX_DEV];
......@@ -74,6 +80,9 @@ static struct gendisk *fake_gendisk[MAX_DEV];
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0 })
#endif
/* Not protected - changed only in ubd_setup_common and then only
* to enable O_SYNC.
*/
static struct openflags global_openflags = OPEN_FLAGS;
struct cow {
......@@ -164,7 +173,9 @@ static void make_ide_entries(char *dev_name)
char name[64];
if(!fake_ide) return;
if(proc_ide_root == NULL) make_proc_ide();
dir = proc_mkdir(dev_name, proc_ide);
ent = create_proc_entry("media", S_IFREG|S_IRUGO, dir);
if(!ent) return;
......@@ -193,7 +204,7 @@ static int ubd_setup_common(char *str, int *index_out)
{
struct openflags flags = global_openflags;
char *backing_file;
int n;
int n, err;
if(index_out) *index_out = -1;
n = *str++;
......@@ -218,12 +229,22 @@ static int ubd_setup_common(char *str, int *index_out)
return(1);
}
err = 1;
spin_lock(&ubd_lock);
if(!fake_major_allowed){
printk(KERN_ERR "Can't assign a fake major twice\n");
goto out1;
}
fake_major = major;
fake_major_allowed = 0;
printk(KERN_INFO "Setting extra ubd major number to %d\n",
major);
return(0);
err = 0;
out1:
spin_unlock(&ubd_lock);
return(err);
}
if(n < '0'){
......@@ -241,9 +262,12 @@ static int ubd_setup_common(char *str, int *index_out)
return(1);
}
err = 1;
spin_lock(&ubd_lock);
if(ubd_dev[n].file != NULL){
printk(KERN_ERR "ubd_setup : device already configured\n");
return(1);
goto out2;
}
if(index_out) *index_out = n;
......@@ -258,8 +282,10 @@ static int ubd_setup_common(char *str, int *index_out)
}
if(*str++ != '='){
printk(KERN_ERR "ubd_setup : Expected '='\n");
return(1);
goto out2;
}
err = 0;
backing_file = strchr(str, ',');
if(backing_file){
*backing_file = '\0';
......@@ -270,7 +296,9 @@ static int ubd_setup_common(char *str, int *index_out)
ubd_dev[n].is_dir = 1;
ubd_dev[n].cow.file = backing_file;
ubd_dev[n].boot_openflags = flags;
return(0);
out2:
spin_unlock(&ubd_lock);
return(err);
}
static int ubd_setup(char *str)
......@@ -311,8 +339,12 @@ __uml_help(fakehd,
static void do_ubd_request(request_queue_t * q);
/* Only changed by ubd_init, which is an initcall. */
int thread_fd = -1;
/* Changed by ubd_handler, which is serialized because interrupts only
* happen on CPU 0.
*/
int intr_count = 0;
static void ubd_finish(int error)
......@@ -320,7 +352,9 @@ static void ubd_finish(int error)
int nsect;
if(error){
spin_lock(&ubd_io_lock);
end_request(CURRENT, 0);
spin_unlock(&ubd_io_lock);
return;
}
nsect = CURRENT->current_nr_sectors;
......@@ -329,7 +363,9 @@ static void ubd_finish(int error)
CURRENT->errors = 0;
CURRENT->nr_sectors -= nsect;
CURRENT->current_nr_sectors = 0;
spin_lock(&ubd_io_lock);
end_request(CURRENT, 1);
spin_unlock(&ubd_io_lock);
}
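Two separate spinlocks are being introduced in this driver: ubd_lock guards device configuration (ubd_dev[], fake_major, the gendisk bookkeeping), while ubd_io_lock guards request completion and is the lock handed to blk_init_queue() in ubd_init() below. A sketch of the split, using only names visible in this patch; it is an illustration, not patch code:

static void example_complete(struct request *req, int uptodate)
{
	spin_lock(&ubd_io_lock);	/* I/O side: end_request and the queue */
	end_request(req, uptodate);
	spin_unlock(&ubd_io_lock);
}

static void example_configure(int n, char *file)
{
	spin_lock(&ubd_lock);		/* config side: ubd_dev[], fake_major */
	ubd_dev[n].file = file;
	spin_unlock(&ubd_lock);
}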
static void ubd_handler(void)
......@@ -343,9 +379,9 @@ static void ubd_handler(void)
if(n != sizeof(req)){
printk(KERN_ERR "Pid %d - spurious interrupt in ubd_handler, "
"errno = %d\n", os_getpid(), -n);
spin_lock(&ubd_lock);
spin_lock(&ubd_io_lock);
end_request(CURRENT, 0);
spin_unlock(&ubd_lock);
spin_unlock(&ubd_io_lock);
return;
}
......@@ -353,11 +389,9 @@ static void ubd_handler(void)
(req.length != (CURRENT->current_nr_sectors) << 9))
panic("I/O op mismatch");
spin_lock(&ubd_lock);
ubd_finish(req.error);
reactivate_fd(thread_fd, UBD_IRQ);
do_ubd_request(ubd_queue);
spin_unlock(&ubd_lock);
}
static void ubd_intr(int irq, void *dev, struct pt_regs *unused)
......@@ -365,6 +399,7 @@ static void ubd_intr(int irq, void *dev, struct pt_regs *unused)
ubd_handler();
}
/* Only changed by ubd_init, which is an initcall. */
static int io_pid = -1;
void kill_io_thread(void)
......@@ -382,6 +417,7 @@ static int ubd_file_size(struct ubd *dev, __u64 *size_out)
return(os_file_size(file, size_out));
}
/* Initialized in an initcall, and unchanged thereafter */
devfs_handle_t ubd_dir_handle;
devfs_handle_t ubd_fake_dir_handle;
......@@ -394,7 +430,7 @@ static int ubd_add(int n)
u64 size;
if (!dev->file)
return -1;
goto out;
disk = alloc_disk(1 << UBD_SHIFT);
if (!disk)
......@@ -433,23 +469,32 @@ static int ubd_add(int n)
MAJOR_NR, n << UBD_SHIFT,
S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP |S_IWGRP,
&ubd_blops, NULL);
add_disk(disk);
if(real == NULL)
goto out;
ubd_dev[n].real = real;
if (fake_major) {
fake = devfs_register(ubd_fake_dir_handle, name,
DEVFS_FL_REMOVABLE, fake_major,
n << UBD_SHIFT,
S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP |
S_IWGRP, &ubd_blops, NULL);
add_disk(fake_disk);
if(fake == NULL) return(-1);
if(fake == NULL)
goto out_unregister;
ubd_dev[n].fake = fake;
add_disk(fake_disk);
}
if(real == NULL) return(-1);
ubd_dev[n].real = real;
add_disk(disk);
make_ide_entries(disk->disk_name);
return(0);
out_unregister:
devfs_unregister(real);
ubd_dev[n].real = NULL;
out:
return(-1);
}
static int ubd_config(char *str)
......@@ -468,24 +513,29 @@ static int ubd_config(char *str)
}
if(n == -1) return(0);
spin_lock(&ubd_lock);
err = ubd_add(n);
if(err){
if(err)
ubd_dev[n].file = NULL;
return(err);
}
spin_unlock(&ubd_lock);
return(0);
return(err);
}
static int ubd_remove(char *str)
{
struct ubd *dev;
int n;
int n, err;
if(!isdigit(*str)) return(-1);
if(!isdigit(*str))
return(-1);
n = *str - '0';
if(n > MAX_DEV) return(-1);
if(n > MAX_DEV)
return(-1);
dev = &ubd_dev[n];
err = 0;
spin_lock(&ubd_lock);
del_gendisk(ubd_gendisk[n]);
put_disk(ubd_gendisk[n]);
ubd_gendisk[n] = NULL;
......@@ -494,12 +544,20 @@ static int ubd_remove(char *str)
put_disk(fake_gendisk[n]);
fake_gendisk[n] = NULL;
}
if(dev->file == NULL) return(0);
if(dev->count > 0) return(-1);
if(dev->real != NULL) devfs_unregister(dev->real);
if(dev->fake != NULL) devfs_unregister(dev->fake);
if(dev->file == NULL)
goto out;
err = -1;
if(dev->count > 0)
goto out;
if(dev->real != NULL)
devfs_unregister(dev->real);
if(dev->fake != NULL)
devfs_unregister(dev->fake);
*dev = ((struct ubd) DEFAULT_UBD);
return(0);
err = 0;
out:
spin_unlock(&ubd_lock);
return(err);
}
static struct mc_device ubd_mc = {
......@@ -531,7 +589,7 @@ int ubd_init(void)
return -1;
}
ubd_queue = BLK_DEFAULT_QUEUE(MAJOR_NR);
INIT_QUEUE(ubd_queue, do_ubd_request, &ubd_lock);
blk_init_queue(ubd_queue, do_ubd_request, &ubd_io_lock);
elevator_init(ubd_queue, &elevator_noop);
if(fake_major != 0){
char name[sizeof("ubd_nnn\0")];
......@@ -634,7 +692,7 @@ static int ubd_open_dev(struct ubd *dev)
}
return(0);
error:
close_fd(dev->fd);
os_close_file(dev->fd);
return(err);
}
......@@ -643,9 +701,8 @@ static int ubd_open(struct inode *inode, struct file *filp)
int n = DEVICE_NR(inode->i_rdev);
struct ubd *dev = &ubd_dev[n];
int err;
if(dev->is_dir == 1)
return(0);
goto out;
if(dev->count == 0){
dev->openflags = dev->boot_openflags;
......@@ -654,16 +711,16 @@ static int ubd_open(struct inode *inode, struct file *filp)
if(err){
printk(KERN_ERR "ubd%d: Can't open \"%s\": "
"errno = %d\n", n, dev->file, -err);
return(err);
goto out;
}
if(err) return(err);
}
dev->count++;
if((filp->f_mode & FMODE_WRITE) && !dev->openflags.w){
if(--dev->count == 0) ubd_close(dev);
return -EROFS;
err = -EROFS;
}
return(0);
out:
return(err);
}
static int ubd_release(struct inode * inode, struct file * file)
......@@ -726,13 +783,17 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
if(dev->is_dir){
strcpy(req->buffer, "HOSTFS:");
strcat(req->buffer, dev->file);
spin_lock(&ubd_io_lock);
end_request(req, 1);
spin_unlock(&ubd_io_lock);
return(1);
}
if((rq_data_dir(req) == WRITE) && !dev->openflags.w){
printk("Write attempted on readonly ubd device %d\n", n);
spin_lock(&ubd_io_lock);
end_request(req, 0);
spin_unlock(&ubd_io_lock);
return(1);
}
......@@ -872,8 +933,11 @@ static int ubd_revalidate(kdev_t rdev)
n = minor(rdev) >> UBD_SHIFT;
dev = &ubd_dev[n];
err = 0;
spin_lock(&ubd_lock);
if(dev->is_dir)
return(0);
goto out;
err = ubd_file_size(dev, &size);
if (!err) {
......@@ -882,7 +946,8 @@ static int ubd_revalidate(kdev_t rdev)
set_capacity(fake_gendisk[n], size / 512);
dev->size = size;
}
out:
spin_unlock(&ubd_lock);
return err;
}
......
......@@ -533,8 +533,12 @@ void do_io(struct io_thread_req *req)
return;
}
/* Changed in start_io_thread, which is serialized by being called only
* from ubd_init, which is an initcall.
*/
int kernel_fd = -1;
/* Only changed by the io thread */
int io_count = 0;
int io_thread(void *arg)
......
......@@ -44,6 +44,7 @@ void *xterm_init(char *str, int device, struct chan_opts *opts)
return(data);
}
/* Only changed by xterm_setup, which is a setup */
static char *terminal_emulator = "xterm";
static char *title_switch = "-T";
static char *exec_switch = "-e";
......
......@@ -20,8 +20,6 @@
next : NULL \
}
#define INIT_QUEUE(queue, request, lock) blk_init_queue(queue, request, lock)
#define INIT_HARDSECT(arr, maj, sizes)
#define SET_PRI(task) do ; while(0)
......
......@@ -18,7 +18,9 @@ extern void forward_interrupts(int pid);
extern void init_irq_signals(int on_sigstack);
extern void forward_ipi(int fd, int pid);
extern void free_irq_later(int irq, void *dev_id);
extern int activate_ipi(int fd, int pid);
extern unsigned long irq_lock(void);
extern void irq_unlock(unsigned long flags);
#endif
/*
......
......@@ -50,12 +50,8 @@ extern int pid_to_processor_id(int pid);
extern void block_signals(void);
extern void unblock_signals(void);
extern void deliver_signals(void *t);
extern void lock_syscall(void);
extern void unlock_syscall(void);
extern void lock_trap(void);
extern void unlock_trap(void);
extern void lock_pid(void);
extern void unlock_pid(void);
extern int next_syscall_index(int max);
extern int next_trap_index(int max);
extern void default_idle(void);
extern void finish_fork(void);
extern void paging_init(void);
......@@ -121,7 +117,7 @@ extern void arch_switch(void);
extern int is_valid_pid(int pid);
extern void free_irq(unsigned int, void *);
extern int um_in_interrupt(void);
extern int cpu(void);
#endif
/*
......
......@@ -77,6 +77,8 @@ extern int mconsole_get_request(int fd, struct mc_request *req);
extern int mconsole_notify(char *sock_name, int type, const void *data,
int len);
extern char *mconsole_notify_socket(void);
extern void lock_notify(void);
extern void unlock_notify(void);
#endif
......
......@@ -11,6 +11,8 @@ extern int register_sigio_fd(int fd);
extern int read_sigio_fd(int fd);
extern int add_sigio_fd(int fd, int read);
extern int ignore_sigio_fd(int fd);
extern void sigio_lock(void);
extern void sigio_unlock(void);
#endif
......
......@@ -7,11 +7,11 @@
#define __TIME_USER_H__
extern void timer(void);
extern void get_profile_timer(void);
extern void disable_profile_timer(void);
extern void switch_timers(int to_real);
extern void user_time_init(void);
extern void set_timers(int set_signal);
extern void idle_sleep(int secs);
extern void enable_timer(void);
extern void time_lock(void);
extern void time_unlock(void);
#endif
......@@ -17,6 +17,7 @@
#include "tlb.h"
#include "2_5compat.h"
#include "os.h"
#include "time_user.h"
/* See comment above fork_tramp for why sigstop is defined and used like
* this
......@@ -28,7 +29,6 @@ static int exec_tramp(void *sig_stack)
{
int sig = sigstop;
block_signals();
init_new_thread(sig_stack, NULL);
kill(os_getpid(), sig);
return(0);
......@@ -62,6 +62,7 @@ void flush_thread(void)
unprotect_stack((unsigned long) current->thread_info);
os_usr1_process(os_getpid());
enable_timer();
free_page(stack);
protect(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1);
task_protections((unsigned long) current->thread_info);
......
......@@ -8,6 +8,9 @@
#include "linux/proc_fs.h"
#include "asm/uaccess.h"
/* If read and write race, the read will still atomically read a valid
* value.
*/
int uml_exitcode = 0;
static int read_proc_exitcode(char *page, char **start, off_t off,
......
......@@ -130,6 +130,7 @@ static void child_common(void *sp, int size, sighandler_t handler, int flags)
os_stop_process(os_getpid());
}
/* Changed only during early boot */
struct sc_frame signal_frame_sc;
struct sc_frame_raw {
......@@ -142,6 +143,7 @@ struct sc_frame_raw {
struct arch_frame_data_raw arch;
};
/* Changed only during early boot */
static struct sc_frame_raw *raw_sc = NULL;
static void sc_handler(int sig, struct sigcontext sc)
......@@ -163,6 +165,7 @@ static int sc_child(void *arg)
return(-1);
}
/* Changed only during early boot */
struct si_frame signal_frame_si;
struct si_frame_raw {
......@@ -175,6 +178,7 @@ struct si_frame_raw {
unsigned long sp;
};
/* Changed only during early boot */
static struct si_frame_raw *raw_si = NULL;
static void si_handler(int sig, siginfo_t *si)
......
......@@ -22,6 +22,7 @@ struct helper_data {
int fd;
};
/* Debugging aid, changed only from gdb */
int helper_pause = 0;
static void helper_hup(int sig)
......
......@@ -13,6 +13,7 @@
#include "init.h"
#include "os.h"
/* Changed by uml_initrd_setup, which is a setup */
static char *initrd __initdata = NULL;
static int __init read_initrd(void)
......
......@@ -78,6 +78,7 @@ struct hw_interrupt_type no_irq_type = {
end_none
};
/* Not changed */
volatile unsigned long irq_err_count;
/*
......@@ -87,6 +88,7 @@ volatile unsigned long irq_err_count;
int get_irq_list(char *buf)
{
int i, j;
unsigned long flags;
struct irqaction * action;
char *p = buf;
......@@ -96,9 +98,10 @@ int get_irq_list(char *buf)
*p++ = '\n';
for (i = 0 ; i < NR_IRQS ; i++) {
spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
continue;
goto end;
p += sprintf(p, "%3d: ",i);
#ifndef CONFIG_SMP
p += sprintf(p, "%10u ", kstat_irqs(i));
......@@ -113,6 +116,8 @@ int get_irq_list(char *buf)
for (action=action->next; action; action = action->next)
p += sprintf(p, ", %s", action->name);
*p++ = '\n';
end:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
p += sprintf(p, "\n");
#ifdef notdef
......@@ -548,11 +553,15 @@ void free_irq(unsigned int irq, void *dev_id)
}
}
/* These are initialized by sysctl_init, which is called from init/main.c */
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
/* These are read and written as longs, so a read won't see a partial write
* even during a race.
*/
static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
#define HEX_DIGITS 8
......@@ -679,6 +688,7 @@ static void register_irq_proc (unsigned int irq)
smp_affinity_entry[irq] = entry;
}
/* Read and written as a long */
unsigned long prof_cpu_mask = -1;
void __init init_irq_proc (void)
......@@ -704,6 +714,21 @@ void __init init_irq_proc (void)
register_irq_proc(i);
}
static spinlock_t irq_spinlock = SPIN_LOCK_UNLOCKED;
unsigned long irq_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&irq_spinlock, flags);
return(flags);
}
void irq_unlock(unsigned long flags)
{
spin_unlock_irqrestore(&irq_spinlock, flags);
}
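irq_lock()/irq_unlock() wrap spin_lock_irqsave()/spin_unlock_irqrestore() and pass the saved flags by value, presumably so the userspace-side irq code (see the extern declarations added to the headers earlier in this patch) can take the lock without touching kernel spinlock internals. The call pattern used by activate_fd() and friends below, reduced to a sketch:

static void example_touch_irq_state(void)
{
	unsigned long flags;

	flags = irq_lock();
	/* ... walk active_fds, edit pollfds ... */
	irq_unlock(flags);
}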
unsigned long probe_irq_on(void)
{
return(0);
......
......@@ -111,40 +111,20 @@ static void maybe_sigio_broken(int fd, int type)
int activate_fd(int irq, int fd, int type, void *dev_id)
{
struct irq_fd *new_fd;
int pid, retval, events, err;
struct pollfd *tmp_pfd;
struct irq_fd *new_fd, *irq_fd;
unsigned long flags;
int pid, events, err, n, size;
pid = os_getpid();
err = os_set_fd_async(fd, pid);
if(err < 0)
goto out;
for(new_fd = active_fds;new_fd;new_fd = new_fd->next){
if((new_fd->fd == fd) && (new_fd->type == type)){
printk("Registering fd %d twice\n", fd);
printk("Irqs : %d, %d\n", new_fd->irq, irq);
printk("Ids : 0x%x, 0x%x\n", new_fd->id, dev_id);
return(-EIO);
}
}
pid = cpu_tasks[0].pid;
if((retval = os_set_fd_async(fd, pid)) != 0)
return(retval);
new_fd = um_kmalloc(sizeof(*new_fd));
err = -ENOMEM;
if(new_fd == NULL) return(err);
pollfds_num++;
if(pollfds_num > pollfds_size){
struct pollfd *tmp_pfd;
tmp_pfd = um_kmalloc(pollfds_num * sizeof(pollfds[0]));
if(tmp_pfd == NULL){
pollfds_num--;
goto out_irq;
}
if(pollfds != NULL){
memcpy(tmp_pfd, pollfds,
sizeof(pollfds[0]) * pollfds_size);
kfree(pollfds);
}
pollfds = tmp_pfd;
pollfds_size = pollfds_num;
}
if(new_fd == NULL)
goto out;
if(type == IRQ_READ) events = POLLIN | POLLPRI;
else events = POLLOUT;
......@@ -158,29 +138,90 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
current_events: 0,
freed : 0 } );
*last_irq_ptr = new_fd;
last_irq_ptr = &new_fd->next;
/* Critical section - locked by a spinlock because this stuff can
* be changed from interrupt handlers. The stuff above is done
* outside the lock because it allocates memory.
*/
/* Actually, it only looks like it can be called from interrupt
* context. The culprit is reactivate_fd, which calls
* maybe_sigio_broken, which calls write_sigio_workaround,
* which calls activate_fd. However, write_sigio_workaround should
* only be called once, at boot time. That would make it clear that
* this is called only from process context, and can be locked with
* a semaphore.
*/
flags = irq_lock();
for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
if((irq_fd->fd == fd) && (irq_fd->type == type)){
printk("Registering fd %d twice\n", fd);
printk("Irqs : %d, %d\n", irq_fd->irq, irq);
printk("Ids : 0x%x, 0x%x\n", irq_fd->id, dev_id);
goto out_unlock;
}
}
n = pollfds_num;
if(n == pollfds_size){
while(1){
/* Here we have to drop the lock in order to call
* kmalloc, which might sleep. If something else
* came in and changed the pollfds array, we free
* the buffer and try again.
*/
irq_unlock(flags);
size = (pollfds_num + 1) * sizeof(pollfds[0]);
tmp_pfd = um_kmalloc(size);
flags = irq_lock();
if(tmp_pfd == NULL)
goto out_unlock;
if(n == pollfds_size)
break;
kfree(tmp_pfd);
}
if(pollfds != NULL){
memcpy(tmp_pfd, pollfds,
sizeof(pollfds[0]) * pollfds_size);
kfree(pollfds);
}
pollfds = tmp_pfd;
pollfds_size++;
}
if(type == IRQ_WRITE) events = 0;
pollfds[pollfds_num - 1] = ((struct pollfd) { fd : fd,
pollfds[pollfds_num] = ((struct pollfd) { fd : fd,
events : events,
revents : 0 });
pollfds_num++;
*last_irq_ptr = new_fd;
last_irq_ptr = &new_fd->next;
irq_unlock(flags);
/* This calls activate_fd, so it has to be outside the critical
* section.
*/
maybe_sigio_broken(fd, type);
return(0);
out_irq:
out_unlock:
irq_unlock(flags);
out_free:
kfree(new_fd);
out:
return(err);
}
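The loop in activate_fd() above drops irq_lock() around um_kmalloc() because the allocation can sleep, then re-checks pollfds_size once the lock is back; if the array grew in the meantime the fresh buffer is discarded and the loop retries. The same shape on an invented integer array (example_buf and example_len are not from the patch), as a sketch:

static int *example_buf = NULL;
static int example_len = 0;

static int example_append(int value)
{
	int *new_buf, n;
	unsigned long flags;

	flags = irq_lock();
	while(1){
		n = example_len;
		irq_unlock(flags);		/* um_kmalloc may sleep */
		new_buf = um_kmalloc((n + 1) * sizeof(int));
		flags = irq_lock();
		if(new_buf == NULL){
			irq_unlock(flags);
			return(-ENOMEM);
		}
		if(n == example_len)		/* nothing changed while unlocked */
			break;
		kfree(new_buf);			/* raced - retry with the new size */
	}
	if(example_buf != NULL)
		memcpy(new_buf, example_buf, n * sizeof(int));
	new_buf[n] = value;
	kfree(example_buf);
	example_buf = new_buf;
	example_len = n + 1;
	irq_unlock(flags);
	return(0);
}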
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
struct irq_fd **prev;
unsigned long flags;
int i = 0;
flags = irq_lock();
prev = &active_fds;
while(*prev != NULL){
if((*test)(*prev, arg)){
......@@ -190,7 +231,7 @@ static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
printk("free_irq_by_cb - mismatch between "
"active_fds and pollfds, fd %d vs %d\n",
(*prev)->fd, pollfds[i].fd);
return;
goto out;
}
memcpy(&pollfds[i], &pollfds[i + 1],
(pollfds_num - i - 1) * sizeof(pollfds[0]));
......@@ -206,6 +247,8 @@ static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
prev = &(*prev)->next;
i++;
}
out:
irq_unlock(flags);
}
struct irq_and_dev {
......@@ -249,22 +292,26 @@ static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
}
if(irq == NULL){
printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
return(NULL);
goto out;
}
if((pollfds[i].fd != -1) && (pollfds[i].fd != fd)){
printk("find_irq_by_fd - mismatch between active_fds and "
"pollfds, fd %d vs %d, need %d\n", irq->fd,
pollfds[i].fd, fd);
return(NULL);
irq = NULL;
goto out;
}
*index_out = i;
out:
return(irq);
}
void free_irq_later(int irq, void *dev_id)
{
struct irq_fd *irq_fd;
unsigned long flags;
flags = irq_lock();
for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
if((irq_fd->irq == irq) && (irq_fd->id == dev_id))
break;
......@@ -272,30 +319,48 @@ void free_irq_later(int irq, void *dev_id)
if(irq_fd == NULL){
printk("free_irq_later found no irq, irq = %d, "
"dev_id = 0x%p\n", irq, dev_id);
return;
goto out;
}
irq_fd->freed = 1;
out:
irq_unlock(flags);
}
void reactivate_fd(int fd, int irqnum)
{
struct irq_fd *irq;
unsigned long flags;
int i;
flags = irq_lock();
irq = find_irq_by_fd(fd, irqnum, &i);
if(irq == NULL) return;
if(irq == NULL){
irq_unlock(flags);
return;
}
pollfds[i].fd = irq->fd;
irq_unlock(flags);
/* This calls activate_fd, so it has to be outside the critical
* section.
*/
maybe_sigio_broken(fd, irq->type);
}
void deactivate_fd(int fd, int irqnum)
{
struct irq_fd *irq;
unsigned long flags;
int i;
flags = irq_lock();
irq = find_irq_by_fd(fd, irqnum, &i);
if(irq == NULL) return;
if(irq == NULL)
goto out;
pollfds[i].fd = -1;
out:
irq_unlock(flags);
}
void forward_ipi(int fd, int pid)
......@@ -313,7 +378,9 @@ void forward_ipi(int fd, int pid)
void forward_interrupts(int pid)
{
struct irq_fd *irq;
unsigned long flags;
flags = irq_lock();
for(irq=active_fds;irq != NULL;irq = irq->next){
if(fcntl(irq->fd, F_SETOWN, pid) < 0){
int save_errno = errno;
......@@ -328,6 +395,7 @@ void forward_interrupts(int pid)
}
irq->pid = pid;
}
irq_unlock(flags);
}
void init_irq_signals(int on_sigstack)
......@@ -339,10 +407,10 @@ void init_irq_signals(int on_sigstack)
if(timer_irq_inited) h = (__sighandler_t) alarm_handler;
else h = boot_timer_handler;
set_handler(SIGVTALRM, h, flags | SA_NODEFER | SA_RESTART,
SIGUSR1, SIGIO, SIGWINCH, -1);
set_handler(SIGVTALRM, h, flags | SA_RESTART,
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1);
set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART,
SIGUSR1, SIGIO, SIGWINCH, -1);
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
signal(SIGWINCH, SIG_IGN);
}
......
......@@ -26,33 +26,32 @@
#include "kern.h"
#include "init.h"
/* Changed during early boot */
pgd_t swapper_pg_dir[1024];
unsigned long high_physmem;
unsigned long low_physmem;
unsigned long vm_start;
unsigned long vm_end;
unsigned long highmem;
pgd_t swapper_pg_dir[1024];
unsigned long *empty_zero_page = NULL;
unsigned long *empty_bad_page = NULL;
/* Not modified */
const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
extern char __init_begin, __init_end;
extern long physmem_size;
/* Not changed by UML */
mmu_gather_t mmu_gathers[NR_CPUS];
/* Changed during early boot */
int kmalloc_ok = 0;
#define NREGIONS (phys_region_index(0xffffffff) - phys_region_index(0x0) + 1)
struct mem_region *regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] = NULL };
#define REGION_SIZE ((0xffffffff & ~REGION_MASK) + 1)
/* Changed during early boot */
static unsigned long brk_end;
static void map_cb(void *unused)
......@@ -108,6 +107,7 @@ void mem_init(void)
}
#if CONFIG_HIGHMEM
/* Changed during early boot */
pte_t *kmap_pte;
pgprot_t kmap_prot;
......@@ -187,18 +187,22 @@ int init_maps(struct mem_region *region)
return(0);
}
DECLARE_MUTEX(regions_sem);
static int setup_one_range(int fd, char *driver, unsigned long start,
unsigned long pfn, int len,
struct mem_region *region)
{
int i;
down(&regions_sem);
for(i = 0; i < NREGIONS; i++){
if(regions[i] == NULL) break;
}
if(i == NREGIONS){
printk("setup_range : no free regions\n");
return(-1);
i = -1;
goto out;
}
if(fd == -1)
......@@ -216,6 +220,8 @@ static int setup_one_range(int fd, char *driver, unsigned long start,
len : len,
fd : fd } );
regions[i] = region;
out:
up(&regions_sem);
return(i);
}
......@@ -373,7 +379,8 @@ void show_mem(void)
printk("%d pages swap cached\n", cached);
}
unsigned long kmem_top = 0;
/* Changed during early boot */
static unsigned long kmem_top = 0;
unsigned long get_kmem_end(void)
{
......@@ -428,8 +435,10 @@ struct page *arch_validate(struct page *page, int mask, int order)
goto again;
}
DECLARE_MUTEX(vm_reserved_sem);
static struct list_head vm_reserved = LIST_HEAD_INIT(vm_reserved);
/* Static structures, linked in to the list in early boot */
static struct vm_reserved head = {
list : LIST_HEAD_INIT(head.list),
start : 0,
......@@ -455,7 +464,9 @@ int reserve_vm(unsigned long start, unsigned long end, void *e)
{
struct vm_reserved *entry = e, *reserved, *prev;
struct list_head *ele;
int err;
down(&vm_reserved_sem);
list_for_each(ele, &vm_reserved){
reserved = list_entry(ele, struct vm_reserved, list);
if(reserved->start >= end) goto found;
......@@ -469,13 +480,17 @@ int reserve_vm(unsigned long start, unsigned long end, void *e)
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if(entry == NULL){
printk("reserve_vm : Failed to allocate entry\n");
return(-ENOMEM);
err = -ENOMEM;
goto out;
}
*entry = ((struct vm_reserved)
{ list : LIST_HEAD_INIT(entry->list),
start : start,
end : end });
list_add(&entry->list, &prev->list);
err = 0;
out:
up(&vm_reserved_sem);
return(0);
}
......@@ -486,6 +501,7 @@ unsigned long get_vm(unsigned long len)
unsigned long start;
int err;
down(&vm_reserved_sem);
list_for_each(ele, &vm_reserved){
this = list_entry(ele, struct vm_reserved, list);
next = list_entry(ele->next, struct vm_reserved, list);
......@@ -493,8 +509,10 @@ unsigned long get_vm(unsigned long len)
(this->end + len + PAGE_SIZE <= next->start))
goto found;
}
up(&vm_reserved_sem);
return(0);
found:
up(&vm_reserved_sem);
start = (unsigned long) ROUND_UP(this->end) + PAGE_SIZE;
err = reserve_vm(start, start + len, NULL);
if(err) return(0);
......@@ -533,6 +551,10 @@ struct iomem {
unsigned long size;
};
/* iomem regions can only be added on the command line at the moment.
* Locking will be needed when they can be added via mconsole.
*/
struct iomem iomem_regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] =
{ name : NULL,
fd : -1,
......@@ -569,6 +591,7 @@ __initcall(setup_iomem);
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
/* Changed during early boot */
static struct mem_region physmem_region;
static struct vm_reserved physmem_reserved;
......
......@@ -46,10 +46,9 @@
#include "mem_user.h"
#include "init.h"
#include "os.h"
#include "tempfile.h"
struct mem_region physmem_region;
struct mem_region *mem_list = &physmem_region;
extern struct mem_region physmem_region;
#define TEMPNAME_TEMPLATE "vm_file-XXXXXX"
......
......@@ -48,23 +48,23 @@ void init_new_thread(void *sig_stack, void (*usr1_handler)(int))
flags = SA_ONSTACK;
}
set_handler(SIGSEGV, (__sighandler_t) sig_handler, flags,
SIGUSR1, SIGIO, SIGWINCH, -1);
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGTRAP, (__sighandler_t) sig_handler, flags,
SIGUSR1, SIGIO, SIGWINCH, -1);
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGFPE, (__sighandler_t) sig_handler, flags,
SIGUSR1, SIGIO, SIGWINCH, -1);
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGILL, (__sighandler_t) sig_handler, flags,
SIGUSR1, SIGIO, SIGWINCH, -1);
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGBUS, (__sighandler_t) sig_handler, flags,
SIGUSR1, SIGIO, SIGWINCH, -1);
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGWINCH, (__sighandler_t) sig_handler, flags,
SIGUSR1, SIGIO, SIGWINCH, -1);
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
set_handler(SIGUSR2, (__sighandler_t) sig_handler,
SA_NOMASK | flags, -1);
if(usr1_handler) set_handler(SIGUSR1, usr1_handler, flags, -1);
signal(SIGCHLD, SIG_IGN);
signal(SIGHUP, SIG_IGN);
set_timers(1); /* XXX A bit of a race here */
init_irq_signals(sig_stack != NULL);
}
......
......@@ -41,6 +41,10 @@
#include "2_5compat.h"
#include "os.h"
/* This is a per-cpu array. A processor only modifies its entry and it only
* cares about its entry, so it's OK if another processor is modifying its
* entry.
*/
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
struct task_struct *get_task(int pid, int require)
......@@ -86,7 +90,7 @@ int pid_to_processor_id(int pid)
{
int i;
for(i = 0; i < num_online_cpus(); i++){
for(i = 0; i < ncpus; i++){
if(cpu_tasks[i].pid == pid) return(i);
}
return(-1);
......@@ -152,12 +156,19 @@ static void new_thread_handler(int sig)
current->thread.regs.regs.sc = (void *) (&sig + 1);
suspend_new_thread(current->thread.switch_pipe[0]);
block_signals();
#ifdef CONFIG_SMP
schedule_tail(NULL);
#endif
enable_timer();
free_page(current->thread.temp_stack);
set_cmdline("(kernel thread)");
force_flush_all();
current->thread.prev_sched = NULL;
change_sig(SIGUSR1, 1);
change_sig(SIGVTALRM, 1);
change_sig(SIGPROF, 1);
unblock_signals();
if(!run_kernel_thread(fn, arg, &current->thread.jmp))
do_exit(0);
......@@ -165,7 +176,9 @@ static void new_thread_handler(int sig)
static int new_thread_proc(void *stack)
{
block_signals();
change_sig(SIGIO, 0);
change_sig(SIGVTALRM, 0);
change_sig(SIGPROF, 0);
init_new_thread(stack, new_thread_handler);
os_usr1_process(os_getpid());
return(0);
......@@ -204,6 +217,9 @@ void *switch_to(void *prev, void *next, void *last)
unsigned long flags;
int vtalrm, alrm, prof, err, cpu;
char c;
/* jailing and SMP are incompatible, so this doesn't need to be
* made per-cpu
*/
static int reading;
from = prev;
......@@ -298,13 +314,16 @@ void exit_thread(void)
* onto the signal frame.
*/
extern int hit_me;
void finish_fork_handler(int sig)
{
current->thread.regs.regs.sc = (void *) (&sig + 1);
suspend_new_thread(current->thread.switch_pipe[0]);
#ifdef CONFIG_SMP
schedule_tail(NULL);
#endif
enable_timer();
change_sig(SIGVTALRM, 1);
force_flush_all();
if(current->mm != current->parent->mm)
protect(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1);
......@@ -313,7 +332,6 @@ void finish_fork_handler(int sig)
current->thread.prev_sched = NULL;
free_page(current->thread.temp_stack);
block_signals();
change_sig(SIGUSR1, 0);
set_user_mode(current);
}
......@@ -339,7 +357,9 @@ int fork_tramp(void *stack)
{
int sig = sigusr1;
block_signals();
change_sig(SIGIO, 0);
change_sig(SIGVTALRM, 0);
change_sig(SIGPROF, 0);
init_new_thread(stack, finish_fork_handler);
kill(os_getpid(), sig);
......@@ -474,7 +494,7 @@ int current_pid(void)
void default_idle(void)
{
if(current->thread_info->cpu == 0) idle_timer();
idle_timer();
atomic_inc(&init_mm.mm_count);
current->mm = &init_mm;
......@@ -644,6 +664,7 @@ char *uml_strdup(char *string)
return(new);
}
/* Changed by jail_setup, which is a setup */
int jail = 0;
int __init jail_setup(char *line, int *add)
......@@ -708,17 +729,14 @@ static void mprotect_kernel_mem(int w)
mprotect_kernel_vm(w);
}
int jail_timer_off = 0;
/* No SMP problems since jailing and SMP are incompatible */
void unprotect_kernel_mem(void)
{
mprotect_kernel_mem(1);
jail_timer_off = 0;
}
void protect_kernel_mem(void)
{
jail_timer_off = 1;
mprotect_kernel_mem(0);
}
......@@ -749,9 +767,11 @@ void set_thread_sc(void *sc)
int smp_sigio_handler(void)
{
int cpu = current->thread_info->cpu;
#ifdef CONFIG_SMP
IPI_handler(hard_smp_processor_id());
if (hard_smp_processor_id() != 0) return(1);
IPI_handler(cpu);
if(cpu != 0)
return(1);
#endif
return(0);
}
......@@ -761,6 +781,11 @@ int um_in_interrupt(void)
return(in_interrupt());
}
int cpu(void)
{
return(current->thread_info->cpu);
}
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
......
......@@ -11,6 +11,7 @@
#include "sigio.h"
#include "irq_user.h"
/* Protected by sigio_lock() called from write_sigio_workaround */
static int sigio_irq_fd = -1;
void sigio_interrupt(int irq, void *data, struct pt_regs *unused)
......@@ -31,6 +32,18 @@ int write_sigio_irq(int fd)
return(0);
}
static spinlock_t sigio_spinlock = SPIN_LOCK_UNLOCKED;
void sigio_lock(void)
{
spin_lock(&sigio_spinlock);
}
void sigio_unlock(void)
{
spin_unlock(&sigio_spinlock);
}
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
......
......@@ -21,9 +21,11 @@
#include "helper.h"
#include "os.h"
/* Changed during early boot */
int pty_output_sigio = 0;
int pty_close_sigio = 0;
/* Used as a flag during SIGIO testing early in boot */
static int got_sigio = 0;
void __init handler(int sig)
......@@ -151,7 +153,15 @@ void __init check_sigio(void)
check_one_sigio(tty_close);
}
/* Protected by sigio_lock(), also used by sigio_cleanup, which is an
* exitcall.
*/
static int write_sigio_pid = -1;
/* These arrays are initialized before the sigio thread is started, and
* the descriptors closed after it is killed. So, it can't see them change.
* On the UML side, they are changed under the sigio_lock.
*/
static int write_sigio_fds[2] = { -1, -1 };
static int sigio_private[2] = { -1, -1 };
......@@ -161,6 +171,9 @@ struct pollfds {
int used;
};
/* Protected by sigio_lock(). Used by the sigio thread, but the UML thread
* synchronizes with it.
*/
struct pollfds current_poll = {
poll : NULL,
size : 0,
......@@ -217,8 +230,6 @@ static int write_sigio_thread(void *unused)
}
}
/* XXX SMP locking needed here too */
static int need_poll(int n)
{
if(n <= next_poll.size){
......@@ -260,25 +271,31 @@ static void update_thread(void)
set_signals(flags);
return;
fail:
sigio_lock();
if(write_sigio_pid != -1) kill(write_sigio_pid, SIGKILL);
write_sigio_pid = -1;
close(sigio_private[0]);
close(sigio_private[1]);
close(write_sigio_fds[0]);
close(write_sigio_fds[1]);
sigio_unlock();
set_signals(flags);
}
int add_sigio_fd(int fd, int read)
{
int err, i, n, events;
int err = 0, i, n, events;
for(i = 0; i < current_poll.used; i++)
if(current_poll.poll[i].fd == fd) return(0);
sigio_lock();
for(i = 0; i < current_poll.used; i++){
if(current_poll.poll[i].fd == fd)
goto out;
}
n = current_poll.used + 1;
err = need_poll(n);
if(err) return(err);
if(err)
goto out;
for(i = 0; i < current_poll.used; i++)
next_poll.poll[i] = current_poll.poll[i];
......@@ -290,21 +307,26 @@ int add_sigio_fd(int fd, int read)
events : events,
revents : 0 });
update_thread();
return(0);
out:
sigio_unlock();
return(err);
}
int ignore_sigio_fd(int fd)
{
struct pollfd *p;
int err, i, n = 0;
int err = 0, i, n = 0;
sigio_lock();
for(i = 0; i < current_poll.used; i++){
if(current_poll.poll[i].fd == fd) break;
}
if(i == current_poll.used) return(0);
if(i == current_poll.used)
goto out;
err = need_poll(current_poll.used - 1);
if(err) return(err);
if(err)
goto out;
for(i = 0; i < current_poll.used; i++){
p = &current_poll.poll[i];
......@@ -312,11 +334,14 @@ int ignore_sigio_fd(int fd)
}
if(n == i){
printk("ignore_sigio_fd : fd %d not found\n", fd);
return(-1);
err = -1;
goto out;
}
update_thread();
return(0);
out:
sigio_unlock();
return(err);
}
static int setup_initial_poll(int fd)
......@@ -342,14 +367,15 @@ void write_sigio_workaround(void)
unsigned long stack;
int err;
if(write_sigio_pid != -1) return;
sigio_lock();
if(write_sigio_pid != -1)
goto out;
/* XXX This needs SMP locking */
err = os_pipe(write_sigio_fds, 1, 1);
if(err){
printk("write_sigio_workaround - os_pipe 1 failed, "
"errno = %d\n", -err);
return;
goto out;
}
err = os_pipe(sigio_private, 1, 1);
if(err){
......@@ -368,6 +394,8 @@ void write_sigio_workaround(void)
if(write_sigio_irq(write_sigio_fds[0]))
goto out_kill;
out:
sigio_unlock();
return;
out_kill:
......@@ -379,6 +407,7 @@ void write_sigio_workaround(void)
out_close1:
close(write_sigio_fds[0]);
close(write_sigio_fds[1]);
sigio_unlock();
}
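write_sigio_workaround() previously tested write_sigio_pid before taking any lock; it now does the test under sigio_lock(), so two callers racing into it cannot both start the helper thread. The once-only-start idiom in isolation, with an invented example_start_helper(); a sketch only:

static int example_pid = -1;

extern int example_start_helper(void);

static void example_start_once(void)
{
	sigio_lock();
	if(example_pid != -1)		/* already running - nothing to do */
		goto out;
	example_pid = example_start_helper();
 out:
	sigio_unlock();
}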
int read_sigio_fd(int fd)
......
......@@ -19,8 +19,6 @@
#include "sysdep/sigcontext.h"
#include "sigcontext.h"
extern int kern_timer_on;
void set_sigstack(void *sig_stack, int size)
{
stack_t stack;
......@@ -65,12 +63,8 @@ static void change_signals(int type)
sigset_t mask;
sigemptyset(&mask);
if(type == SIG_BLOCK) kern_timer_on = 0;
else {
kern_timer_on = 1;
sigaddset(&mask, SIGVTALRM);
sigaddset(&mask, SIGALRM);
}
sigaddset(&mask, SIGIO);
sigaddset(&mask, SIGPROF);
if(sigprocmask(type, &mask, NULL) < 0)
......@@ -97,7 +91,6 @@ static int disable_mask(sigset_t *mask)
sigs = sigismember(mask, SIGIO) ? 1 << SIGIO_BIT : 0;
sigs |= sigismember(mask, SIGVTALRM) ? 1 << SIGVTALRM_BIT : 0;
sigs |= sigismember(mask, SIGALRM) ? 1 << SIGVTALRM_BIT : 0;
if(!kern_timer_on) sigs |= 1 << SIGVTALRM_BIT;
return(sigs);
}
......@@ -116,21 +109,27 @@ int set_signals(int disable)
int ret;
sigemptyset(&mask);
if(!(disable & (1 << SIGIO_BIT))) sigaddset(&mask, SIGIO);
if(!(disable & (1 << SIGIO_BIT)))
sigaddset(&mask, SIGIO);
if(!(disable & (1 << SIGVTALRM_BIT))){
kern_timer_on = 1;
sigaddset(&mask, SIGVTALRM);
sigaddset(&mask, SIGALRM);
}
if(sigprocmask(SIG_UNBLOCK, &mask, &mask) < 0)
panic("Failed to enable signals");
ret = disable_mask(&mask);
sigemptyset(&mask);
if(disable & (1 << SIGIO_BIT)) sigaddset(&mask, SIGIO);
if(disable & (1 << SIGVTALRM_BIT))
kern_timer_on = 0;
if(disable & (1 << SIGIO_BIT))
sigaddset(&mask, SIGIO);
if(disable & (1 << SIGVTALRM_BIT)){
sigaddset(&mask, SIGVTALRM);
sigaddset(&mask, SIGALRM);
}
if(sigprocmask(SIG_BLOCK, &mask, NULL) < 0)
panic("Failed to block signals");
return(ret);
}
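With kern_timer_on gone, set_signals() now really blocks and unblocks SIGVTALRM/SIGALRM through sigprocmask() instead of tracking a side flag: the disable argument says which of SIGIO and the timer signals to keep blocked, and the return value encodes, in the same SIGIO_BIT/SIGVTALRM_BIT positions, what was blocked before the call. The callers in sigio_user.c above use it roughly as a save/restore pair; a sketch:

static void example_critical(void)
{
	int flags;

	flags = set_signals(0);		/* unblock SIGIO and the timers,
					 * remembering the previous state */
	/* ... work that needs signals deliverable ... */
	set_signals(flags);		/* restore whatever was blocked before */
}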
......
......@@ -5,7 +5,7 @@
#include "linux/config.h"
/* CPU online map */
/* CPU online map, set by smp_boot_cpus */
unsigned long cpu_online_map = 1;
#ifdef CONFIG_SMP
......@@ -21,25 +21,32 @@ unsigned long cpu_online_map = 1;
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
#include "irq_user.h"
#include "os.h"
/* The 'big kernel lock' */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
/* Per CPU bogomips and other parameters */
/* Per CPU bogomips and other parameters
* The only piece used here is the ipi pipe, which is set before SMP is
* started and never changed.
*/
struct cpuinfo_um cpu_data[NR_CPUS];
spinlock_t um_bh_lock = SPIN_LOCK_UNLOCKED;
atomic_t global_bh_count;
/* Not used by UML */
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock;
/* Set when the idlers are all forked */
int smp_threads_ready = 0;
/* A statistic, can be a little off */
int num_reschedules_sent = 0;
/* Small, random number, never changed */
unsigned long cache_decay_ticks = 5;
void smp_send_reschedule(int cpu)
{
write(cpu_data[cpu].ipi_pipe[1], "R", 1);
......@@ -83,30 +90,24 @@ void synchronize_bh(void)
void smp_send_stop(void)
{
printk(KERN_INFO "Stopping all CPUs\n");
}
int i;
printk(KERN_INFO "Stopping all CPUs...");
for(i = 0; i < num_online_cpus(); i++){
if(i == current->thread_info->cpu)
continue;
write(cpu_data[i].ipi_pipe[1], "S", 1);
}
printk("done\n");
}
static atomic_t smp_commenced = ATOMIC_INIT(0);
static unsigned long smp_commenced_mask;
static volatile unsigned long smp_callin_map = 0;
void smp_commence(void)
static int idle_proc(void *cpup)
{
printk("All CPUs are go!\n");
wmb();
atomic_set(&smp_commenced, 1);
}
static int idle_proc(void *unused)
{
int cpu, err;
set_current(current);
del_from_runqueue(current);
unhash_process(current);
int cpu = (int) cpup, err;
cpu = current->processor;
err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
if(err)
panic("CPU#%d failed to create IPI pipe, errno = %d", cpu,
......@@ -115,46 +116,41 @@ static int idle_proc(void *unused)
activate_ipi(cpu_data[cpu].ipi_pipe[0], current->thread.extern_pid);
wmb();
if (test_and_set_bit(current->processor, &smp_callin_map)) {
printk("huh, CPU#%d already present??\n", current->processor);
if (test_and_set_bit(cpu, &smp_callin_map)) {
printk("huh, CPU#%d already present??\n", cpu);
BUG();
}
while (!atomic_read(&smp_commenced))
while (!test_bit(cpu, &smp_commenced_mask))
cpu_relax();
init_idle();
set_bit(cpu, &cpu_online_map);
default_idle();
return(0);
}
int inited_cpus = 1;
static int idle_thread(int (*fn)(void *), int cpu)
static struct task_struct *idle_thread(int cpu)
{
struct task_struct *p;
int pid;
struct task_struct *new_task;
unsigned char c;
current->thread.request.u.thread.proc = fn;
current->thread.request.u.thread.arg = NULL;
p = do_fork(CLONE_VM | CLONE_PID, 0, NULL, 0);
if(IS_ERR(p)) panic("do_fork failed in idle_thread");
cpu_tasks[cpu].pid = p->thread.extern_pid;
cpu_tasks[cpu].task = p;
inited_cpus++;
init_tasks[cpu] = p;
p->processor = cpu;
p->cpus_allowed = 1 << cpu;
p->cpus_runnable = p->cpus_allowed;
write(p->thread.switch_pipe[1], &c, sizeof(c));
return(p->thread.extern_pid);
current->thread.request.u.thread.proc = idle_proc;
current->thread.request.u.thread.arg = (void *) cpu;
new_task = do_fork(CLONE_VM | CLONE_IDLETASK, 0, NULL, 0, NULL);
if(IS_ERR(new_task)) panic("do_fork failed in idle_thread");
cpu_tasks[cpu] = ((struct cpu_task)
{ .pid = new_task->thread.extern_pid,
.task = new_task } );
write(new_task->thread.switch_pipe[1], &c, sizeof(c));
return(new_task);
}
void smp_boot_cpus(void)
void smp_prepare_cpus(unsigned int maxcpus)
{
int err;
struct task_struct *idle;
unsigned long waittime;
int err, cpu;
set_bit(0, &cpu_online_map);
set_bit(0, &smp_callin_map);
......@@ -164,46 +160,32 @@ void smp_boot_cpus(void)
activate_ipi(cpu_data[0].ipi_pipe[0], current->thread.extern_pid);
if(ncpus < 1){
printk(KERN_INFO "ncpus set to 1\n");
ncpus = 1;
}
else if(ncpus > NR_CPUS){
printk(KERN_INFO
"ncpus can't be greater than NR_CPUS, set to %d\n",
NR_CPUS);
ncpus = NR_CPUS;
}
for(cpu = 1; cpu < ncpus; cpu++){
printk("Booting processor %d...\n", cpu);
if(ncpus > 1){
int i, pid;
idle = idle_thread(cpu);
printk(KERN_INFO "Starting up other processors:\n");
for(i=1;i<ncpus;i++){
int waittime;
/* Do this early, for hard_smp_processor_id() */
cpu_tasks[i].pid = -1;
set_bit(i, &cpu_online_map);
pid = idle_thread(idle_proc, i);
printk(KERN_INFO "\t#%d - idle thread pid = %d.. ",
i, pid);
init_idle(idle, cpu);
unhash_process(idle);
waittime = 200000000;
while (waittime-- && !test_bit(i, &smp_callin_map))
while (waittime-- && !test_bit(cpu, &smp_callin_map))
cpu_relax();
if (test_bit(i, &smp_callin_map))
printk("online\n");
else {
printk("failed\n");
clear_bit(i, &cpu_online_map);
}
}
if (test_bit(cpu, &smp_callin_map))
printk("done\n");
else printk("failed\n");
}
}
int __cpu_up(unsigned int cpu)
{
set_bit(cpu, &smp_commenced_mask);
while (!test_bit(cpu, &cpu_online_map))
mb();
return(0);
}
int setup_profiling_timer(unsigned int multiplier)
{
printk(KERN_INFO "setup_profiling_timer\n");
......@@ -225,7 +207,13 @@ void IPI_handler(int cpu)
break;
case 'R':
current->need_resched = 1;
set_tsk_need_resched(current);
break;
case 'S':
printk("CPU#%d stopping\n", cpu);
while(1)
pause();
break;
default:
......@@ -269,7 +257,8 @@ int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
info = _info;
for (i=0;i<NR_CPUS;i++)
if (i != current->processor && test_bit(i, &cpu_online_map))
if((i != current->thread_info->cpu) &&
test_bit(i, &cpu_online_map))
write(cpu_data[i].ipi_pipe[1], "C", 1);
while (atomic_read(&scf_started) != cpus)
......
......@@ -384,6 +384,7 @@ static int check_bogosity(struct pt_regs *regs)
return(0);
}
/* Unlocked, I don't care if this is a bit off */
int nsyscalls = 0;
extern syscall_handler_t *sys_call_table[];
......@@ -417,14 +418,18 @@ long execute_syscall(void *r)
spinlock_t syscall_lock = SPIN_LOCK_UNLOCKED;
void lock_syscall(void)
{
spin_lock(&syscall_lock);
}
static int syscall_index = 0;
void unlock_syscall(void)
int next_syscall_index(int limit)
{
int ret;
spin_lock(&syscall_lock);
ret = syscall_index;
if(++syscall_index == limit)
syscall_index = 0;
spin_unlock(&syscall_lock);
return(ret);
}
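lock_syscall()/unlock_syscall() are no longer exported; the index arithmetic they used to protect moved inside next_syscall_index(), which hands each caller a private slot and wraps back to zero at limit while holding syscall_lock. A minimal sketch of a caller (the real consumer, syscall_handler(), follows in this patch):

static int example_grab_slot(void)
{
	/* 1024 matches the syscall_record ring used by the tt-mode handler;
	 * the returned slot is private, so it can be filled without a lock.
	 */
	return(next_syscall_index(1024));
}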
/*
......
......@@ -34,21 +34,14 @@ struct {
struct timeval end;
} syscall_record[1024];
int syscall_index = 0;
extern int kern_timer_on;
void syscall_handler(int sig, struct uml_pt_regs *regs)
{
void *sc;
long result;
int index, syscall;
int index, max, syscall;
lock_syscall();
if(syscall_index == 1024) syscall_index = 0;
index = syscall_index;
syscall_index++;
unlock_syscall();
max = sizeof(syscall_record)/sizeof(syscall_record[0]);
index = next_syscall_index(max);
syscall = regs->syscall;
sc = regs->sc;
......
......@@ -14,33 +14,15 @@
#include "user.h"
#include "process.h"
#include "signal_user.h"
#include "time_user.h"
extern struct timeval xtime;
void timer_handler(int sig, struct uml_pt_regs *regs)
{
timer_irq(regs);
}
void timer(void)
{
gettimeofday(&xtime, NULL);
}
static struct itimerval profile_interval;
void get_profile_timer(void)
{
getitimer(ITIMER_PROF, &profile_interval);
profile_interval.it_value = profile_interval.it_interval;
}
void disable_profile_timer(void)
{
struct itimerval interval = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
setitimer(ITIMER_PROF, &interval, NULL);
}
static void set_interval(int timer_type)
{
struct itimerval interval;
......@@ -53,6 +35,15 @@ static void set_interval(int timer_type)
panic("setitimer failed - errno = %d\n", errno);
}
void enable_timer(void)
{
struct itimerval enable = ((struct itimerval) { { 0, 1000000/hz() },
{ 0, 1000000/hz() }});
if(setitimer(ITIMER_VIRTUAL, &enable, NULL))
printk("enable_timer - setitimer failed, errno = %d\n",
errno);
}
void switch_timers(int to_real)
{
struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
......@@ -79,8 +70,9 @@ void idle_timer(void)
{
if(signal(SIGVTALRM, SIG_IGN) == SIG_ERR)
panic("Couldn't unset SIGVTALRM handler");
set_handler(SIGALRM, (__sighandler_t) alarm_handler,
SA_NODEFER | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, -1);
SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, SIGVTALRM, -1);
set_interval(ITIMER_REAL);
}
......@@ -98,28 +90,24 @@ void time_init(void)
set_interval(ITIMER_VIRTUAL);
}
void set_timers(int set_signal)
{
if(set_signal)
set_interval(ITIMER_VIRTUAL);
if(setitimer(ITIMER_PROF, &profile_interval, NULL) == -1)
panic("setitimer ITIMER_PROF failed - errno = %d\n", errno);
}
struct timeval local_offset = { 0, 0 };
void do_gettimeofday(struct timeval *tv)
{
time_lock();
gettimeofday(tv, NULL);
timeradd(tv, &local_offset, tv);
time_unlock();
}
void do_settimeofday(struct timeval *tv)
{
struct timeval now;
time_lock();
gettimeofday(&now, NULL);
timersub(tv, &now, &local_offset);
time_unlock();
}
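A worked example of the offset arithmetic above, with invented numbers: if the host's gettimeofday() reports 1000.000000 s and do_settimeofday() is asked for 1000.300000 s, timersub() leaves local_offset at 0.300000 s, and every later do_gettimeofday() hands back host time plus that 0.3 s via timeradd(). time_lock()/time_unlock() keep a concurrent reader from seeing local_offset half-updated between its tv_sec and tv_usec fields.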
void idle_sleep(int secs)
......
......@@ -27,21 +27,21 @@ int hz(void)
return(HZ);
}
/* Changed at early boot */
int timer_irq_inited = 0;
/* kern_timer_on and missed_ticks are modified after kernel memory has been
/* missed_ticks will be modified after kernel memory has been
* write-protected, so this puts it in a section which will be left
* write-enabled.
*/
int __attribute__ ((__section__ (".unprotected"))) kern_timer_on = 0;
int __attribute__ ((__section__ (".unprotected"))) missed_ticks = 0;
int __attribute__ ((__section__ (".unprotected"))) missed_ticks[NR_CPUS];
void timer_irq(struct uml_pt_regs *regs)
{
int ticks = missed_ticks;
int cpu = current->thread_info->cpu, ticks = missed_ticks[cpu];
if(!timer_irq_inited) return;
missed_ticks = 0;
missed_ticks[cpu] = 0;
while(ticks--) do_IRQ(TIMER_IRQ, regs);
}
......@@ -117,6 +117,27 @@ void __const_udelay(um_udelay_t usecs)
for(i=0;i<n;i++) ;
}
void timer_handler(int sig, struct uml_pt_regs *regs)
{
#ifdef CONFIG_SMP
update_process_times(user_context(UPT_SP(regs)));
#endif
if(current->thread_info->cpu == 0)
timer_irq(regs);
}
static spinlock_t timer_spinlock = SPIN_LOCK_UNLOCKED;
void time_lock(void)
{
spin_lock(&timer_spinlock);
}
void time_unlock(void)
{
spin_unlock(&timer_spinlock);
}
int __init timer_init(void)
{
int err;
......
......@@ -171,14 +171,18 @@ void trap_init(void)
spinlock_t trap_lock = SPIN_LOCK_UNLOCKED;
void lock_trap(void)
{
spin_lock(&trap_lock);
}
static int trap_index = 0;
void unlock_trap(void)
int next_trap_index(int limit)
{
int ret;
spin_lock(&trap_lock);
ret = trap_index;
if(++trap_index == limit)
trap_index = 0;
spin_unlock(&trap_lock);
return(ret);
}
extern int debugger_pid;
......@@ -209,6 +213,7 @@ static struct chan_opts opts = {
tramp_stack : 0,
};
/* Accessed by the tracing thread, which automatically serializes access */
static void *xterm_data;
static int xterm_fd;
......
......@@ -70,10 +70,10 @@ void kill_child_dead(int pid)
while(waitpid(pid, NULL, 0) > 0) kill(pid, SIGCONT);
}
/* Changed early in boot, and then only read */
int debug = 0;
int debug_stop = 1;
int debug_parent = 0;
int honeypot = 0;
static int signal_tramp(void *arg)
......@@ -90,7 +90,6 @@ static int signal_tramp(void *arg)
signal(SIGUSR1, SIG_IGN);
change_sig(SIGCHLD, 0);
signal(SIGSEGV, (__sighandler_t) sig_handler);
set_timers(0);
set_cmdline("(idle thread)");
set_init_pid(os_getpid());
proc = arg;
......@@ -143,33 +142,20 @@ static void sleeping_process_signal(int pid, int sig)
}
}
#ifdef CONFIG_SMP
#error need to make these arrays
#endif
/* Accessed only by the tracing thread */
int debugger_pid = -1;
int debugger_parent = -1;
int debugger_fd = -1;
int gdb_pid = -1;
struct {
unsigned long address;
int is_write;
int pid;
unsigned long sp;
int is_user;
} segfault_record[1024];
int segfault_index = 0;
struct {
int pid;
int signal;
unsigned long addr;
struct timeval time;
} signal_record[1024];
} signal_record[1024][32];
int signal_index = 0;
int signal_index[32];
int nsignals = 0;
int debug_trace = 0;
extern int io_nsignals, io_count, intr_count;
......@@ -260,36 +246,36 @@ int signals(int (*init_proc)(void *), void *sp)
if(WIFEXITED(status)) ;
#ifdef notdef
{
printk("Child %d exited with status %d\n", pid,
printf("Child %d exited with status %d\n", pid,
WEXITSTATUS(status));
}
#endif
else if(WIFSIGNALED(status)){
sig = WTERMSIG(status);
if(sig != 9){
printk("Child %d exited with signal %d\n", pid,
printf("Child %d exited with signal %d\n", pid,
sig);
}
}
else if(WIFSTOPPED(status)){
proc_id = pid_to_processor_id(pid);
sig = WSTOPSIG(status);
if(signal_index == 1024){
signal_index = 0;
if(signal_index[proc_id] == 1024){
signal_index[proc_id] = 0;
last_index = 1023;
}
else last_index = signal_index - 1;
else last_index = signal_index[proc_id] - 1;
if(((sig == SIGPROF) || (sig == SIGVTALRM) ||
(sig == SIGALRM)) &&
(signal_record[last_index].signal == sig) &&
(signal_record[last_index].pid == pid))
signal_index = last_index;
signal_record[signal_index].pid = pid;
gettimeofday(&signal_record[signal_index].time, NULL);
(signal_record[proc_id][last_index].signal == sig)&&
(signal_record[proc_id][last_index].pid == pid))
signal_index[proc_id] = last_index;
signal_record[proc_id][signal_index[proc_id]].pid = pid;
gettimeofday(&signal_record[proc_id][signal_index[proc_id]].time, NULL);
eip = ptrace(PTRACE_PEEKUSER, pid, PT_IP_OFFSET, 0);
signal_record[signal_index].addr = eip;
signal_record[signal_index++].signal = sig;
signal_record[proc_id][signal_index[proc_id]].addr = eip;
signal_record[proc_id][signal_index[proc_id]++].signal = sig;
proc_id = pid_to_processor_id(pid);
if(proc_id == -1){
sleeping_process_signal(pid, sig);
continue;
......@@ -414,22 +400,30 @@ __uml_setup("honeypot", uml_honeypot_setup,
" UML. This implies 'jail'.\n\n"
);
/* Unlocked - don't care if this is a bit off */
int nsegfaults = 0;
struct {
unsigned long address;
int is_write;
int pid;
unsigned long sp;
int is_user;
} segfault_record[1024];
void segv_handler(int sig, struct uml_pt_regs *regs)
{
struct sigcontext *context = regs->sc;
int index;
int index, max;
if(regs->is_user && !SEGV_IS_FIXABLE(context)){
bad_segv(SC_FAULT_ADDR(context), SC_IP(context),
SC_FAULT_WRITE(context));
return;
}
lock_trap();
index = segfault_index++;
if(segfault_index == 1024) segfault_index = 0;
unlock_trap();
max = sizeof(segfault_record)/sizeof(segfault_record[0]);
index = next_trap_index(max);
nsegfaults++;
segfault_record[index].address = SC_FAULT_ADDR(context);
segfault_record[index].pid = os_getpid();
......@@ -440,8 +434,6 @@ void segv_handler(int sig, struct uml_pt_regs *regs)
regs->is_user, context);
}
extern int kern_timer_on;
struct signal_info {
void (*handler)(int, struct uml_pt_regs *);
int is_irq;
......@@ -472,7 +464,7 @@ void sig_handler_common(int sig, struct sigcontext *sc)
{
struct uml_pt_regs save_regs, *r;
struct signal_info *info;
int save_errno = errno, save_timer = kern_timer_on, is_user;
int save_errno = errno, is_user;
unprotect_kernel_mem();
......@@ -489,7 +481,6 @@ void sig_handler_common(int sig, struct sigcontext *sc)
(*info->handler)(sig, r);
kern_timer_on = save_timer;
if(is_user){
interrupt_end();
block_signals();
......@@ -506,19 +497,15 @@ void sig_handler(int sig, struct sigcontext sc)
sig_handler_common(sig, &sc);
}
extern int timer_irq_inited, missed_ticks;
extern int jail_timer_off;
extern int timer_irq_inited, missed_ticks[];
void alarm_handler(int sig, struct sigcontext sc)
{
int user;
if(!timer_irq_inited) return;
missed_ticks++;
missed_ticks[cpu()]++;
user = user_context(SC_SP(&sc));
if(!user && !kern_timer_on) return;
if(!user && jail_timer_off) return;
if(sig == SIGALRM)
switch_timers(0);
......
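Taken together with the timer_irq() hunk earlier, this change turns missed_ticks into a per-CPU counter: the SIGALRM/SIGVTALRM handler only increments the slot for its own CPU, and timer_irq() later drains it and replays that many timer interrupts. A compressed sketch of the two halves (NR_CPUS and the cpu argument are stand-ins for the real helpers):

/* Per-CPU tick accounting: the signal handler does the bare minimum and
 * the later, safer context drains the count. */
#define NR_CPUS 32

static int missed_ticks[NR_CPUS];

static void record_tick(int cpu)        /* signal-handler side */
{
        missed_ticks[cpu]++;
}

static int drain_ticks(int cpu)         /* timer_irq() side */
{
        int ticks = missed_ticks[cpu];

        missed_ticks[cpu] = 0;
        return ticks;   /* caller replays this many timer interrupts */
}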
......@@ -17,8 +17,8 @@
#define TTY_LOG_DIR "./"
char *tty_log_dir = TTY_LOG_DIR;
/* Set early in boot and then unchanged */
static char *tty_log_dir = TTY_LOG_DIR;
static int tty_log_fd = -1;
#define TTY_LOG_OPEN 1
......
......@@ -37,6 +37,11 @@
#define DEFAULT_COMMAND_LINE "root=6200"
struct cpuinfo_um boot_cpu_data = {
.loops_per_jiffy = 0,
.ipi_pipe = { -1, -1 }
};
unsigned long thread_saved_pc(struct task_struct *task)
{
return(os_process_pc(task->thread.extern_pid));
......@@ -119,6 +124,7 @@ static int start_kernel_proc(void *unused)
#define SIZE ((CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS) * 0x20000000)
#define START (TOP - SIZE)
/* Set in main */
unsigned long host_task_size;
unsigned long task_size;
......@@ -129,17 +135,21 @@ void set_task_sizes(int arg)
task_size = START;
}
/* Set in early boot */
unsigned long uml_physmem;
unsigned long uml_reserved;
unsigned long start_vm;
unsigned long end_vm;
int ncpus = 1;
/* Pointer set in linux_main, the array itself is private to each thread,
* and changed at address space creation time so this poses no concurrency
* problems.
*/
static char *argv1_begin = NULL;
static char *argv1_end = NULL;
/* Set in early boot */
static int have_root __initdata = 0;
long physmem_size = 32 * 1024 * 1024;
......@@ -258,8 +268,9 @@ static void __init uml_postsetup(void)
}
extern int debug_trace;
unsigned long brk_start;
/* Set during early boot */
unsigned long brk_start;
static struct vm_reserved kernel_vm_reserved;
#define MIN_VMALLOC (32 * 1024 * 1024)
......@@ -366,18 +377,6 @@ void __init check_bugs(void)
check_sigio();
}
spinlock_t pid_lock = SPIN_LOCK_UNLOCKED;
void lock_pid(void)
{
spin_lock(&pid_lock);
}
void unlock_pid(void)
{
spin_unlock(&pid_lock);
}
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
......
......@@ -21,9 +21,13 @@
#define UMID_LEN 64
#define UML_DIR "~/.uml/"
/* Changed by set_umid and make_umid, which are run early in boot */
static char umid[UMID_LEN] = { 0 };
/* Changed by set_uml_dir and make_uml_dir, which are run early in boot */
static char *uml_dir = UML_DIR;
/* Changed by set_umid */
static int umid_is_random = 1;
static int umid_inited = 0;
......
......@@ -32,6 +32,7 @@
#define COMMAND_LINE_SIZE _POSIX_ARG_MAX
/* Changed in linux_main and setup_arch, which run before SMP is started */
char saved_command_line[COMMAND_LINE_SIZE] = { 0 };
char command_line[COMMAND_LINE_SIZE] = { 0 };
......
......@@ -18,15 +18,22 @@
#include "user.h"
#include "init.h"
/* Set in set_stklim, which is called from main and __wrap_malloc.
* __wrap_malloc only calls it if main hasn't started.
*/
unsigned long stacksizelim;
/* Set in main */
char *linux_prog;
#define PGD_BOUND (4 * 1024 * 1024)
#define STACKSIZE (8 * 1024 * 1024)
#define THREAD_NAME_LEN (256)
char padding[THREAD_NAME_LEN] = { [ 0 ... THREAD_NAME_LEN - 2] = ' ', '\0' };
/* Never changed */
static char padding[THREAD_NAME_LEN] = {
[ 0 ... THREAD_NAME_LEN - 2] = ' ', '\0'
};
static void set_stklim(void)
{
......@@ -129,7 +136,8 @@ int main(int argc, char **argv, char **envp)
return(uml_exitcode);
}
int allocating_monbuf = 0;
/* Changed in __wrap___monstartup and __wrap_malloc very early */
static int allocating_monbuf = 0;
#ifdef PROFILING
extern void __real___monstartup (unsigned long, unsigned long);
......@@ -146,6 +154,7 @@ void __wrap___monstartup (unsigned long lowpc, unsigned long highpc)
extern void *__real_malloc(int);
extern unsigned long host_task_size;
/* Set in __wrap_malloc early */
static void *gmon_buf = NULL;
void *__wrap_malloc(int size)
......
......@@ -123,6 +123,7 @@ int debugger_syscall(debugger_state *debugger, pid_t child)
return(0);
}
/* Used by the tracing thread */
static debugger_state parent;
static int parent_syscall(debugger_state *debugger, int pid);
......@@ -175,10 +176,7 @@ void debugger_cancelled_return(debugger_state *debugger, int result)
syscall_continue(debugger->pid);
}
#ifdef CONFIG_SMP
#error need to make these arrays
#endif
/* Used by the tracing thread */
static debugger_state debugger;
static debugee_state debugee;
......
......@@ -15,6 +15,7 @@
#define MAXTOKEN 64
/* Set during early boot */
int cpu_has_cmov = 1;
int cpu_has_xmm = 0;
......
......@@ -59,6 +59,7 @@ static void read_debugregs(int pid, unsigned long *regs)
}
}
/* Accessed only by the tracing thread */
static unsigned long kernel_debugregs[8] = { [ 0 ... 7 ] = 0 };
static int debugregs_seq = 0;
......
......@@ -2,9 +2,6 @@
#include "linux/stddef.h" // for NULL
#include "linux/elf.h" // for AT_NULL
/* unsigned int local_bh_count[NR_CPUS]; */
unsigned long isa_io_base = 0;
/* The following function nicked from arch/ppc/kernel/process.c and
* adapted slightly */
/*
......
......@@ -67,6 +67,9 @@ SECTIONS
__setup_start = .;
.setup.init : { *(.setup.init) }
__setup_end = .;
__per_cpu_start = . ;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = . ;
__initcall_start = .;
.initcall.init : {
*(.initcall1.init)
......
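The linker script now brackets .data.percpu with __per_cpu_start/__per_cpu_end, the symbols the generic kernel uses to duplicate per-CPU data once per processor at boot. A simplified sketch of how such a section is consumed (an assumption-level illustration, not the exact 2.5 boot code):

/* Copy everything between the linker-provided symbols once per CPU so
 * each processor gets its own instance of the per-CPU data. */
#include <string.h>

extern char __per_cpu_start[], __per_cpu_end[];

static void setup_per_cpu_copies(char *area, int ncpus)
{
        unsigned long size = __per_cpu_end - __per_cpu_start;
        int i;

        for (i = 0; i < ncpus; i++)
                memcpy(area + i * size, __per_cpu_start, size);
}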
#ifndef __UM_CACHE_H
#define __UM_CACHE_H
/* These are x86 numbers */
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
#endif
......@@ -6,15 +6,30 @@ extern unsigned long cpu_online_map;
#ifdef CONFIG_SMP
#include "linux/config.h"
#include "linux/bitops.h"
#include "asm/current.h"
#define smp_processor_id() (current->processor)
#define smp_processor_id() (current->thread_info->cpu)
#define cpu_logical_map(n) (n)
#define cpu_number_map(n) (n)
#define PROC_CHANGE_PENALTY 15 /* Pick a number, any number */
extern int hard_smp_processor_id(void);
#define NO_PROC_ID -1
#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
extern int ncpus;
#define cpu_possible(cpu) (cpu < ncpus)
extern inline unsigned int num_online_cpus(void)
{
return(hweight32(cpu_online_map));
}
extern inline void smp_cpus_done(unsigned int maxcpus)
{
}
#endif
#endif
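With cpu_online(), cpu_possible() and num_online_cpus() now defined for UML, callers can walk the CPU bitmap in the usual way. A hypothetical user of these macros, in the spirit of the smp_call_function() hunk at the top of this change:

/* Visit every possible CPU and act only on the ones currently online. */
static void poke_online_cpus(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++){
                if (!cpu_possible(cpu) || !cpu_online(cpu))
                        continue;
                /* e.g. write one byte to cpu_data[cpu].ipi_pipe[1], as in
                 * the smp_call_function() hunk near the top of this change */
        }
}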
......@@ -64,10 +64,14 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
* TIF_NEED_RESCHED
*/
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#endif
......
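TIF_POLLING_NRFLAG is the generic hint that a CPU is busy-polling TIF_NEED_RESCHED, so a remote CPU can simply set that flag instead of sending a wake-up IPI. A sketch of the kind of idle loop that advertises it (helper names such as set_thread_flag() follow the generic kernel; this is not UML's actual idle code):

/* An idle loop that polls need_resched and says so via the new flag. */
static void polling_idle(void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);
        while (!test_thread_flag(TIF_NEED_RESCHED))
                cpu_relax();            /* spin until someone needs us */
        clear_thread_flag(TIF_POLLING_NRFLAG);
}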