Commit b551f67a authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5
into samba.org:/scratch/anton/export

parents 2576db03 8da730e9
@@ -480,7 +480,7 @@ void __init cpu_init (void)
          */
         atomic_inc(&init_mm.mm_count);
         current->active_mm = &init_mm;
-        if(current->mm)
+        if (current->mm)
                 BUG();
         enter_lazy_tlb(&init_mm, current, cpu);
@@ -508,7 +508,7 @@ void __init cpu_init (void)
         /*
          * Force FPU initialization:
          */
-        clear_thread_flag(TIF_USEDFPU);
+        current_thread_info()->status = 0;
         current->used_math = 0;
         stts();
 }
@@ -54,9 +54,11 @@ void init_fpu(struct task_struct *tsk)
 void kernel_fpu_begin(void)
 {
+        struct thread_info *thread = current_thread_info();
+
         preempt_disable();
-        if (test_thread_flag(TIF_USEDFPU)) {
-                __save_init_fpu(current);
+        if (thread->status & TS_USEDFPU) {
+                __save_init_fpu(thread->task);
                 return;
         }
         clts();
...
@@ -275,6 +275,15 @@ void release_thread(struct task_struct *dead_task)
         release_x86_irqs(dead_task);
 }
 
+/*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+        unlazy_fpu(tsk);
+}
+
 int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
         unsigned long unused,
         struct task_struct * p, struct pt_regs * regs)
@@ -297,9 +306,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
         savesegment(gs,p->thread.gs);
 
         tsk = current;
-        unlazy_fpu(tsk);
-        struct_cpy(&p->thread.i387, &tsk->thread.i387);
-
         if (unlikely(NULL != tsk->thread.ts_io_bitmap)) {
                 p->thread.ts_io_bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                 if (!p->thread.ts_io_bitmap)
...
@@ -737,13 +737,14 @@ asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
  */
 asmlinkage void math_state_restore(struct pt_regs regs)
 {
-        struct task_struct *tsk = current;
+        struct thread_info *thread = current_thread_info();
+        struct task_struct *tsk = thread->task;
         clts();         /* Allow maths ops (or we recurse) */
 
         if (!tsk->used_math)
                 init_fpu(tsk);
         restore_fpu(tsk);
-        set_thread_flag(TIF_USEDFPU);   /* So we fnsave on switch_to() */
+        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
 }
 
 #ifndef CONFIG_MATH_EMULATION
...
@@ -83,6 +83,8 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
                 return NULL;
         memset(sglist, 0, sizeof(*sglist) * nr_pages);
 
+        if (NULL == pages[0])
+                goto nopage;
         if (PageHighMem(pages[0]))
                 /* DMA to highmem pages might not work */
                 goto highmem;
@@ -118,7 +120,7 @@ int videobuf_lock(struct page **pages, int nr_pages)
         for (i = 0; i < nr_pages; i++)
                 if (TryLockPage(pages[i]))
                         goto err;
-        dprintk(2,"lock ok\n");
+        dprintk(2,"lock ok [%d pages]\n",nr_pages);
         return 0;
 
  err:
@@ -136,7 +138,7 @@ int videobuf_unlock(struct page **pages, int nr_pages)
         dprintk(2,"unlock start ...\n");
         for (i = 0; i < nr_pages; i++)
                 unlock_page(pages[i]);
-        dprintk(2,"unlock ok\n");
+        dprintk(2,"unlock ok [%d pages]\n",nr_pages);
         return 0;
 }
@@ -270,7 +272,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
 
 /* --------------------------------------------------------------------- */
 
-void* videobuf_alloc(int size)
+void* videobuf_alloc(unsigned int size)
 {
         struct videobuf_buffer *vb;
@@ -340,7 +342,7 @@ videobuf_queue_init(struct videobuf_queue *q,
                     spinlock_t *irqlock,
                     enum v4l2_buf_type type,
                     enum v4l2_field field,
-                    int msize)
+                    unsigned int msize)
 {
         memset(q,0,sizeof(*q));
@@ -417,11 +419,11 @@ videobuf_next_field(struct videobuf_queue *q)
         if (V4L2_FIELD_ALTERNATE == field) {
                 if (V4L2_FIELD_TOP == q->last) {
+                        field = V4L2_FIELD_TOP;
+                        q->last = V4L2_FIELD_TOP;
+                } else {
                         field = V4L2_FIELD_BOTTOM;
                         q->last = V4L2_FIELD_BOTTOM;
-                } else {
-                        field = V4L2_FIELD_TOP;
-                        q->last = V4L2_FIELD_TOP;
                 }
         }
         return field;
@@ -463,7 +465,8 @@ int
 videobuf_reqbufs(struct file *file, struct videobuf_queue *q,
                  struct v4l2_requestbuffers *req)
 {
-        int size,count,retval;
+        unsigned int size,count;
+        int retval;
 
         if (req->type != q->type)
                 return -EINVAL;
@@ -477,6 +480,8 @@ videobuf_reqbufs(struct file *file, struct videobuf_queue *q,
         size = 0;
         q->ops->buf_setup(file,&count,&size);
         size = PAGE_ALIGN(size);
+        dprintk(1,"reqbufs: bufs=%d, size=0x%x [%d pages total]\n",
+                count, size, (count*size)>>PAGE_SHIFT);
 
         retval = videobuf_mmap_setup(file,q,count,size);
         if (retval < 0)
@@ -660,7 +665,10 @@ videobuf_read_zerocopy(struct file *file, struct videobuf_queue *q,
         retval = videobuf_waiton(q->read_buf,0,0);
         if (0 == retval) {
                 videobuf_dma_pci_sync(q->pci,&q->read_buf->dma);
-                retval = q->read_buf->size;
+                if (STATE_ERROR == q->read_buf->state)
+                        retval = -EIO;
+                else
+                        retval = q->read_buf->size;
         }
 
  done:
@@ -676,7 +684,8 @@ ssize_t videobuf_read_one(struct file *file, struct videobuf_queue *q,
 {
         enum v4l2_field field;
         unsigned long flags;
-        int retval, bytes, size, nbufs;
+        unsigned size, nbufs, bytes;
+        int retval;
 
         down(&q->lock);
@@ -686,7 +695,7 @@ ssize_t videobuf_read_one(struct file *file, struct videobuf_queue *q,
             count >= size &&
             !(file->f_flags & O_NONBLOCK)) {
                 retval = videobuf_read_zerocopy(file,q,data,count,ppos);
-                if (retval >= 0)
+                if (retval >= 0 || retval == -EIO)
                         /* ok, all done */
                         goto done;
                 /* fallback to kernel bounce buffer on failures */
@@ -714,6 +723,15 @@ ssize_t videobuf_read_one(struct file *file, struct videobuf_queue *q,
                 goto done;
         videobuf_dma_pci_sync(q->pci,&q->read_buf->dma);
 
+        if (STATE_ERROR == q->read_buf->state) {
+                /* catch I/O errors */
+                q->ops->buf_release(file,q->read_buf);
+                kfree(q->read_buf);
+                q->read_buf = NULL;
+                retval = -EIO;
+                goto done;
+        }
+
         /* copy to userspace */
         bytes = count;
         if (bytes > q->read_buf->size - q->read_off)
@@ -788,8 +806,8 @@ ssize_t videobuf_read_stream(struct file *file, struct videobuf_queue *q,
                              char *data, size_t count, loff_t *ppos,
                              int vbihack)
 {
-        unsigned int *fc;
-        int err, bytes, retval;
+        unsigned int *fc, bytes;
+        int err, retval;
         unsigned long flags;
 
         down(&q->lock);
@@ -968,9 +986,10 @@ static struct vm_operations_struct videobuf_vm_ops =
 };
 
 int videobuf_mmap_setup(struct file *file, struct videobuf_queue *q,
-                        int bcount, int bsize)
+                        unsigned int bcount, unsigned int bsize)
 {
-        int i,err;
+        unsigned int i;
+        int err;
 
         err = videobuf_mmap_free(file,q);
         if (0 != err)
@@ -1008,7 +1027,8 @@ int videobuf_mmap_mapper(struct vm_area_struct *vma,
                          struct videobuf_queue *q)
 {
         struct videobuf_mapping *map;
-        int first,last,size,i,retval;
+        unsigned int first,last,size,i;
+        int retval;
 
         down(&q->lock);
         retval = -EINVAL;
@@ -1025,7 +1045,7 @@ int videobuf_mmap_mapper(struct vm_area_struct *vma,
         for (first = 0; first < VIDEO_MAX_FRAME; first++) {
                 if (NULL == q->bufs[first])
                         continue;
                 if (q->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
                         break;
         }
         if (VIDEO_MAX_FRAME == first) {
...
@@ -110,7 +110,7 @@ struct videobuf_buffer;
 struct videobuf_queue;
 
 struct videobuf_mapping {
-        int count;
+        unsigned int count;
         int highmem_ok;
         unsigned long start;
         unsigned long end;
@@ -128,19 +128,19 @@ enum videobuf_state {
 };
 
 struct videobuf_buffer {
-        int i;
+        unsigned int i;
 
         /* info about the buffer */
-        int width;
-        int height;
-        long size;
+        unsigned int width;
+        unsigned int height;
+        unsigned long size;
         enum v4l2_field field;
         enum videobuf_state state;
         struct videobuf_dmabuf dma;
         struct list_head stream;  /* QBUF/DQBUF list */
 
         /* for mmap'ed buffers */
-        off_t boff;               /* buffer offset (mmap) */
+        size_t boff;              /* buffer offset (mmap) */
         size_t bsize;             /* buffer size */
         unsigned long baddr;      /* buffer addr (userland ptr!) */
         struct videobuf_mapping *map;
@@ -148,12 +148,13 @@ struct videobuf_buffer {
         /* touched by irq handler */
         struct list_head queue;
         wait_queue_head_t done;
-        int field_count;
+        unsigned int field_count;
         struct timeval ts;
 };
 
 struct videobuf_queue_ops {
-        int (*buf_setup)(struct file *file, int *count, int *size);
+        int (*buf_setup)(struct file *file,
+                         unsigned int *count, unsigned int *size);
         int (*buf_prepare)(struct file *file,struct videobuf_buffer *vb,
                            enum v4l2_field field);
         void (*buf_queue)(struct file *file,struct videobuf_buffer *vb);
@@ -166,23 +167,23 @@ struct videobuf_queue {
         struct pci_dev *pci;
 
         enum v4l2_buf_type type;
-        int msize;
+        unsigned int msize;
         enum v4l2_field field;
         enum v4l2_field last;  /* for field=V4L2_FIELD_ALTERNATE */
         struct videobuf_buffer *bufs[VIDEO_MAX_FRAME];
         struct videobuf_queue_ops *ops;
 
         /* capture via mmap() + ioctl(QBUF/DQBUF) */
-        int streaming;
+        unsigned int streaming;
         struct list_head stream;
 
         /* capture via read() */
-        int reading;
-        int read_off;
+        unsigned int reading;
+        unsigned int read_off;
         struct videobuf_buffer *read_buf;
 };
 
-void* videobuf_alloc(int size);
+void* videobuf_alloc(unsigned int size);
 int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr);
 int videobuf_iolock(struct pci_dev *pci, struct videobuf_buffer *vb);
@@ -190,7 +191,8 @@ void videobuf_queue_init(struct videobuf_queue *q,
                          struct videobuf_queue_ops *ops,
                          struct pci_dev *pci, spinlock_t *irqlock,
                          enum v4l2_buf_type type,
-                         enum v4l2_field field, int msize);
+                         enum v4l2_field field,
+                         unsigned int msize);
 int videobuf_queue_is_busy(struct videobuf_queue *q);
 void videobuf_queue_cancel(struct file *file, struct videobuf_queue *q);
@@ -218,7 +220,7 @@ unsigned int videobuf_poll_stream(struct file *file,
                                   poll_table *wait);
 
 int videobuf_mmap_setup(struct file *file, struct videobuf_queue *q,
-                        int bcount, int bsize);
+                        unsigned int bcount, unsigned int bsize);
 int videobuf_mmap_free(struct file *file, struct videobuf_queue *q);
 int videobuf_mmap_mapper(struct vm_area_struct *vma,
                          struct videobuf_queue *q);
...
@@ -51,6 +51,9 @@ extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 /* Create a kernel thread without removing it from tasklists. */
 extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
...
@@ -62,6 +62,9 @@ struct task_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax() barrier()
...
@@ -123,6 +123,9 @@ static inline void release_thread(struct task_struct *dead_task)
         /* Nothing needs to be done. */
 }
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 /*
  * Return saved PC of a blocked thread.
  */
...
@@ -36,7 +36,7 @@ static inline void __save_init_fpu( struct task_struct *tsk )
                 asm volatile( "fnsave %0 ; fwait"
                               : "=m" (tsk->thread.i387.fsave) );
         }
-        tsk->thread_info->flags &= ~_TIF_USEDFPU;
+        tsk->thread_info->status &= ~TS_USEDFPU;
 }
 
 static inline void save_init_fpu( struct task_struct *tsk )
@@ -47,15 +47,15 @@ static inline void save_init_fpu( struct task_struct *tsk )
 
 #define unlazy_fpu( tsk ) do { \
-        if ((tsk)->thread_info->flags & _TIF_USEDFPU) \
+        if ((tsk)->thread_info->status & TS_USEDFPU) \
                 save_init_fpu( tsk ); \
 } while (0)
 
 #define clear_fpu( tsk ) \
 do { \
-        if ((tsk)->thread_info->flags & _TIF_USEDFPU) { \
+        if ((tsk)->thread_info->status & TS_USEDFPU) { \
                 asm volatile("fwait"); \
-                (tsk)->thread_info->flags &= ~_TIF_USEDFPU; \
+                (tsk)->thread_info->status &= ~TS_USEDFPU; \
                 stts(); \
         } \
 } while (0)
...
@@ -446,6 +446,10 @@ struct mm_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
+
 /*
  * create a kernel thread without removing it from tasklists
  */
...
@@ -25,6 +25,7 @@ struct thread_info {
         struct task_struct *task;           /* main task structure */
         struct exec_domain *exec_domain;    /* execution domain */
         unsigned long flags;                /* low level flags */
+        unsigned long status;               /* thread-synchronous flags */
         __u32 cpu;                          /* current CPU */
         __s32 preempt_count;                /* 0 => preemptable, <0 => BUG */
@@ -43,10 +44,11 @@ struct thread_info {
 #define TI_TASK          0x00000000
 #define TI_EXEC_DOMAIN   0x00000004
 #define TI_FLAGS         0x00000008
-#define TI_CPU           0x0000000C
-#define TI_PRE_COUNT     0x00000010
-#define TI_ADDR_LIMIT    0x00000014
-#define TI_RESTART_BLOCK 0x0000018
+#define TI_STATUS        0x0000000C
+#define TI_CPU           0x00000010
+#define TI_PRE_COUNT     0x00000014
+#define TI_ADDR_LIMIT    0x00000018
+#define TI_RESTART_BLOCK 0x000001C
 
 #endif
@@ -111,8 +113,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NEED_RESCHED    3   /* rescheduling necessary */
 #define TIF_SINGLESTEP      4   /* restore singlestep on return to user mode */
 #define TIF_IRET            5   /* return with iret */
-#define TIF_USEDFPU         16  /* FPU was used by this task this quantum (SMP) */
-#define TIF_POLLING_NRFLAG  17  /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_POLLING_NRFLAG  16  /* true if poll_idle() is polling TIF_NEED_RESCHED */
 
 #define _TIF_SYSCALL_TRACE  (1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME  (1<<TIF_NOTIFY_RESUME)
@@ -120,12 +121,20 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED   (1<<TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP     (1<<TIF_SINGLESTEP)
 #define _TIF_IRET           (1<<TIF_IRET)
-#define _TIF_USEDFPU        (1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 
 #define _TIF_WORK_MASK      0x0000FFFE  /* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK   0x0000FFFF  /* work to do on any return to u-space */
 
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_USEDFPU          0x0001      /* FPU was used by this task this quantum (SMP) */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_THREAD_INFO_H */
@@ -344,6 +344,9 @@ struct task_struct;
 # define release_thread(dead_task)
 #endif
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 /*
  * This is the mechanism for creating a new kernel thread.
  *
...
@@ -112,6 +112,9 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 /*
...
@@ -108,6 +108,9 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 /*
...
@@ -208,6 +208,9 @@ struct thread_struct {
 /* Free all resources held by a thread. */
 #define release_thread(thread) do { } while(0)
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 /*
...
@@ -231,6 +231,9 @@ struct thread_struct {
 /* Free all resources held by a thread. */
 #define release_thread(thread) do { } while(0)
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 /*
...
@@ -290,6 +290,9 @@ struct mm_struct;
 extern void release_thread(struct task_struct *);
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 extern void map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm);
 
 static inline unsigned long get_wchan(struct task_struct *p)
...
@@ -608,6 +608,9 @@ struct task_struct;
 void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
 void release_thread(struct task_struct *);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 /*
  * Create a new kernel thread.
  */
...
@@ -601,6 +601,9 @@ struct task_struct;
 void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
 void release_thread(struct task_struct *);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 /*
  * Create a new kernel thread.
  */
...
@@ -114,6 +114,9 @@ struct mm_struct;
 extern void release_thread(struct task_struct *);
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 /*
  * Return saved PC of a blocked thread.
  */
...
@@ -129,6 +129,9 @@ struct mm_struct;
 extern void release_thread(struct task_struct *);
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 /*
  * Return saved PC of a blocked thread.
 */
...
@@ -134,6 +134,10 @@ struct mm_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 /*
  * create a kernel thread without removing it from tasklists
 */
...
@@ -139,6 +139,9 @@ extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc,
 #define release_thread(tsk) do { } while(0)
 extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 extern unsigned long get_wchan(struct task_struct *);
 
 #define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
...
@@ -186,6 +186,9 @@ do { \
 /* Free all resources held by a thread. */
 #define release_thread(tsk) do { } while (0)
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 extern unsigned long get_wchan(struct task_struct *task);
...
@@ -81,6 +81,9 @@ extern inline void release_thread (struct task_struct *dead_task)
 {
 }
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 extern int kernel_thread (int (*fn)(void *), void * arg, unsigned long flags);
 
 /* Free current thread data structures etc. */
...
@@ -267,6 +267,10 @@ struct mm_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
 /*
  * create a kernel thread without removing it from tasklists
  */
...
@@ -214,6 +214,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
         struct thread_info *ti;
         int cpu = get_cpu();
 
+        prepare_to_copy(orig);
+
         tsk = task_cache[cpu];
         task_cache[cpu] = NULL;
         put_cpu();
...