Commit 37901802 authored by Christoph Hellwig, committed by Paul Mackerras

[POWERPC] spusched: Switch from workqueues to kthread + timer tick

Get rid of the scheduler workqueues, which complicated things a lot, in
favour of a dedicated spu scheduler thread that gets woken by a traditional
scheduler tick.  By default this scheduler tick runs at HZ * 10, i.e.
one spu scheduler tick for every 10 cpu ticks.

Currently the tick is not disabled when we have fewer contexts than
available spus, but I will implement this later.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent be703177
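For readers unfamiliar with the pattern this commit adopts, the sketch below shows, in isolation, how a self-rearming timer can drive a dedicated scheduler kthread: the timer handler re-queues itself and wakes the thread, which sleeps in TASK_INTERRUPTIBLE between ticks. This is a simplified, hypothetical module (all demo_* names are invented here, and it uses the setup_timer()/unsigned-long-data timer API of kernels from this era), not the spufs code itself; the real implementation is spusched_wake()/spusched_thread() in the diff below.

/*
 * Minimal sketch of the kthread + timer-tick pattern: the timer rearms
 * itself and wakes the thread; the thread does one tick of work per wakeup.
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#define DEMO_TICK	(HZ / 100)	/* hypothetical tick interval */

static struct task_struct *demo_task;
static struct timer_list demo_timer;

/* Timer handler: rearm the timer and kick the scheduler thread. */
static void demo_wake(unsigned long data)
{
	mod_timer(&demo_timer, jiffies + DEMO_TICK);
	wake_up_process(demo_task);
}

/* Scheduler thread: sleep until the timer wakes us, then run one tick. */
static int demo_thread(void *unused)
{
	setup_timer(&demo_timer, demo_wake, 0);
	mod_timer(&demo_timer, jiffies + DEMO_TICK);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		/* per-tick work goes here, e.g. time-slice accounting */
	}

	del_timer_sync(&demo_timer);
	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread, NULL, "demo_sched");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);	/* wakes the thread and waits for it */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");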
@@ -56,7 +56,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	ctx->rt_priority = current->rt_priority;
 	ctx->policy = current->policy;
 	ctx->prio = current->prio;
-	INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
+	ctx->time_slice = SPU_DEF_TIMESLICE;
 	goto out;
 out_free:
 	kfree(ctx);
...
@@ -144,7 +144,6 @@ static int spu_run_init(struct spu_context *ctx, u32 * npc)
 		ctx->ops->runcntl_write(ctx, runcntl);
 	} else {
 		unsigned long mode = SPU_PRIVCNTL_MODE_NORMAL;
-		spu_start_tick(ctx);
 		ctx->ops->npc_write(ctx, *npc);
 		if (test_thread_flag(TIF_SINGLESTEP))
 			mode = SPU_PRIVCNTL_MODE_SINGLE_STEP;
@@ -160,7 +159,6 @@ static int spu_run_fini(struct spu_context *ctx, u32 * npc,
 {
 	int ret = 0;

-	spu_stop_tick(ctx);
 	*status = ctx->ops->status_read(ctx);
 	*npc = ctx->ops->npc_read(ctx);
 	spu_release(ctx);
@@ -330,10 +328,8 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
-			if (ret) {
-				spu_stop_tick(ctx);
+			if (ret)
 				goto out2;
-			}
 			continue;
 		}
 		ret = spu_process_events(ctx);
...
@@ -35,6 +35,7 @@
 #include <linux/numa.h>
 #include <linux/mutex.h>
 #include <linux/notifier.h>
+#include <linux/kthread.h>

 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -45,6 +46,8 @@
 #define SPU_TIMESLICE	(HZ)

+#define SPUSCHED_TICK	(HZ / 100)
+
 struct spu_prio_array {
 	DECLARE_BITMAP(bitmap, MAX_PRIO);
 	struct list_head runq[MAX_PRIO];
@@ -54,7 +57,8 @@ struct spu_prio_array {
 };

 static struct spu_prio_array *spu_prio;
-static struct workqueue_struct *spu_sched_wq;
+static struct task_struct *spusched_task;
+static struct timer_list spusched_timer;

 static inline int node_allowed(int node)
 {
@@ -68,31 +72,6 @@ static inline int node_allowed(int node)
 	return 1;
 }

-void spu_start_tick(struct spu_context *ctx)
-{
-	if (ctx->policy == SCHED_RR) {
-		/*
-		 * Make sure the exiting bit is cleared.
-		 */
-		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
-		mb();
-		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
-	}
-}
-
-void spu_stop_tick(struct spu_context *ctx)
-{
-	if (ctx->policy == SCHED_RR) {
-		/*
-		 * While the work can be rearming normally setting this flag
-		 * makes sure it does not rearm itself anymore.
-		 */
-		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
-		mb();
-		cancel_delayed_work(&ctx->sched_work);
-	}
-}
-
 /**
  * spu_add_to_active_list - add spu to active list
  * @spu: spu to add to the active list
@@ -104,6 +83,11 @@ static void spu_add_to_active_list(struct spu *spu)
 	mutex_unlock(&spu_prio->active_mutex[spu->node]);
 }

+static void __spu_remove_from_active_list(struct spu *spu)
+{
+	list_del_init(&spu->list);
+}
+
 /**
  * spu_remove_from_active_list - remove spu from active list
  * @spu: spu to remove from the active list
@@ -113,7 +97,7 @@ static void spu_remove_from_active_list(struct spu *spu)
 	int node = spu->node;

 	mutex_lock(&spu_prio->active_mutex[node]);
-	list_del_init(&spu->list);
+	__spu_remove_from_active_list(spu);
 	mutex_unlock(&spu_prio->active_mutex[node]);
 }

@@ -161,7 +145,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu->timestamp = jiffies;
 	spu_cpu_affinity_set(spu, raw_smp_processor_id());
 	spu_switch_notify(spu, ctx);
-	spu_add_to_active_list(spu);
 	ctx->state = SPU_STATE_RUNNABLE;
 }

@@ -175,7 +158,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

-	spu_remove_from_active_list(spu);
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
@@ -312,6 +294,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 			victim = NULL;
 			goto restart;
 		}
+		spu_remove_from_active_list(spu);
 		spu_unbind_context(spu, victim);
 		mutex_unlock(&victim->state_mutex);
 		/*
@@ -354,6 +337,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 		spu = find_victim(ctx);
 		if (spu) {
 			spu_bind_context(spu, ctx);
+			spu_add_to_active_list(spu);
 			return 0;
 		}
@@ -397,6 +381,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	if (spu) {
 		new = grab_runnable_context(max_prio);
 		if (new || force) {
+			spu_remove_from_active_list(spu);
 			spu_unbind_context(spu, ctx);
 			spu_free(spu);
 			if (new)
@@ -437,51 +422,78 @@ void spu_yield(struct spu_context *ctx)
 	}
 }

-void spu_sched_tick(struct work_struct *work)
+static void spusched_tick(struct spu_context *ctx)
 {
-	struct spu_context *ctx =
-		container_of(work, struct spu_context, sched_work.work);
-	int preempted;
+	if (ctx->policy != SCHED_RR || --ctx->time_slice)
+		return;

 	/*
-	 * If this context is being stopped avoid rescheduling from the
-	 * scheduler tick because we would block on the state_mutex.
-	 * The caller will yield the spu later on anyway.
+	 * Unfortunately active_mutex ranks outside of state_mutex, so
+	 * we have to trylock here.  If we fail give the context another
+	 * tick and try again.
 	 */
-	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
-		return;
-
-	mutex_lock(&ctx->state_mutex);
-	preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
-	mutex_unlock(&ctx->state_mutex);
-
-	if (preempted) {
-		/*
-		 * We need to break out of the wait loop in spu_run manually
-		 * to ensure this context gets put on the runqueue again
-		 * ASAP.
-		 */
-		wake_up(&ctx->stop_wq);
+	if (mutex_trylock(&ctx->state_mutex)) {
+		struct spu_context *new = grab_runnable_context(ctx->prio + 1);
+		if (new) {
+			struct spu *spu = ctx->spu;
+
+			__spu_remove_from_active_list(spu);
+			spu_unbind_context(spu, ctx);
+			spu_free(spu);
+			wake_up(&new->stop_wq);
+			/*
+			 * We need to break out of the wait loop in
+			 * spu_run manually to ensure this context
+			 * gets put on the runqueue again ASAP.
+			 */
+			wake_up(&ctx->stop_wq);
+		}
+		ctx->time_slice = SPU_DEF_TIMESLICE;
+		mutex_unlock(&ctx->state_mutex);
 	} else {
-		spu_start_tick(ctx);
+		ctx->time_slice++;
 	}
 }

+static void spusched_wake(unsigned long data)
+{
+	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+	wake_up_process(spusched_task);
+}
+
+static int spusched_thread(void *unused)
+{
+	struct spu *spu, *next;
+	int node;
+
+	setup_timer(&spusched_timer, spusched_wake, 0);
+	__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		for (node = 0; node < MAX_NUMNODES; node++) {
+			mutex_lock(&spu_prio->active_mutex[node]);
+			list_for_each_entry_safe(spu, next,
+						 &spu_prio->active_list[node],
+						 list)
+				spusched_tick(spu->ctx);
+			mutex_unlock(&spu_prio->active_mutex[node]);
+		}
+	}
+
+	del_timer_sync(&spusched_timer);
+	return 0;
+}
+
 int __init spu_sched_init(void)
 {
 	int i;

-	spu_sched_wq = create_singlethread_workqueue("spusched");
-	if (!spu_sched_wq)
-		return 1;
-
 	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
-	if (!spu_prio) {
-		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
-		       __FUNCTION__);
-		destroy_workqueue(spu_sched_wq);
-		return 1;
-	}
+	if (!spu_prio)
+		return -ENOMEM;
+
 	for (i = 0; i < MAX_PRIO; i++) {
 		INIT_LIST_HEAD(&spu_prio->runq[i]);
 		__clear_bit(i, spu_prio->bitmap);
@@ -492,7 +504,14 @@ int __init spu_sched_init(void)
 		INIT_LIST_HEAD(&spu_prio->active_list[i]);
 	}
 	spin_lock_init(&spu_prio->runq_lock);
+
+	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
+	if (IS_ERR(spusched_task)) {
+		kfree(spu_prio);
+		return PTR_ERR(spusched_task);
+	}
 	return 0;
 }

 void __exit spu_sched_exit(void)
@@ -500,6 +519,8 @@ void __exit spu_sched_exit(void)
 	struct spu *spu, *tmp;
 	int node;

+	kthread_stop(spusched_task);
+
 	for (node = 0; node < MAX_NUMNODES; node++) {
 		mutex_lock(&spu_prio->active_mutex[node]);
 		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
@@ -510,5 +531,4 @@ void __exit spu_sched_exit(void)
 		mutex_unlock(&spu_prio->active_mutex[node]);
 	}
 	kfree(spu_prio);
-	destroy_workqueue(spu_sched_wq);
 }
@@ -31,6 +31,8 @@
 #include <asm/spu_csa.h>
 #include <asm/spu_info.h>

+#define SPU_DEF_TIMESLICE	100
+
 /* The magic number for our file system */
 enum {
 	SPUFS_MAGIC = 0x23c9b64e,
@@ -39,11 +41,6 @@ enum {
 struct spu_context_ops;
 struct spu_gang;

-/* ctx->sched_flags */
-enum {
-	SPU_SCHED_EXITING = 0,
-};
-
 struct spu_context {
 	struct spu *spu;		  /* pointer to a physical SPU */
 	struct spu_state csa;		  /* SPU context save area. */
@@ -83,7 +80,7 @@ struct spu_context {
 	/* scheduler fields */
 	struct list_head rq;
-	struct delayed_work sched_work;
+	unsigned int time_slice;
 	unsigned long sched_flags;
 	unsigned long rt_priority;
 	int policy;
@@ -200,9 +197,6 @@ void spu_acquire_saved(struct spu_context *ctx);
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
-void spu_start_tick(struct spu_context *ctx);
-void spu_stop_tick(struct spu_context *ctx);
-void spu_sched_tick(struct work_struct *work);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
...