Commit d5826dd6 authored by Jack Steiner's avatar Jack Steiner Committed by Linus Torvalds

gru: add user request to explicitly unload a gru context

Add user function to explicitly unload GRU kernel contexts from the GRU.
Only contexts that are not in-use will be unloaded.

This function is primarily for testing.  It is not expected that this will
be used in normal production systems.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1a2c09e3
...@@ -287,7 +287,6 @@ static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, ...@@ -287,7 +287,6 @@ static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n", gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
gru->gs_gru_base_paddr); gru->gs_gru_base_paddr);
gru_kservices_init(gru);
} }
static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
...@@ -314,6 +313,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) ...@@ -314,6 +313,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
memset(gru_base[bid], 0, sizeof(struct gru_blade_state)); memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0]; gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
spin_lock_init(&gru_base[bid]->bs_lock); spin_lock_init(&gru_base[bid]->bs_lock);
init_rwsem(&gru_base[bid]->bs_kgts_sema);
dsrbytes = 0; dsrbytes = 0;
cbrs = 0; cbrs = 0;
...@@ -426,6 +426,7 @@ static int __init gru_init(void) ...@@ -426,6 +426,7 @@ static int __init gru_init(void)
printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
goto exit3; goto exit3;
} }
gru_kservices_init();
printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
GRU_DRIVER_VERSION_STR); GRU_DRIVER_VERSION_STR);
...@@ -444,7 +445,7 @@ static int __init gru_init(void) ...@@ -444,7 +445,7 @@ static int __init gru_init(void)
static void __exit gru_exit(void) static void __exit gru_exit(void)
{ {
int i, bid, gid; int i, bid;
int order = get_order(sizeof(struct gru_state) * int order = get_order(sizeof(struct gru_state) *
GRU_CHIPLETS_PER_BLADE); GRU_CHIPLETS_PER_BLADE);
...@@ -453,10 +454,7 @@ static void __exit gru_exit(void) ...@@ -453,10 +454,7 @@ static void __exit gru_exit(void)
for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
free_irq(IRQ_GRU + i, NULL); free_irq(IRQ_GRU + i, NULL);
gru_kservices_exit();
foreach_gid(gid)
gru_kservices_exit(GID_TO_GRU(gid));
for (bid = 0; bid < GRU_MAX_BLADES; bid++) for (bid = 0; bid < GRU_MAX_BLADES; bid++)
free_pages((unsigned long)gru_base[bid], order); free_pages((unsigned long)gru_base[bid], order);
......
...@@ -187,6 +187,34 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id) ...@@ -187,6 +187,34 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
downgrade_write(&bs->bs_kgts_sema); downgrade_write(&bs->bs_kgts_sema);
} }
/*
 * Free all kernel contexts that are not currently in use.
 * Returns 0 if all were freed, else the number of contexts still in use.
 */
static int gru_free_kernel_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_blade_state *blade;
	int busy = 0;
	int bid;

	/* Walk every possible blade; unpopulated slots are NULL. */
	for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
		blade = gru_base[bid];
		if (!blade)
			continue;

		/*
		 * Non-blocking acquire: a held semaphore means the kernel
		 * context is in use, so count it instead of waiting.
		 */
		if (!down_write_trylock(&blade->bs_kgts_sema)) {
			busy++;
			continue;
		}

		gts = blade->bs_kgts;
		/* Detach from the GRU hardware before freeing, if loaded. */
		if (gts && gts->ts_gru)
			gru_unload_context(gts, 0);
		kfree(gts);		/* kfree(NULL) is a no-op */
		blade->bs_kgts = NULL;
		up_write(&blade->bs_kgts_sema);
	}
	return busy;
}
/* /*
* Lock & load the kernel context for the specified blade. * Lock & load the kernel context for the specified blade.
*/ */
...@@ -1009,35 +1037,22 @@ int gru_ktest(unsigned long arg) ...@@ -1009,35 +1037,22 @@ int gru_ktest(unsigned long arg)
case 2: case 2:
ret = quicktest2(arg); ret = quicktest2(arg);
break; break;
case 99:
ret = gru_free_kernel_contexts();
break;
} }
return ret; return ret;
} }
int gru_kservices_init(struct gru_state *gru) int gru_kservices_init(void)
{ {
struct gru_blade_state *bs;
bs = gru->gs_blade;
if (gru != &bs->bs_grus[0])
return 0;
init_rwsem(&bs->bs_kgts_sema);
return 0; return 0;
} }
void gru_kservices_exit(struct gru_state *gru) void gru_kservices_exit(void)
{ {
struct gru_blade_state *bs; if (gru_free_kernel_contexts())
struct gru_thread_state *kgts; BUG();
bs = gru->gs_blade;
if (gru != &bs->bs_grus[0])
return;
kgts = bs->bs_kgts;
if (kgts && kgts->ts_gru)
gru_unload_context(kgts, 0);
kfree(kgts);
} }
...@@ -638,8 +638,8 @@ extern void gru_unload_context(struct gru_thread_state *gts, int savestate); ...@@ -638,8 +638,8 @@ extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
extern int gru_update_cch(struct gru_thread_state *gts, int force_unload); extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
extern void gts_drop(struct gru_thread_state *gts); extern void gts_drop(struct gru_thread_state *gts);
extern void gru_tgh_flush_init(struct gru_state *gru); extern void gru_tgh_flush_init(struct gru_state *gru);
extern int gru_kservices_init(struct gru_state *gru); extern int gru_kservices_init(void);
extern void gru_kservices_exit(struct gru_state *gru); extern void gru_kservices_exit(void);
extern int gru_dump_chiplet_request(unsigned long arg); extern int gru_dump_chiplet_request(unsigned long arg);
extern irqreturn_t gru_intr(int irq, void *dev_id); extern irqreturn_t gru_intr(int irq, void *dev_id);
extern int gru_handle_user_call_os(unsigned long address); extern int gru_handle_user_call_os(unsigned long address);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment