Commit fe5bb6b0 authored by Jack Steiner, committed by Linus Torvalds

sgi-gru: misc GRU cleanup

Misc trivial GRU driver fixes:
	- fix long lines
	- eliminate extra whitespace
	- eliminate compiler warning
	- better validation of invalidate user parameters
	- bug fix for GRU TLB flush (not the cpu TLB flush)

These changes are all internal to the SGI GRU driver and have no effect
on the base kernel.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 66666e50
@@ -19,8 +19,11 @@
 #ifndef __GRU_INSTRUCTIONS_H__
 #define __GRU_INSTRUCTIONS_H__
 
-#define gru_flush_cache_hook(p)
-#define gru_emulator_wait_hook(p, w)
+extern int gru_check_status_proc(void *cb);
+extern int gru_wait_proc(void *cb);
+extern void gru_wait_abort_proc(void *cb);
 
 /*
  * Architecture dependent functions
@@ -29,16 +32,16 @@
 #if defined(CONFIG_IA64)
 #include <linux/compiler.h>
 #include <asm/intrinsics.h>
-#define __flush_cache(p)		ia64_fc(p)
+#define __flush_cache(p)		ia64_fc((unsigned long)p)
 /* Use volatile on IA64 to ensure ordering via st4.rel */
-#define gru_ordered_store_int(p,v)				\
+#define gru_ordered_store_int(p, v)				\
 		do {						\
 			barrier();				\
 			*((volatile int *)(p)) = v; /* force st.rel */	\
 		} while (0)
 #elif defined(CONFIG_X86_64)
 #define __flush_cache(p)		clflush(p)
-#define gru_ordered_store_int(p,v)				\
+#define gru_ordered_store_int(p, v)				\
 		do {						\
 			barrier();				\
 			*(int *)p = v;				\
@@ -558,20 +561,19 @@ extern int gru_get_cb_exception_detail(void *cb,
 
 #define GRU_EXC_STR_SIZE	256
 
-extern int gru_check_status_proc(void *cb);
-extern int gru_wait_proc(void *cb);
-extern void gru_wait_abort_proc(void *cb);
 
 /*
  * Control block definition for checking status
  */
 struct gru_control_block_status {
 	unsigned int	icmd		:1;
-	unsigned int	unused1		:31;
+	unsigned int	ima		:3;
+	unsigned int	reserved0	:4;
+	unsigned int	unused1		:24;
 	unsigned int	unused2		:24;
 	unsigned int	istatus		:2;
 	unsigned int	isubstatus	:4;
-	unsigned int	inused3		:2;
+	unsigned int	unused3		:2;
 };
 
 /* Get CB status */
......
@@ -368,6 +368,7 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 
 failfmm:
 	/* FMM state on UPM call */
+	gru_flush_cache(tfh);
 	STAT(tlb_dropin_fail_fmm);
 	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
 	return 0;
@@ -497,10 +498,8 @@ int gru_handle_user_call_os(unsigned long cb)
 	if (!gts)
 		return -EINVAL;
 
-	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
-		ret = -EINVAL;
+	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
 		goto exit;
-	}
 
 	/*
 	 * If force_unload is set, the UPM TLB fault is phony. The task
@@ -508,6 +507,10 @@ int gru_handle_user_call_os(unsigned long cb)
 	 * unload the context. The task will page fault and assign a new
 	 * context.
 	 */
+	if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
+				gts->ts_blade != uv_numa_blade_id())
+		gts->ts_force_unload = 1;
+
 	ret = -EAGAIN;
 	cbrnum = thread_cbr_number(gts, ucbnum);
 	if (gts->ts_force_unload) {
@@ -541,11 +544,13 @@ int gru_get_exception_detail(unsigned long arg)
 	if (!gts)
 		return -EINVAL;
 
-	if (gts->ts_gru) {
-		ucbnum = get_cb_number((void *)excdet.cb);
+	ucbnum = get_cb_number((void *)excdet.cb);
+	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
+		ret = -EINVAL;
+	} else if (gts->ts_gru) {
 		cbrnum = thread_cbr_number(gts, ucbnum);
 		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
-		prefetchw(cbe);	/* Harmless on hardware, required for emulator */
+		prefetchw(cbe);/* Harmless on hardware, required for emulator */
 		excdet.opc = cbe->opccpy;
 		excdet.exopc = cbe->exopccpy;
 		excdet.ecause = cbe->ecause;
@@ -609,7 +614,7 @@ int gru_user_flush_tlb(unsigned long arg)
 	if (!gts)
 		return -EINVAL;
 
-	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.vaddr + req.len);
+	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
 	gru_unlock_gts(gts);
 
 	return 0;
......
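
The gru_user_flush_tlb() hunk above is part of the "bug fix for GRU TLB flush" item in the commit message: gru_flush_tlb_range() takes a length as its third argument (its prototype appears in the TLB-purge hunk near the end of this commit), but the caller was passing the end address req.vaddr + req.len. A minimal stand-alone sketch of that mix-up, using hypothetical names rather than the driver's real API:

    #include <stdio.h>

    /* Hypothetical range-flush helper that, like gru_flush_tlb_range(),
     * expects (start, length) rather than (start, end). */
    static void flush_range(unsigned long start, unsigned long len)
    {
            printf("flush [0x%lx, 0x%lx)\n", start, start + len);
    }

    int main(void)
    {
            unsigned long vaddr = 0x400000UL, len = 0x2000UL;

            /* Old call pattern: the end address is passed where a length
             * is expected, so the flush covers a hugely oversized region. */
            flush_range(vaddr, vaddr + len);

            /* Fixed call pattern: pass the length itself. */
            flush_range(vaddr, len);
            return 0;
    }
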
@@ -45,7 +45,8 @@
 #include <asm/uv/uv_mmrs.h>
 
 struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
-unsigned long gru_start_paddr, gru_end_paddr __read_mostly;
+unsigned long gru_start_paddr __read_mostly;
+unsigned long gru_end_paddr __read_mostly;
 struct gru_stats_s gru_stats;
 
 /* Guaranteed user available resources on each node */
@@ -295,7 +296,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
 	for_each_online_node(nid) {
 		bid = uv_node_to_blade_id(nid);
 		pnode = uv_node_to_pnode(nid);
-		if (gru_base[bid])
+		if (bid < 0 || gru_base[bid])
 			continue;
 		page = alloc_pages_node(nid, GFP_KERNEL, order);
 		if (!page)
@@ -312,7 +313,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
 				chip++, gru++) {
 			paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
 			vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
-			gru_init_chiplet(gru, paddr, vaddr, bid, nid, chip);
+			gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
 			n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
 			cbrs = max(cbrs, n);
 			n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
@@ -370,26 +371,26 @@ static int __init gru_init(void)
 	void *gru_start_vaddr;
 
 	if (!is_uv_system())
-		return 0;
+		return -ENODEV;
 
 #if defined CONFIG_IA64
 	gru_start_paddr = 0xd000000000UL;	/* ZZZZZZZZZZZZZZZZZZZ fixme */
 #else
 	gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
 				0x7fffffffffffUL;
 #endif
 	gru_start_vaddr = __va(gru_start_paddr);
-	gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE;
+	gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
 	printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
 	       gru_start_paddr, gru_end_paddr);
 	irq = get_base_irq();
 	for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
 		ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
-		/* TODO: fix irq handling on x86. For now ignore failures because
+		/* TODO: fix irq handling on x86. For now ignore failure because
 		 * interrupts are not required & not yet fully supported */
 		if (ret) {
-			printk("!!!WARNING: GRU ignoring request failure!!!\n");
+			printk(KERN_WARNING
+				"!!!WARNING: GRU ignoring request failure!!!\n");
 			ret = 0;
 		}
 		if (ret) {
@@ -469,7 +470,11 @@ struct vm_operations_struct gru_vm_ops = {
 	.fault		= gru_fault,
 };
 
+#ifndef MODULE
 fs_initcall(gru_init);
+#else
+module_init(gru_init);
+#endif
 module_exit(gru_exit);
 
 module_param(gru_options, ulong, 0644);
......
@@ -489,7 +489,7 @@ enum gru_cbr_state {
  *	64m		26	8
  *	...
  */
-#define GRU_PAGESIZE(sh)	((((sh) > 20 ? (sh) + 2: (sh)) >> 1) - 6)
+#define GRU_PAGESIZE(sh)	((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6)
 #define GRU_SIZEAVAIL(sh)	(1UL << GRU_PAGESIZE(sh))
 
 /* minimum TLB purge count to ensure a full purge */
......
@@ -437,7 +437,7 @@ static int send_message_failure(void *cb,
 		break;
 	case CBSS_PUT_NACKED:
 		STAT(mesq_send_put_nacked);
-		m =mq + (gru_get_amo_value_head(cb) << 6);
+		m = mq + (gru_get_amo_value_head(cb) << 6);
 		gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
 		if (gru_wait(cb) == CBS_IDLE)
 			ret = MQE_OK;
......
@@ -432,8 +432,8 @@ static inline long gru_copy_handle(void *d, void *s)
 	return GRU_HANDLE_BYTES;
 }
 
-static void gru_prefetch_context(void *gseg, void *cb, void *cbe, unsigned long cbrmap,
-				unsigned long length)
+static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
+				unsigned long cbrmap, unsigned long length)
 {
 	int i, scr;
@@ -773,8 +773,8 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 again:
-	preempt_disable();
 	mutex_lock(&gts->ts_ctxlock);
+	preempt_disable();
 	if (gts->ts_gru) {
 		if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
 			STAT(migrated_nopfn_unload);
......
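
The gru_fault() hunk above swaps preempt_disable() and mutex_lock() because mutex_lock() may sleep, and sleeping is not allowed while preemption is disabled; the mutex therefore has to be taken first. A toy user-space model of that invariant, with invented names standing in for the kernel primitives:

    #include <assert.h>
    #include <stdio.h>

    static int preempt_count;			/* toy preemption counter */

    static void toy_preempt_disable(void) { preempt_count++; }
    static void toy_preempt_enable(void)  { preempt_count--; }

    /* Mimics the kernel's might_sleep() debug check: sleeping while
     * preemption is disabled is a bug. */
    static void toy_might_sleep(void)
    {
            assert(preempt_count == 0);
    }

    /* A lock that can sleep while waiting, like a kernel mutex. */
    static void toy_mutex_lock(void)
    {
            toy_might_sleep();
            /* ... block until the lock is acquired ... */
    }

    static void toy_mutex_unlock(void) { }

    int main(void)
    {
            /* Fixed ordering from the hunk above: take the sleeping lock
             * first, only then disable preemption. Reversing the two
             * calls would trip the toy_might_sleep() assertion. */
            toy_mutex_lock();
            toy_preempt_disable();
            /* ... fault handling ... */
            toy_preempt_enable();
            toy_mutex_unlock();
            printf("ordering OK\n");
            return 0;
    }
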
@@ -278,13 +278,12 @@ struct gru_stats_s {
 
 /* Generate a GRU asid value from a GRU base asid & a virtual address. */
 #if defined CONFIG_IA64
 #define VADDR_HI_BIT		64
-#define GRUREGION(addr)		((addr) >> (VADDR_HI_BIT - 3) & 3)
 #elif defined CONFIG_X86_64
 #define VADDR_HI_BIT		48
-#define GRUREGION(addr)		(0)		/* ZZZ could do better */
 #else
 #error "Unsupported architecture"
 #endif
+#define GRUREGION(addr)		((addr) >> (VADDR_HI_BIT - 3) & 3)
 #define GRUASID(asid, addr)	((asid) + GRUREGION(addr))
 
 /*------------------------------------------------------------------------------
@@ -297,12 +296,12 @@ struct gru_state;
  * This structure is pointed to from the mmstruct via the notifier pointer.
  * There is one of these per address space.
  */
-struct gru_mm_tracker {
-	unsigned int		mt_asid_gen;	/* ASID wrap count */
-	int			mt_asid;	/* current base ASID for gru */
-	unsigned short		mt_ctxbitmap;	/* bitmap of contexts using
+struct gru_mm_tracker {				/* pack to reduce size */
+	unsigned int		mt_asid_gen:24;	/* ASID wrap count */
+	unsigned int		mt_asid:24;	/* current base ASID for gru */
+	unsigned short		mt_ctxbitmap:16;/* bitmap of contexts using
 						   asid */
-};
+} __attribute__ ((packed));
 
 struct gru_mm_struct {
 	struct mmu_notifier	ms_notifier;
@@ -359,6 +358,8 @@ struct gru_thread_state {
 						   required for contest */
 	unsigned char		ts_cbr_au_count;/* Number of CBR resources
 						   required for contest */
+	char			ts_blade;	/* If >= 0, migrate context if
+						   ref from diferent blade */
 	char			ts_force_unload;/* force context to be unloaded
 						   after migration */
 	char			ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
......
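
The gru_mm_tracker hunk above narrows the fields to bit-fields (24-bit generation, 24-bit ASID, 16-bit context bitmap) and packs the struct, as its new comment says, to reduce its size. A stand-alone sketch of the effect on sizeof, assuming a typical GCC/x86_64 layout (field names reused only for illustration):

    #include <stdio.h>

    /* Layout before the change: roughly 4 + 4 + 2 bytes plus tail padding. */
    struct tracker_old {
            unsigned int	mt_asid_gen;
            int			mt_asid;
            unsigned short	mt_ctxbitmap;
    };

    /* Layout after the change: 24 + 24 + 16 bits packed into 8 bytes. */
    struct tracker_new {
            unsigned int	mt_asid_gen:24;
            unsigned int	mt_asid:24;
            unsigned short	mt_ctxbitmap:16;
    } __attribute__ ((packed));

    int main(void)
    {
            /* Typically prints 12 and 8 with GCC on x86_64. */
            printf("old: %zu bytes, packed: %zu bytes\n",
                   sizeof(struct tracker_old), sizeof(struct tracker_new));
            return 0;
    }
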
@@ -187,7 +187,7 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
 				" FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n",
 				gid, asid, num, asids->mt_ctxbitmap);
 			tgh = get_lock_tgh_handle(gru);
-			tgh_invalidate(tgh, start, 0, asid, grupagesize, 0,
+			tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
 				num - 1, asids->mt_ctxbitmap);
 			get_unlock_tgh_handle(tgh);
 		} else {
@@ -212,9 +212,8 @@ void gru_flush_all_tlb(struct gru_state *gru)
 	gru_dbg(grudev, "gru %p, gid %d\n", gru, gru->gs_gid);
 	tgh = get_lock_tgh_handle(gru);
-	tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0);
+	tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff);
 	get_unlock_tgh_handle(tgh);
-	preempt_enable();
 }
 
 /*
......
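
The two tgh_invalidate() changes above are the GRU-side TLB flush fix from the commit message: the flush-all path was passing a context bitmap of 0, i.e. selecting no contexts, and now passes 0xffff (all 16), while the range flush's address mask goes from 0 to ~0. The exact mask semantics are an inference from this diff; the toy loop below (invented names, not the real TGH interface) only illustrates the context-bitmap half, i.e. why a zero bitmap makes the purge a no-op:

    #include <stdio.h>

    /* Toy stand-in for issuing a TLB purge to every context whose bit is
     * set in ctxbitmap; returns how many contexts were actually flushed. */
    static int flush_contexts(unsigned int ctxbitmap)
    {
            int ctx, flushed = 0;

            for (ctx = 0; ctx < 16; ctx++)
                    if (ctxbitmap & (1u << ctx))
                            flushed++;	/* would purge this context's TLB */
            return flushed;
    }

    int main(void)
    {
            printf("bitmap 0x0000 flushes %d contexts\n", flush_contexts(0));
            printf("bitmap 0xffff flushes %d contexts\n", flush_contexts(0xffff));
            return 0;
    }
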