Commit ecdaf2b5 authored by Jack Steiner, committed by Linus Torvalds

sgi-gru: restructure the GRU vtop functions

Restructure the GRU vtop functions in preparation for future changes.
This patch simply moves code around & does not change the algorithm.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 87419412
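
This patch extracts the virtual-to-physical translation out of gru_try_dropin() into a new helper, gru_vtop(), which reports its outcome through return codes: 0 when the address translated, -1 when the address is invalid, and -2 when an atomic lookup failed and the fault must be retried in non-atomic (upm) context. The small standalone sketch below is illustrative only, with hypothetical names and not part of the patch; it merely models how a caller maps those codes onto the existing failinval/failupm paths, mirroring the second hunk of the diff:

/* Illustrative sketch only: models the gru_vtop() return-code contract. */
#include <stdio.h>

enum vtop_status { VTOP_OK = 0, VTOP_INVAL = -1, VTOP_UPM = -2 };

/* Map a translation result onto the action the fault handler takes. */
static const char *vtop_action(int ret)
{
	if (ret == VTOP_INVAL)
		return "failinval";	/* invalid address: fail the dropin */
	if (ret == VTOP_UPM)
		return "failupm";	/* retry the fault in non-atomic context */
	return "dropin";		/* success: write the TLB entry */
}

int main(void)
{
	printf("%s %s %s\n", vtop_action(VTOP_OK),
	       vtop_action(VTOP_INVAL), vtop_action(VTOP_UPM));
	return 0;
}
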
@@ -267,6 +267,44 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
 	return 1;
 }
 
+static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
+		    int write, int atomic, unsigned long *gpa, int *pageshift)
+{
+	struct mm_struct *mm = gts->ts_mm;
+	struct vm_area_struct *vma;
+	unsigned long paddr;
+	int ret, ps;
+
+	vma = find_vma(mm, vaddr);
+	if (!vma)
+		goto inval;
+
+	/*
+	 * Atomic lookup is faster & usually works even if called in non-atomic
+	 * context.
+	 */
+	rmb();	/* Must/check ms_range_active before loading PTEs */
+	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
+	if (ret) {
+		if (atomic)
+			goto upm;
+		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
+			goto inval;
+	}
+	if (is_gru_paddr(paddr))
+		goto inval;
+	paddr = paddr & ~((1UL << ps) - 1);
+	*gpa = uv_soc_phys_ram_to_gpa(paddr);
+	*pageshift = ps;
+	return 0;
+
+inval:
+	return -1;
+upm:
+	return -2;
+}
+
+
 /*
  * Drop a TLB entry into the GRU. The fault is described by info in an TFH.
  * Input:
@@ -281,10 +319,8 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 			  struct gru_tlb_fault_handle *tfh,
 			  unsigned long __user *cb)
 {
-	struct mm_struct *mm = gts->ts_mm;
-	struct vm_area_struct *vma;
-	int pageshift, asid, write, ret;
-	unsigned long paddr, gpa, vaddr;
+	int pageshift = 0, asid, write, ret, atomic = !cb;
+	unsigned long gpa = 0, vaddr = 0;
 
 	/*
 	 * NOTE: The GRU contains magic hardware that eliminates races between
@@ -318,28 +354,12 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 	if (atomic_read(&gts->ts_gms->ms_range_active))
 		goto failactive;
 
-	vma = find_vma(mm, vaddr);
-	if (!vma)
-		goto failinval;
-
-	/*
-	 * Atomic lookup is faster & usually works even if called in non-atomic
-	 * context.
-	 */
-	rmb();	/* Must/check ms_range_active before loading PTEs */
-	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &pageshift);
-	if (ret) {
-		if (!cb)
-			goto failupm;
-		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr,
-					  &pageshift))
-			goto failinval;
-	}
-	if (is_gru_paddr(paddr))
-		goto failinval;
-
-	paddr = paddr & ~((1UL << pageshift) - 1);
-	gpa = uv_soc_phys_ram_to_gpa(paddr);
+	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+	if (ret == -1)
+		goto failinval;
+	if (ret == -2)
+		goto failupm;
+
 	gru_cb_set_istatus_active(cb);
 	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
 			  GRU_PAGESIZE(pageshift));
...