Commit 7b2cb64f authored by Paul Burton, committed by Ralf Baechle

MIPS: mm: Fix MIPS32 36b physical addressing (alchemy, netlogic)

There are 2 distinct cases in which a kernel for a MIPS32 CPU
(CONFIG_CPU_MIPS32=y) may use 64 bit physical addresses
(CONFIG_PHYS_ADDR_T_64BIT=y):

  - 36 bit physical addressing as used by RMI Alchemy & Netlogic XLP/XLR
    CPUs.

  - MIPS32r5 eXtended Physical Addressing (XPA).

These 2 cases are distinct in that they require different behaviour from
the kernel - the EntryLo registers have different formats. Until Linux
v4.1 we only supported the first case, with code conditional upon the 2
aforementioned Kconfig variables being set. Commit c5b36783 ("MIPS:
Add support for XPA.") added support for the second case, but did so by
modifying the code that existed for the first case rather than treating
the 2 cases as distinct. Since the EntryLo registers have different
formats this breaks the 36 bit Alchemy/XLP/XLR case. Fix this by
splitting the 2 cases, with XPA cases now being conditional upon
CONFIG_XPA and the non-XPA case matching the code as it existed prior to
commit c5b36783 ("MIPS: Add support for XPA.").
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Reported-by: Manuel Lauss <manuel.lauss@gmail.com>
Tested-by: Manuel Lauss <manuel.lauss@gmail.com>
Fixes: c5b36783 ("MIPS: Add support for XPA.")
Cc: James Hogan <james.hogan@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Maciej W. Rozycki <macro@linux-mips.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Alex Smith <alex.smith@imgtec.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: stable@vger.kernel.org # v4.1+
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13119/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 745f3558
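The practical difference between the two cases is how the 64-bit software PTE maps onto EntryLo. A minimal standalone sketch of the 36-bit layout this patch restores, mirroring the pfn_pte()/pte_pfn() pair added to pgtable-32.h below (the pte36/pfn_pte36 names are illustrative, not the kernel's):

	#include <assert.h>
	#include <stdint.h>

	/* 36-bit mode: the whole hardware view (PFN plus the 6 low
	 * cache/dirty/valid/global bits) lives in pte_high and can be
	 * written to EntryLo unchanged; pte_low carries software bits. */
	struct pte36 { uint32_t pte_low, pte_high; };

	static struct pte36 pfn_pte36(uint32_t pfn, uint32_t prot)
	{
		struct pte36 pte;

		pte.pte_high = (pfn << 6) | (prot & 0x3f);
		pte.pte_low = prot;
		return pte;
	}

	static uint32_t pte36_pfn(struct pte36 pte)
	{
		return pte.pte_high >> 6;
	}

	int main(void)
	{
		/* 36-bit physical addresses with 4KB pages give a
		 * 24-bit PFN; it round-trips through pte_high intact. */
		struct pte36 pte = pfn_pte36(0xabcdef, 0x07);

		assert(pte36_pfn(pte) == 0xabcdef);
		return 0;
	}

XPA, by contrast, splits the PFN across both words (pte_low carries the PFNX bits, see _PFNX_MASK below), so one layout cannot serve both modes; hence the split in the diff that follows.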
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -103,7 +103,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
 }
 
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 
 #define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
 static inline pte_t
@@ -118,6 +118,20 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 	return pte;
 }
 
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+#define pte_pfn(x)	((unsigned long)((x).pte_high >> 6))
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+	pte_t pte;
+
+	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
+	pte.pte_low = pgprot_val(prot);
+
+	return pte;
+}
+
 #else
 
 #ifdef CONFIG_CPU_VR41XX
@@ -166,7 +180,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 
 #else
 
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 
 /* Swap entries must have VALID and GLOBAL bits cleared. */
 #define __swp_type(x)			(((x).val >> 4) & 0x1f)
@@ -175,6 +189,15 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
 #define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })
 
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/* Swap entries must have VALID and GLOBAL bits cleared. */
+#define __swp_type(x)			(((x).val >> 2) & 0x1f)
+#define __swp_offset(x)			 ((x).val >> 7)
+#define __swp_entry(type, offset)	((swp_entry_t)  { ((type) << 2) | ((offset) << 7) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })
+
 #else
 /*
  * Constraints:
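As a sanity check on the restored swap encoding, here is a hedged userspace sketch of the non-XPA macros from the hunk above; the shifts come straight from the diff, the swp_entry36 helper name is made up:

	#include <assert.h>
	#include <stdint.h>

	/* Non-XPA 36-bit swap layout: type in bits [6:2], offset from
	 * bit 7; bits 0 and 1 (GLOBAL and VALID in this mode) stay 0 so
	 * a swap entry can never look like a live translation. */
	static uint32_t swp_entry36(uint32_t type, uint32_t offset)
	{
		return (type << 2) | (offset << 7);
	}

	int main(void)
	{
		uint32_t val = swp_entry36(0x15, 0x12345);

		assert(((val >> 2) & 0x1f) == 0x15);	/* __swp_type */
		assert((val >> 7) == 0x12345);		/* __swp_offset */
		assert((val & 0x3) == 0);	/* VALID/GLOBAL clear */
		return 0;
	}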
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -32,11 +32,11 @@
  * unpredictable things.  The code (when it is written) to deal with
  * this problem will be in the update_mmu_cache() code for the r4k.
  */
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 
 /*
- * Page table bit offsets used for 64 bit physical addressing on MIPS32,
- * for example with Alchemy, Netlogic XLP/XLR or XPA.
+ * Page table bit offsets used for 64 bit physical addressing on
+ * MIPS32r5 with XPA.
  */
 enum pgtable_bits {
 	/* Used by TLB hardware (placed in EntryLo*) */
@@ -59,6 +59,27 @@ enum pgtable_bits {
  */
 #define _PFNX_MASK		0xffffff
 
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/*
+ * Page table bit offsets used for 36 bit physical addressing on MIPS32,
+ * for example with Alchemy or Netlogic XLP/XLR.
+ */
+enum pgtable_bits {
+	/* Used by TLB hardware (placed in EntryLo*) */
+	_PAGE_GLOBAL_SHIFT,
+	_PAGE_VALID_SHIFT,
+	_PAGE_DIRTY_SHIFT,
+	_CACHE_SHIFT,
+
+	/* Used only by software (masked out before writing EntryLo*) */
+	_PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,
+	_PAGE_NO_READ_SHIFT,
+	_PAGE_WRITE_SHIFT,
+	_PAGE_ACCESSED_SHIFT,
+	_PAGE_MODIFIED_SHIFT,
+};
+
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 /* Page table bits used for r3k systems */
@@ -116,7 +137,7 @@ enum pgtable_bits {
 #endif
 
 /* Used by TLB hardware (placed in EntryLo*) */
-#if (defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32))
+#if defined(CONFIG_XPA)
 # define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
 #elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 # define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
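Spelling out the positions the new enum assigns (the cache field is 3 bits wide, so software bits start at bit 6) makes the link to the 0x3f hardware mask in pfn_pte() explicit. A sketch of the resolved values, assuming only what the enum above declares (the pgtable_bits_36 name is illustrative):

	#include <assert.h>

	enum pgtable_bits_36 {
		_PAGE_GLOBAL_SHIFT,	/* 0: hardware, in EntryLo* */
		_PAGE_VALID_SHIFT,	/* 1 */
		_PAGE_DIRTY_SHIFT,	/* 2 */
		_CACHE_SHIFT,		/* 3: 3-bit cache field */
		_PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,	/* 6: software */
		_PAGE_NO_READ_SHIFT,	/* 7 */
		_PAGE_WRITE_SHIFT,	/* 8 */
		_PAGE_ACCESSED_SHIFT,	/* 9 */
		_PAGE_MODIFIED_SHIFT,	/* 10 */
	};

	int main(void)
	{
		/* The hardware bits are exactly the low 6 bits, matching
		 * the (pgprot_val(prot) & 0x3f) mask used by pfn_pte(). */
		assert(_PAGE_PRESENT_SHIFT == 6);
		assert(_PAGE_MODIFIED_SHIFT == 10);
		return 0;
	}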
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -133,7 +133,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
-#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
+#ifdef CONFIG_XPA
+# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
+#else
+# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+#endif
+
 #define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
 #define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)
@@ -143,14 +148,21 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	smp_wmb();
 	ptep->pte_low = pte.pte_low;
 
+#ifdef CONFIG_XPA
 	if (pte.pte_high & _PAGE_GLOBAL) {
+#else
+	if (pte.pte_low & _PAGE_GLOBAL) {
+#endif
 		pte_t *buddy = ptep_buddy(ptep);
 		/*
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
-		if (pte_none(*buddy))
+		if (pte_none(*buddy)) {
+			if (!config_enabled(CONFIG_XPA))
+				buddy->pte_low |= _PAGE_GLOBAL;
 			buddy->pte_high |= _PAGE_GLOBAL;
+		}
 	}
 }
@@ -160,8 +172,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 	htw_stop();
 	/* Preserve global status for the pair */
-	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
-		null.pte_high = _PAGE_GLOBAL;
+	if (config_enabled(CONFIG_XPA)) {
+		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
+			null.pte_high = _PAGE_GLOBAL;
+	} else {
+		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
+			null.pte_low = null.pte_high = _PAGE_GLOBAL;
+	}
 
 	set_pte_at(mm, addr, ptep, null);
 	htw_start();
@@ -302,6 +319,8 @@ static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
 static inline pte_t pte_wrprotect(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_WRITE;
+	if (!config_enabled(CONFIG_XPA))
+		pte.pte_low &= ~_PAGE_SILENT_WRITE;
 	pte.pte_high &= ~_PAGE_SILENT_WRITE;
 	return pte;
 }
@@ -309,6 +328,8 @@ static inline pte_t pte_wrprotect(pte_t pte)
 static inline pte_t pte_mkclean(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_MODIFIED;
+	if (!config_enabled(CONFIG_XPA))
+		pte.pte_low &= ~_PAGE_SILENT_WRITE;
 	pte.pte_high &= ~_PAGE_SILENT_WRITE;
 	return pte;
 }
@@ -316,6 +337,8 @@ static inline pte_t pte_mkclean(pte_t pte)
 static inline pte_t pte_mkold(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_ACCESSED;
+	if (!config_enabled(CONFIG_XPA))
+		pte.pte_low &= ~_PAGE_SILENT_READ;
 	pte.pte_high &= ~_PAGE_SILENT_READ;
 	return pte;
 }
@@ -323,24 +346,33 @@ static inline pte_t pte_mkold(pte_t pte)
 static inline pte_t pte_mkwrite(pte_t pte)
 {
 	pte.pte_low |= _PAGE_WRITE;
-	if (pte.pte_low & _PAGE_MODIFIED)
+	if (pte.pte_low & _PAGE_MODIFIED) {
+		if (!config_enabled(CONFIG_XPA))
+			pte.pte_low |= _PAGE_SILENT_WRITE;
 		pte.pte_high |= _PAGE_SILENT_WRITE;
+	}
 	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
 	pte.pte_low |= _PAGE_MODIFIED;
-	if (pte.pte_low & _PAGE_WRITE)
+	if (pte.pte_low & _PAGE_WRITE) {
+		if (!config_enabled(CONFIG_XPA))
+			pte.pte_low |= _PAGE_SILENT_WRITE;
 		pte.pte_high |= _PAGE_SILENT_WRITE;
+	}
 	return pte;
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
 	pte.pte_low |= _PAGE_ACCESSED;
-	if (!(pte.pte_low & _PAGE_NO_READ))
+	if (!(pte.pte_low & _PAGE_NO_READ)) {
+		if (!config_enabled(CONFIG_XPA))
+			pte.pte_low |= _PAGE_SILENT_READ;
 		pte.pte_high |= _PAGE_SILENT_READ;
+	}
 	return pte;
 }
 #else
@@ -438,7 +470,7 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
  */
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
@@ -447,6 +479,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
 	return pte;
 }
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte.pte_low  &= _PAGE_CHG_MASK;
+	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
+	pte.pte_low  |= pgprot_val(newprot);
+	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
+	return pte;
+}
 #else
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
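The set_pte()/pte_clear() changes above exist because each even/odd PTE pair backs a single TLB entry (EntryLo0/EntryLo1), so the GLOBAL bit must agree across the pair, and in the non-XPA layout it must also agree across both words of each PTE. A compact userspace sketch of that invariant (struct and helper names are illustrative):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_GLOBAL	(1u << 0)

	struct pte36 { uint32_t pte_low, pte_high; };

	static int pte36_none(struct pte36 pte)
	{
		return !((pte.pte_low | pte.pte_high) & ~PAGE_GLOBAL);
	}

	/* Mirror of the non-XPA branch of set_pte(): making one half of
	 * a pair global marks an empty buddy global in both words. */
	static void set_pte36(struct pte36 *ptep, struct pte36 *buddy,
			      struct pte36 pte)
	{
		*ptep = pte;
		if ((pte.pte_low & PAGE_GLOBAL) && pte36_none(*buddy)) {
			buddy->pte_low |= PAGE_GLOBAL;
			buddy->pte_high |= PAGE_GLOBAL;
		}
	}

	int main(void)
	{
		struct pte36 even = { PAGE_GLOBAL | 0x40, 0x1000 };
		struct pte36 odd = { 0, 0 };

		set_pte36(&even, &odd, even);
		assert(odd.pte_low & PAGE_GLOBAL);	/* buddy follows */
		assert(odd.pte_high & PAGE_GLOBAL);
		return 0;
	}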
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -98,8 +98,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	idx += in_interrupt() ? FIX_N_COLOURS : 0;
 	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
 	pte = mk_pte(page, prot);
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 	entrylo = pte_to_entrylo(pte.pte_high);
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+	entrylo = pte.pte_high;
 #else
 	entrylo = pte_to_entrylo(pte_val(pte));
 #endif
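The one-line init.c change encodes the same layout split: under XPA, pte_high still goes through pte_to_entrylo() (the ROTR-based conversion the tlbex.c hunks below also emit), while in the 36-bit layout pte_high is already in EntryLo format. A hedged sketch of that "already EntryLo" property, reusing the layout from the earlier sketches:

	#include <assert.h>
	#include <stdint.h>

	/* In the 36-bit layout, pte_high is bit-for-bit an EntryLo
	 * value: PFN from bit 6 up, C in [5:3], D, V, G in [2:0]. */
	int main(void)
	{
		uint32_t pte_high = (0xabcdefu << 6)	/* PFN */
				  | (0x3u << 3)		/* C: cache attr */
				  | (1u << 2)		/* D: dirty */
				  | (1u << 1)		/* V: valid */
				  | (1u << 0);		/* G: global */

		/* entrylo = pte.pte_high; no pte_to_entrylo() needed */
		uint32_t entrylo = pte_high;

		assert((entrylo >> 6) == 0xabcdef);	/* PFN intact */
		assert((entrylo & 0x7) == 0x7);		/* D, V, G set */
		return 0;
	}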
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1011,25 +1011,21 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 
 static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
-	/*
-	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
-	 * Kernel is a special case.  Only a few CPUs use it.
-	 */
-	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
+	if (config_enabled(CONFIG_XPA)) {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
-#ifdef CONFIG_XPA
 		const int scratch = 1; /* Our extra working register */
 
 		uasm_i_addu(p, scratch, 0, ptep);
-#endif
+
 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
-		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
-		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+
+		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
-#ifdef CONFIG_XPA
+
 		uasm_i_lw(p, tmp, 0, scratch);
 		uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
 		uasm_i_lui(p, scratch, 0xff);
@@ -1038,7 +1034,22 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		uasm_i_and(p, ptep, scratch, ptep);
 		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
 		uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
-#endif
+		return;
+	}
+
+	/*
+	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
+	 * Kernel is a special case.  Only a few CPUs use it.
+	 */
+	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
+		int pte_off_even = sizeof(pte_t) / 2;
+		int pte_off_odd = pte_off_even + sizeof(pte_t);
+
+		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+
+		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
+
 		return;
 	}
@@ -1637,7 +1648,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
 
-	if (!cpu_has_64bits) {
+	if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
 		const int scratch = 1; /* Our extra working register */
 
 		uasm_i_lui(p, scratch, (mode >> 16));
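The restored non-XPA branch of build_update_entries() emits plain lw/mtc0 pairs with no ROTR and no mthc0, because pte_high needs no conversion. The offsets it loads from follow directly from the 8-byte two-word pte_t, as this hedged userspace sketch checks (the pte36 struct stands in for the kernel's pte_t):

	#include <assert.h>
	#include <stdint.h>

	struct pte36 { uint32_t pte_low, pte_high; };	/* 8-byte pte_t */

	int main(void)
	{
		/* The same offset arithmetic as the code above: */
		int pte_off_even = sizeof(struct pte36) / 2;		/* 4 */
		int pte_off_odd = pte_off_even + sizeof(struct pte36);	/* 12 */

		struct pte36 pair[2] = { { 0, 0x11 }, { 0, 0x22 } };
		uint8_t *ptep = (uint8_t *)pair;

		/* So "lw tmp, 4(ptep)" and "lw ptep, 12(ptep)" reach the
		 * pte_high words of the even and odd PTE of the pair,
		 * which are exactly what EntryLo0/EntryLo1 need. */
		assert(*(uint32_t *)(ptep + pte_off_even) == 0x11);
		assert(*(uint32_t *)(ptep + pte_off_odd) == 0x22);
		return 0;
	}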