Commit b26fe855 authored by David Mosberger, committed by David Mosberger

ia64: More 2.5.xx syncing.

parent 9f1e5eef
@@ -245,7 +245,14 @@ if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
 bool ' Disable VHPT' CONFIG_DISABLE_VHPT
 bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
-bool ' Early printk support (requires VGA!)' CONFIG_IA64_EARLY_PRINTK
+bool ' Early printk support' CONFIG_IA64_EARLY_PRINTK
+if [ "$CONFIG_IA64_EARLY_PRINTK" != "n" ]; then
+  bool ' Early printk on MMIO serial port' CONFIG_IA64_EARLY_PRINTK_UART
+  if [ "$CONFIG_IA64_EARLY_PRINTK_UART" != "n" ]; then
+    hex ' UART MMIO base address' CONFIG_IA64_EARLY_PRINTK_UART_BASE ff5e0000
+  fi
+  bool ' Early printk on VGA' CONFIG_IA64_EARLY_PRINTK_VGA
+fi
 bool ' Debug memory allocations' CONFIG_DEBUG_SLAB
 bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
 bool ' Turn on compare-and-exchange bug checking (slow!)' CONFIG_IA64_DEBUG_CMPXCHG
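Note: the new CONFIG_IA64_EARLY_PRINTK_UART and CONFIG_IA64_EARLY_PRINTK_UART_BASE options point very early console output at a memory-mapped 16550-style UART before the regular serial driver is up. As a rough, hypothetical sketch of what such a path usually boils down to (this is not the code added by this commit; the register offsets follow the standard 16550 layout, and a real ia64 implementation would reach the device through an uncached kernel mapping of the configured base address):

	#define UART_LSR	0x05	/* line status register (16550 layout) */
	#define UART_LSR_THRE	0x20	/* transmitter holding register empty */

	/* Base address would come from CONFIG_IA64_EARLY_PRINTK_UART_BASE, e.g. 0xff5e0000. */
	static volatile unsigned char *uart = (volatile unsigned char *) 0xff5e0000;

	static void early_uart_putc (char c)
	{
		/* Busy-wait until the UART can accept another byte, then write it. */
		while (!(uart[UART_LSR] & UART_LSR_THRE))
			;
		uart[0] = c;	/* transmitter holding register is at offset 0 */
	}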
......
@@ -450,7 +450,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 ** We need the alignment to invalidate I/O TLB using
 ** SBA HW features in the unmap path.
 */
-unsigned long o = 1 << get_order(bits_wanted << IOVP_SHIFT);
+unsigned long o = 1UL << get_order(bits_wanted << IOVP_SHIFT);
 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
 unsigned long mask;
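The 1 to 1UL change above matters because the constant 1 has type int: the shift is done in 32 bits and only widened to unsigned long afterwards, which goes wrong for large orders on an LP64 target such as ia64. A small stand-alone illustration (ordinary user-space C, not driver code):

	#include <stdio.h>

	int main (void)
	{
		int order = 31;				/* an unusually large order, purely for illustration */
		unsigned long bad  = 1 << order;	/* 32-bit shift; overflows, then sign-extends on widening */
		unsigned long good = 1UL << order;	/* 64-bit shift; yields 0x80000000 as intended */

		printf("1   << %d = %#lx\n", order, bad);
		printf("1UL << %d = %#lx\n", order, good);
		return 0;
	}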
@@ -1005,7 +1005,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg,
 ** Prepare for first/next DMA stream
 */
 dma_len = sba_sg_len(startsg);
-dma_offset = sba_sg_address(startsg);
+dma_offset = (unsigned long) sba_sg_address(startsg);
 startsg++;
 nents--;
@@ -1016,7 +1016,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg,
 ** to take advantage of the block IO TLB flush.
 */
 while (nents) {
-unsigned int end_offset = dma_offset + dma_len;
+unsigned long end_offset = dma_offset + dma_len;
 /* prev entry must end on a page boundary */
 if (end_offset & IOVP_MASK)
@@ -1114,9 +1114,9 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 #endif
 /* Fast path single entry scatterlists. */
 if (nents == 1) {
-sba_sg_iova(sglist) = (char *)sba_map_single(dev,
-sba_sg_iova(sglist),
+sba_sg_iova(sglist) = sba_map_single(dev,
+(void *) sba_sg_iova(sglist),
 sba_sg_len(sglist), direction);
 sba_sg_iova_len(sglist) = sba_sg_len(sglist);
 #ifdef CONFIG_PROC_FS
 /*
@@ -1455,7 +1455,7 @@ sba_common_init(struct sba_device *sba_dev)
 sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL;
 for (reserved_iov = 0xA0000 ; reserved_iov < 0xC0000 ; reserved_iov += IOVP_SIZE) {
-u64 *res_ptr = sba_dev->ioc[i].res_map;
+u64 *res_ptr = (u64 *) sba_dev->ioc[i].res_map;
 int index = PDIR_INDEX(reserved_iov);
 int res_word;
 u64 mask;
@@ -1586,8 +1586,8 @@ void __init sba_init(void)
 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 if (pci_resource_flags(device, i) == IORESOURCE_MEM) {
-hpa = ioremap(pci_resource_start(device, i),
+hpa = (u64) ioremap(pci_resource_start(device, i),
 pci_resource_len(device, i));
 break;
 }
 }
@@ -1595,7 +1595,7 @@ void __init sba_init(void)
 func_id = READ_REG(hpa + SBA_FUNC_ID);
 if (func_id == ZX1_FUNC_ID_VALUE) {
-(void)strcpy(sba_rev, "zx1");
+strcpy(sba_rev, "zx1");
 func_offset = zx1_func_offsets;
 } else {
 return;
......
This diff is collapsed.
@@ -474,7 +474,7 @@ acpi_find_rsdp (void)
 }
-#ifdef CONFIG_SERIAL_ACPI
+#ifdef CONFIG_SERIAL_8250_ACPI
 #include <linux/acpi_serial.h>
@@ -529,7 +529,7 @@ acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
 return 0;
 }
-#endif /* CONFIG_SERIAL_ACPI */
+#endif /* CONFIG_SERIAL_8250_ACPI */
 int __init
@@ -587,7 +587,7 @@ acpi_boot_init (char *cmdline)
 if (acpi_table_parse(ACPI_FACP, acpi_parse_fadt) < 1)
 printk(KERN_ERR PREFIX "Can't find FADT\n");
-#ifdef CONFIG_SERIAL_ACPI
+#ifdef CONFIG_SERIAL_8250_ACPI
 /*
 * TBD: Need phased approach to table parsing (only do those absolutely
 * required during boot-up). Recommend expanding concept of fix-
......
@@ -446,6 +446,9 @@ efi_init (void)
 } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
 efi.sal_systab = __va(config_tables[i].table);
 printk(" SALsystab=0x%lx", config_tables[i].table);
+} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
+efi.hcdp = __va(config_tables[i].table);
+printk(" HCDP=0x%lx", config_tables[i].table);
 }
 }
 printk("\n");
......
@@ -160,6 +160,10 @@ set_rte (unsigned int vector, unsigned long dest)
 int pin;
 char redir;
+#ifdef DEBUG_IRQ_ROUTING
+printk(KERN_DEBUG "set_rte: routing vector 0x%02x to 0x%lx\n", vector, dest);
+#endif
 pin = iosapic_irq[vector].pin;
 if (pin < 0)
 return; /* not an IOSAPIC interrupt */
@@ -406,7 +410,7 @@ iosapic_reassign_vector (int vector)
 || iosapic_irq[vector].polarity || iosapic_irq[vector].trigger)
 {
 new_vector = ia64_alloc_irq();
-printk("Reassigning Vector 0x%x to 0x%x\n", vector, new_vector);
+printk("Reassigning vector 0x%x to 0x%x\n", vector, new_vector);
 memcpy (&iosapic_irq[new_vector], &iosapic_irq[vector],
 sizeof(struct iosapic_irq));
 memset (&iosapic_irq[vector], 0, sizeof(struct iosapic_irq));
@@ -757,10 +761,11 @@ iosapic_pci_fixup (int phase)
 if (!(smp_int_redirect & SMP_IRQ_REDIRECTION)) {
 static int cpu_index = 0;
-set_rte(vector, cpu_physical_id(cpu_index) & 0xffff);
-for (cpu_index++; !cpu_online(cpu_index % NR_CPUS); cpu_index++);
-cpu_index %= NR_CPUS;
+while (!cpu_online(cpu_index))
+if (++cpu_index >= NR_CPUS)
+cpu_index = 0;
+set_rte(vector, cpu_physical_id(cpu_index) & 0xffff);
 } else {
 /*
 * Direct the interrupt vector to the current cpu,
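The rewritten distribution code in iosapic_pci_fixup() only hands a vector to a CPU that is actually online, instead of blindly cycling through all NR_CPUS slots. A small stand-alone simulation of that skip-offline loop (hypothetical, user-space only: cpu_online() is stubbed out, and the round-robin advance between vectors is added here just so the demo cycles; it is not part of the hunk above):

	#include <stdio.h>

	#define NR_CPUS 4

	/* Stub: pretend only CPUs 0 and 2 are online. */
	static int cpu_online (int cpu)
	{
		return cpu == 0 || cpu == 2;
	}

	int main (void)
	{
		static int cpu_index = 0;
		int vector;

		for (vector = 0; vector < 6; vector++) {
			/* Same skip-offline logic as the new iosapic code. */
			while (!cpu_online(cpu_index))
				if (++cpu_index >= NR_CPUS)
					cpu_index = 0;

			printf("vector %d -> cpu %d\n", vector, cpu_index);

			/* Advance to the next candidate CPU (demo only). */
			if (++cpu_index >= NR_CPUS)
				cpu_index = 0;
		}
		return 0;
	}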
......
@@ -368,7 +368,7 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
 * use the action we have.
 */
 action = NULL;
-if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
 action = desc->action;
 status &= ~IRQ_PENDING; /* we commit to handling */
 status |= IRQ_INPROGRESS; /* we are handling it */
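likely() and unlikely() are the usual kernel branch-prediction annotations built on GCC's __builtin_expect(); they affect only code layout and static prediction, not behaviour, so this hunk and the one that follows are pure optimizations for the common case (IRQ neither disabled nor already in progress, and an action present). Roughly what include/linux/compiler.h of this era provides (quoted from memory, so treat the exact form as approximate):

	#define likely(x)	__builtin_expect((x), 1)
	#define unlikely(x)	__builtin_expect((x), 0)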
@@ -381,7 +381,7 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
 * a different instance of this same irq, the other processor
 * will take care of it.
 */
-if (!action)
+if (unlikely(!action))
 goto out;
 /*
@@ -403,8 +403,8 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
 break;
 desc->status &= ~IRQ_PENDING;
 }
-desc->status &= ~IRQ_INPROGRESS;
 out:
+desc->status &= ~IRQ_INPROGRESS;
 /*
 * The ->end() handler has to deal with interrupts which got
 * disabled while the handler was running.
......
@@ -347,6 +347,14 @@ setup_arch (char **cmdline_p)
 #ifdef CONFIG_ACPI_BOOT
 acpi_boot_init(*cmdline_p);
 #endif
+#ifdef CONFIG_SERIAL_HCDP
+if (efi.hcdp) {
+void setup_serial_hcdp(void *);
+/* Setup the serial ports described by HCDP */
+setup_serial_hcdp(efi.hcdp);
+}
+#endif
 #ifdef CONFIG_VT
 # if defined(CONFIG_DUMMY_CONSOLE)
 conswitchp = &dummy_con;
......
@@ -130,6 +130,8 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
 siginfo_t siginfo;
 int sig, code;
+die_if_kernel("bad break", regs, break_num);
 /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
 siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
 siginfo.si_imm = break_num;
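die_if_kernel() is defined elsewhere in arch/ia64/kernel/traps.c; it only terminates things when the fault happened in kernel mode, so for a user-mode bad break the signal-delivery code that follows still runs. In rough outline (a sketch from memory, not a verbatim copy of the source):

	void
	die_if_kernel (char *str, struct pt_regs *regs, long err)
	{
		if (!user_mode(regs))
			die(str, regs, err);	/* print an oops and terminate */
	}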
......
@@ -108,7 +108,7 @@ free_initmem (void)
 addr = (unsigned long) &__init_begin;
 for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
-clear_bit(PG_reserved, &virt_to_page(addr)->flags);
+ClearPageReserved(virt_to_page(addr));
 set_page_count(virt_to_page(addr), 1);
 free_page(addr);
 ++totalram_pages;
@@ -162,9 +162,9 @@ free_initrd_mem (unsigned long start, unsigned long end)
 if (!virt_addr_valid(start))
 continue;
 page = virt_to_page(start);
-clear_bit(PG_reserved, &page->flags);
+ClearPageReserved(page);
 set_page_count(page, 1);
-__free_page(page);
+free_page(page);
 ++totalram_pages;
 }
 }
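ClearPageReserved() is the generic page-flag accessor for the open-coded clear_bit() it replaces, so the flag manipulation itself is a readability/API cleanup rather than a behavioural change. Roughly what the generic page-flags definitions provide (quoted from memory; the exact header varies between trees):

	#define PageReserved(page)	test_bit(PG_reserved, &(page)->flags)
	#define SetPageReserved(page)	set_bit(PG_reserved, &(page)->flags)
	#define ClearPageReserved(page)	clear_bit(PG_reserved, &(page)->flags)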
......
-#ifndef AGP_H
-#define AGP_H 1
+#ifndef _ASM_IA64_AGP_H
+#define _ASM_IA64_AGP_H
-/* dummy for now */
+/*
+* IA-64 specific AGP definitions.
+*
+* Copyright (C) 2002 Hewlett-Packard Co
+* David Mosberger-Tang <davidm@hpl.hp.com>
+*/
-#define map_page_into_agp(page)
-#define unmap_page_from_agp(page)
-#define flush_agp_mappings()
-#define flush_agp_cache() mb()
+/*
+* To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
+* in coherent mode, which lets us map the AGP memory as normal (write-back) memory
+* (unlike x86, where it gets mapped "write-coalescing").
+*/
+#define map_page_into_agp(page) /* nothing */
+#define unmap_page_from_agp(page) /* nothing */
+#define flush_agp_mappings() /* nothing */
+#define flush_agp_cache() mb()
-#endif
+#endif /* _ASM_IA64_AGP_H */
@@ -190,6 +190,9 @@ typedef void efi_reset_system_t (int reset_type, efi_status_t status,
 #define SAL_SYSTEM_TABLE_GUID \
 EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+#define HCDP_TABLE_GUID \
+EFI_GUID( 0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98 )
 typedef struct {
 efi_guid_t guid;
 u64 table;
@@ -225,6 +228,7 @@ extern struct efi {
 void *smbios; /* SM BIOS table */
 void *sal_systab; /* SAL system table */
 void *boot_info; /* boot info table */
+void *hcdp; /* HCDP table */
 efi_get_time_t *get_time;
 efi_set_time_t *set_time;
 efi_get_wakeup_time_t *get_wakeup_time;
......
@@ -2,7 +2,7 @@
 #define _ASM_IA64_ELF_H
 /*
-* ELF archtecture specific definitions.
+* ELF-specific definitions.
 *
 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
 * David Mosberger-Tang <davidm@hpl.hp.com>
......
@@ -87,7 +87,12 @@ typedef union ia64_va {
 #define REGION_SIZE REGION_NUMBER(1)
 #define REGION_KERNEL 7
-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; } while (0)
+#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
+# define ia64_abort() __builtin_trap()
+#else
+# define ia64_abort() (*(volatile int *) 0 = 0)
+#endif
+#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
 #define PAGE_BUG(page) do { BUG(); } while (0)
 static __inline__ int
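With GCC 3.1 and later, __builtin_trap() makes the compiler emit a trapping instruction directly; the pre-3.1 fallback keeps the old deliberate null-pointer store, and the added volatile is what guarantees the compiler cannot optimize that store away. A tiny user-space analogue just to show the two behaviours (hypothetical, not kernel code):

	/* Calling either of these terminates the process, via different signals. */
	void abort_via_trap (void)
	{
		__builtin_trap();		/* trapping instruction, typically SIGILL/SIGTRAP */
	}

	void abort_via_null_store (void)
	{
		*(volatile int *) 0 = 0;	/* deliberate null store, typically SIGSEGV */
	}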
......
 /*
 * include/asm-ia64/serial.h
 *
 * Derived from the i386 version.
 */
@@ -35,7 +35,7 @@
 #else
 #define RS_TABLE_SIZE
 #endif
 /*
 * The following define the access methods for the HUB6 card. All
 * access is through two ports for all 24 possible chips. The card is
@@ -115,21 +115,8 @@
 #define HUB6_SERIAL_PORT_DFNS
 #endif
-#ifdef CONFIG_MCA
-#define MCA_SERIAL_PORT_DFNS \
-{ 0, BASE_BAUD, 0x3220, 3, STD_COM_FLAGS }, \
-{ 0, BASE_BAUD, 0x3228, 3, STD_COM_FLAGS }, \
-{ 0, BASE_BAUD, 0x4220, 3, STD_COM_FLAGS }, \
-{ 0, BASE_BAUD, 0x4228, 3, STD_COM_FLAGS }, \
-{ 0, BASE_BAUD, 0x5220, 3, STD_COM_FLAGS }, \
-{ 0, BASE_BAUD, 0x5228, 3, STD_COM_FLAGS },
-#else
-#define MCA_SERIAL_PORT_DFNS
-#endif
 #define SERIAL_PORT_DFNS \
 STD_SERIAL_PORT_DEFNS \
 EXTRA_SERIAL_PORT_DEFNS \
-HUB6_SERIAL_PORT_DFNS \
-MCA_SERIAL_PORT_DFNS
+HUB6_SERIAL_PORT_DFNS
@@ -123,13 +123,20 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];
 tlb->mm = mm;
-tlb->nr = 0;
-if (full_mm_flush || num_online_cpus() == 1)
-/*
-* Use fast mode if only 1 CPU is online or if we're tearing down the
-* entire address space.
-*/
-tlb->nr = ~0U;
+/*
+* Use fast mode if only 1 CPU is online.
+*
+* It would be tempting to turn on fast-mode for full_mm_flush as well. But this
+* doesn't work because of speculative accesses and software prefetching: the page
+* table of "mm" may (and usually is) the currently active page table and even
+* though the kernel won't do any user-space accesses during the TLB shoot down, a
+* compiler might use speculation or lfetch.fault on what happens to be a valid
+* user-space address. This in turn could trigger a TLB miss fault (or a VHPT
+* walk) and re-insert a TLB entry we just removed. Slow mode avoids such
+* problems. (We could make fast-mode work by switching the current task to a
+* different "mm" during the shootdown.) --davidm 08/02/2002
+*/
+tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
 tlb->fullmm = full_mm_flush;
 tlb->freed = 0;
 tlb->start_addr = ~0UL;
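For context: tlb->nr == ~0U is the "fast mode" marker, in which tlb_remove_page() frees each page immediately; in slow mode pages are queued and only released after the TLB has actually been purged, which is what protects against the speculative re-insertion problem described in the new comment. Roughly how the flag is consumed elsewhere in the same header (a from-memory sketch; names and details are approximate, not a verbatim copy):

	/* Sketch of tlb_remove_page() as it consumes the nr field set above. */
	static inline void
	tlb_remove_page (mmu_gather_t *tlb, struct page *page)
	{
		if (tlb->nr == ~0U) {			/* fast mode: free immediately (UP only) */
			free_page_and_swap_cache(page);
			return;
		}
		tlb->pages[tlb->nr++] = page;		/* slow mode: batch the page ... */
		if (tlb->nr >= FREE_PTE_NR)		/* ... and free it only after flushing the TLB */
			ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
	}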
......
@@ -60,6 +60,8 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 #else
 if (vma->vm_mm == current->active_mm)
 asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
+else
+vma->vm_mm->context = 0;
 #endif
 }
......