Commit 2a4a7e02 authored by Linus Torvalds

Automatic merge of master.kernel.org:/home/rmk/linux-2.6-rmk.git

parents 85bcc130 bfd4e070
@@ -478,7 +478,7 @@ static int s3c2440_clk_add(struct sys_device *sysdev)
{
unsigned long upllcon = __raw_readl(S3C2410_UPLLCON);
s3c2440_clk_upll.rate = s3c2410_get_pll(upllcon, clk_xtal.rate) * 2;
s3c2440_clk_upll.rate = s3c2410_get_pll(upllcon, clk_xtal.rate);
printk("S3C2440: Clock Support, UPLL %ld.%03ld MHz\n",
print_mhz(s3c2440_clk_upll.rate));
......
@@ -192,9 +192,11 @@ void __init s3c2440_map_io(struct map_desc *mach_desc, int size)
iotable_init(s3c2440_iodesc, ARRAY_SIZE(s3c2440_iodesc));
iotable_init(mach_desc, size);
/* rename any peripherals whose naming differs from the s3c2410 */
s3c_device_i2c.name = "s3c2440-i2c";
s3c_device_nand.name = "s3c2440-nand";
/* change irq for watchdog */
@@ -225,7 +227,7 @@ void __init s3c2440_init_clocks(int xtal)
break;
case S3C2440_CLKDIVN_HDIVN_2:
hdiv = 1;
hdiv = 2;
break;
case S3C2440_CLKDIVN_HDIVN_4_8:
......
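A minimal sketch of what the corrected decode means for the derived bus clock, assuming HCLK is computed as FCLK / hdiv later in this function (the values below are illustrative only, not from this diff):

/* Hypothetical worked example: with the fix, the HDIVN_2 setting
 * decodes to a real divide-by-2, so HCLK comes out halved. */
unsigned long fclk = 400000000;		/* assumed 400 MHz FCLK */
int hdiv = 2;				/* decoded from S3C2440_CLKDIVN_HDIVN_2 */
unsigned long hclk = fclk / hdiv;	/* 200 MHz; the old hdiv = 1 would
					   have reported 400 MHz */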
@@ -412,21 +412,20 @@ config CPU_BPREDICT_DISABLE
config TLS_REG_EMUL
bool
default y if (SMP || CPU_32v6) && (CPU_32v5 || CPU_32v4 || CPU_32v3)
default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
help
We might be running on an ARMv6+ processor which should have the TLS
register but for some reason we can't use it, or maybe an SMP system
using a pre-ARMv6 processor (there are apparently a few prototypes
like that in existence) and therefore access to that register must
be emulated.
An SMP system may be using a pre-ARMv6 processor (there are apparently
a few prototypes like that in existence), in which case access to
that required register must be emulated.
config HAS_TLS_REG
bool
depends on CPU_32v6
default y if !TLS_REG_EMUL
depends on !TLS_REG_EMUL
default y if SMP || CPU_32v7
help
This selects support for the CP15 thread register.
It is defined to be available on ARMv6 or later. If a particular
ARMv6 or later CPU doesn't support it then it must include "select
TLS_REG_EMUL" along with its other characteristics.
It is defined to be available on some ARMv6 processors (including
all SMP-capable ARMv6 variants) and on later processors. User space
may assume that directly accessing the register always yields the
expected value only on ARMv7 and above.
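A minimal sketch of the register access in question, assuming the usual CP15 c13 encoding of the user read-only thread ID register; read_tls_reg is a hypothetical helper, not part of this commit:

/* Hypothetical helper: reads the CP15 thread register that
 * HAS_TLS_REG advertises; on TLS_REG_EMUL configurations this
 * access would instead be trapped and emulated by the kernel. */
static inline unsigned long read_tls_reg(void)
{
	unsigned long tls;
	asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (tls));
	return tls;
}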
/*
* linux/arch/arm/lib/copy_page-armv4mc.S
*
* Copyright (C) 1995-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ASM optimised page copy and clear functions
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/constants.h>
.text
.align 5
/*
* ARMv4 mini-dcache optimised copy_user_page
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
* own copy_user_page that does the right thing.
*/
ENTRY(v4_mc_copy_user_page)
stmfd sp!, {r4, lr} @ 2
mov r4, r0
mov r0, r1
bl map_page_minicache
mov r1, #PAGE_SZ/64 @ 1
ldmia r0!, {r2, r3, ip, lr} @ 4
1: mcr p15, 0, r4, c7, c6, 1 @ 1 invalidate D line
stmia r4!, {r2, r3, ip, lr} @ 4
ldmia r0!, {r2, r3, ip, lr} @ 4+1
stmia r4!, {r2, r3, ip, lr} @ 4
ldmia r0!, {r2, r3, ip, lr} @ 4
mcr p15, 0, r4, c7, c6, 1 @ 1 invalidate D line
stmia r4!, {r2, r3, ip, lr} @ 4
ldmia r0!, {r2, r3, ip, lr} @ 4
subs r1, r1, #1 @ 1
stmia r4!, {r2, r3, ip, lr} @ 4
ldmneia r0!, {r2, r3, ip, lr} @ 4
bne 1b @ 1
ldmfd sp!, {r4, pc} @ 3
.align 5
/*
* ARMv4 optimised clear_user_page
*
* Same story as above.
*/
ENTRY(v4_mc_clear_user_page)
str lr, [sp, #-4]!
mov r1, #PAGE_SZ/64 @ 1
mov r2, #0 @ 1
mov r3, #0 @ 1
mov ip, #0 @ 1
mov lr, #0 @ 1
1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
subs r1, r1, #1 @ 1
bne 1b @ 1
ldr pc, [sp], #4
__INITDATA
.type v4_mc_user_fns, #object
ENTRY(v4_mc_user_fns)
.long v4_mc_clear_user_page
.long v4_mc_copy_user_page
.size v4_mc_user_fns, . - v4_mc_user_fns
/*
* linux/arch/arm/lib/copypage-armv4mc.S
*
* Copyright (C) 1995-2005 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This handles the mini data cache, as found on SA11x0 and XScale
* processors. When we copy a user page, we map it in such a way
* that accesses to this page will not touch the main data cache, but
* will be cached in the mini data cache. This prevents us thrashing
* the main data cache on page faults.
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
* 0xffff8000 to 0xffffffff is reserved for any ARM architecture
* specific hacks for copying pages efficiently.
*/
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_CACHEABLE)
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
static DEFINE_SPINLOCK(minicache_lock);
/*
* ARMv4 mini-dcache optimised copy_user_page
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
* own copy_user_page that does the right thing.
*/
static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
{
asm volatile(
"stmfd sp!, {r4, lr} @ 2\n\
mov r4, %2 @ 1\n\
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
ldmia %0!, {r2, r3, ip, lr} @ 4+1\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
subs r4, r4, #1 @ 1\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
ldmneia %0!, {r2, r3, ip, lr} @ 4\n\
bne 1b @ 1\n\
ldmfd sp!, {r4, pc} @ 3"
:
: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
}
void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
spin_lock(&minicache_lock);
set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
flush_tlb_kernel_page(COPYPAGE_MINICACHE);
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
spin_unlock(&minicache_lock);
}
/*
* ARMv4 optimised clear_user_page
*/
void __attribute__((naked))
v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
{
asm volatile(
"str lr, [sp, #-4]!\n\
mov r1, %0 @ 1\n\
mov r2, #0 @ 1\n\
mov r3, #0 @ 1\n\
mov ip, #0 @ 1\n\
mov lr, #0 @ 1\n\
1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
stmia r0!, {r2, r3, ip, lr} @ 4\n\
stmia r0!, {r2, r3, ip, lr} @ 4\n\
mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
stmia r0!, {r2, r3, ip, lr} @ 4\n\
stmia r0!, {r2, r3, ip, lr} @ 4\n\
subs r1, r1, #1 @ 1\n\
bne 1b @ 1\n\
ldr pc, [sp], #4"
:
: "I" (PAGE_SIZE / 64));
}
struct cpu_user_fns v4_mc_user_fns __initdata = {
.cpu_clear_user_page = v4_mc_clear_user_page,
.cpu_copy_user_page = v4_mc_copy_user_page,
};
@@ -26,8 +26,8 @@
#define to_address (0xffffc000)
#define to_pgprot PAGE_KERNEL
static pte_t *from_pte;
static pte_t *to_pte;
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
static DEFINE_SPINLOCK(v6_lock);
#define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
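A short worked example of the macro above, assuming the conventional ARM values SHMLBA = 4 * PAGE_SIZE and PAGE_SHIFT = 12:

/* Illustrative: 0x4000B000 & (SHMLBA - 1) = 0x3000, and
 * 0x3000 >> PAGE_SHIFT = colour 3. Pages sharing a colour can
 * alias in a VIPT cache, which is what the offset used below
 * indexes when picking the kernel mapping address. */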
@@ -74,8 +74,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
*/
spin_lock(&v6_lock);
set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
from = from_address + (offset << PAGE_SHIFT);
to = to_address + (offset << PAGE_SHIFT);
@@ -114,7 +114,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
*/
spin_lock(&v6_lock);
set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
flush_tlb_kernel_page(to);
clear_page((void *)to);
@@ -129,21 +129,6 @@ struct cpu_user_fns v6_user_fns __initdata = {
static int __init v6_userpage_init(void)
{
if (cache_is_vipt_aliasing()) {
pgd_t *pgd;
pmd_t *pmd;
pgd = pgd_offset_k(from_address);
pmd = pmd_alloc(&init_mm, pgd, from_address);
if (!pmd)
BUG();
from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
if (!from_pte)
BUG();
to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
if (!to_pte)
BUG();
cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
}
@@ -151,5 +136,4 @@ static int __init v6_userpage_init(void)
return 0;
}
__initcall(v6_userpage_init);
core_initcall(v6_userpage_init);
@@ -13,6 +13,29 @@
#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_CPU_CACHE_VIPT
#define ALIAS_FLUSH_START 0xffff4000
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
flush_tlb_kernel_page(to);
asm( "mcrr p15, 0, %1, %0, c14\n"
" mcrr p15, 0, %1, %0, c5\n"
:
: "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
: "cc");
}
#else
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
#endif
static void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
@@ -36,6 +59,18 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
if (!mapping)
return;
/*
* This is a page cache page. If we have a VIPT cache, we
* only need to do one flush - which would be at the relevant
* userspace colour, which is congruent with page->index.
*/
if (cache_is_vipt()) {
if (cache_is_vipt_aliasing())
flush_pfn_alias(page_to_pfn(page),
page->index << PAGE_CACHE_SHIFT);
return;
}
/*
* There are possible user space mappings of this page:
* - VIVT cache: we need to also write back and invalidate all user
@@ -57,8 +92,6 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
continue;
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
if (cache_is_vipt())
break;
}
flush_dcache_mmap_unlock(mapping);
}
......
@@ -37,6 +37,8 @@ pgprot_t pgprot_kernel;
EXPORT_SYMBOL(pgprot_kernel);
pmd_t *top_pmd;
struct cachepolicy {
const char policy[16];
unsigned int cr_mask;
@@ -142,6 +144,16 @@ __setup("noalign", noalign_setup);
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
return pmd_offset(pgd, virt);
}
static inline pmd_t *pmd_off_k(unsigned long virt)
{
return pmd_off(pgd_offset_k(virt), virt);
}
/*
* need to get a 16k page for level 1
*/
@@ -220,7 +232,7 @@ void free_pgd_slow(pgd_t *pgd)
return;
/* pgd is always present and good */
pmd = (pmd_t *)pgd;
pmd = pmd_off(pgd, 0);
if (pmd_none(*pmd))
goto free;
if (pmd_bad(*pmd)) {
@@ -246,9 +258,8 @@ void free_pgd_slow(pgd_t *pgd)
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
pmd_t *pmdp;
pmd_t *pmdp = pmd_off_k(virt);
pmdp = pmd_offset(pgd_offset_k(virt), virt);
if (virt & (1 << 20))
pmdp++;
@@ -283,11 +294,9 @@ alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
pmd_t *pmdp;
pmd_t *pmdp = pmd_off_k(virt);
pte_t *ptep;
pmdp = pmd_offset(pgd_offset_k(virt), virt);
if (pmd_none(*pmdp)) {
unsigned long pmdval;
ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
@@ -310,7 +319,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
*/
static inline void clear_mapping(unsigned long virt)
{
pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
pmd_clear(pmd_off_k(virt));
}
struct mem_types {
@@ -578,7 +587,7 @@ void setup_mm_for_reboot(char mode)
PMD_TYPE_SECT;
if (cpu_arch <= CPU_ARCH_ARMv5)
pmdval |= PMD_BIT4;
pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
pmd = pmd_off(pgd, i << PGDIR_SHIFT);
pmd[0] = __pmd(pmdval);
pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
flush_pmd_entry(pmd);
@@ -675,6 +684,8 @@ void __init memtable_init(struct meminfo *mi)
flush_cache_all();
flush_tlb_all();
top_pmd = pmd_off_k(VECTORS_HIGH);
}
/*
......
@@ -227,6 +227,30 @@
#define PD31_PF_TMR2OUT ( GPIO_PORTD | GPIO_PF | 31 )
#define PD31_BIN_SPI2_TXD ( GPIO_PORTD | GPIO_BIN | 31 )
/*
* PWM controller
*/
#define PWMC __REG(IMX_PWM_BASE + 0x00) /* PWM Control Register */
#define PWMS __REG(IMX_PWM_BASE + 0x04) /* PWM Sample Register */
#define PWMP __REG(IMX_PWM_BASE + 0x08) /* PWM Period Register */
#define PWMCNT __REG(IMX_PWM_BASE + 0x0C) /* PWM Counter Register */
#define PWMC_HCTR (0x01<<18) /* Halfword FIFO Data Swapping */
#define PWMC_BCTR (0x01<<17) /* Byte FIFO Data Swapping */
#define PWMC_SWR (0x01<<16) /* Software Reset */
#define PWMC_CLKSRC (0x01<<15) /* Clock Source */
#define PWMC_PRESCALER(x) (((x-1) & 0x7F) << 8) /* PRESCALER */
#define PWMC_IRQ (0x01<< 7) /* Interrupt Request */
#define PWMC_IRQEN (0x01<< 6) /* Interrupt Request Enable */
#define PWMC_FIFOAV (0x01<< 5) /* FIFO Available */
#define PWMC_EN (0x01<< 4) /* Enables/Disables the PWM */
#define PWMC_REPEAT(x) (((x) & 0x03) << 2) /* Sample Repeats */
#define PWMC_CLKSEL(x) (((x) & 0x03) << 0) /* Clock Selection */
#define PWMS_SAMPLE(x) ((x) & 0xFFFF) /* Contains a two-sample word */
#define PWMP_PERIOD(x) ((x) & 0xFFFF) /* Represents the PWM's period */
#define PWMC_COUNTER(x) ((x) & 0xFFFF) /* Represents the current count value */
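A minimal usage sketch of these definitions, assuming IMX_PWM_BASE is mapped and the PWM pad/clock are already set up elsewhere; the values are illustrative, not datasheet-derived:

/* Hypothetical example: roughly 50% duty output using the registers above. */
static void imx_pwm_example(void)
{
	PWMC = PWMC_SWR;		/* software reset, assumed to self-clear */
	while (PWMC & PWMC_SWR)
		;
	PWMP = PWMP_PERIOD(1024);	/* period, in prescaled clock ticks */
	PWMS = PWMS_SAMPLE(512);	/* sample word -> ~50% duty */
	PWMC = PWMC_PRESCALER(8) |	/* divide the selected clock by 8 */
	       PWMC_CLKSEL(0) |		/* clock selection field */
	       PWMC_REPEAT(0) |		/* no sample repeats */
	       PWMC_EN;			/* enable the PWM */
}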
/*
* DMA Controller
*/
......
/* linux/include/asm-arm/arch-s3c2410/regs-nand.h
*
* Copyright (c) 2004 Simtec Electronics <linux@simtec.co.uk>
* Copyright (c) 2004,2005 Simtec Electronics <linux@simtec.co.uk>
* http://www.simtec.co.uk/products/SWLINUX/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* S3C2410 clock register definitions
* S3C2410 NAND register definitions
*
* Changelog:
* 18-Aug-2004 BJD Copied file from 2.4 and updated
* 01-May-2005 BJD Added definitions for s3c2440 controller
*/
#ifndef __ASM_ARM_REGS_NAND
@@ -26,6 +27,22 @@
#define S3C2410_NFSTAT S3C2410_NFREG(0x10)
#define S3C2410_NFECC S3C2410_NFREG(0x14)
#define S3C2440_NFCONT S3C2410_NFREG(0x04)
#define S3C2440_NFCMD S3C2410_NFREG(0x08)
#define S3C2440_NFADDR S3C2410_NFREG(0x0C)
#define S3C2440_NFDATA S3C2410_NFREG(0x10)
#define S3C2440_NFECCD0 S3C2410_NFREG(0x14)
#define S3C2440_NFECCD1 S3C2410_NFREG(0x18)
#define S3C2440_NFECCD S3C2410_NFREG(0x1C)
#define S3C2440_NFSTAT S3C2410_NFREG(0x20)
#define S3C2440_NFESTAT0 S3C2410_NFREG(0x24)
#define S3C2440_NFESTAT1 S3C2410_NFREG(0x28)
#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
#define S3C2440_NFMECC1 S3C2410_NFREG(0x30)
#define S3C2440_NFSECC S3C2410_NFREG(0x34)
#define S3C2440_NFSBLK S3C2410_NFREG(0x38)
#define S3C2440_NFEBLK S3C2410_NFREG(0x3C)
#define S3C2410_NFCONF_EN (1<<15)
#define S3C2410_NFCONF_512BYTE (1<<14)
#define S3C2410_NFCONF_4STEP (1<<13)
@@ -37,7 +54,28 @@
#define S3C2410_NFSTAT_BUSY (1<<0)
/* think ECC can only be 8bit read? */
#define S3C2440_NFCONF_BUSWIDTH_8 (0<<0)
#define S3C2440_NFCONF_BUSWIDTH_16 (1<<0)
#define S3C2440_NFCONF_ADVFLASH (1<<3)
#define S3C2440_NFCONF_TACLS(x) ((x)<<12)
#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8)
#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4)
#define S3C2440_NFCONT_LOCKTIGHT (1<<13)
#define S3C2440_NFCONT_SOFTLOCK (1<<12)
#define S3C2440_NFCONT_ILLEGALACC_EN (1<<10)
#define S3C2440_NFCONT_RNBINT_EN (1<<9)
#define S3C2440_NFCONT_RN_FALLING (1<<8)
#define S3C2440_NFCONT_SPARE_ECCLOCK (1<<6)
#define S3C2440_NFCONT_MAIN_ECCLOCK (1<<5)
#define S3C2440_NFCONT_INITECC (1<<4)
#define S3C2440_NFCONT_nFCE (1<<1)
#define S3C2440_NFCONT_ENABLE (1<<0)
#define S3C2440_NFSTAT_READY (1<<0)
#define S3C2440_NFSTAT_nCE (1<<1)
#define S3C2440_NFSTAT_RnB_CHANGE (1<<2)
#define S3C2440_NFSTAT_ILLEGAL_ACCESS (1<<3)
#endif /* __ASM_ARM_REGS_NAND */
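A minimal bring-up sketch using the new s3c2440 definitions. S3C2410_NFCONF (the register at offset 0x00) and the timing values are assumptions, not datasheet-verified settings; __raw_writel comes from the surrounding kernel headers:

/* Hypothetical example: enable the 2440 NAND controller with
 * placeholder timings and freshly initialised ECC. */
static void s3c2440_nand_enable_example(void)
{
	unsigned long cfg;

	cfg = S3C2440_NFCONF_TACLS(3) |		/* address/command setup */
	      S3C2440_NFCONF_TWRPH0(7) |	/* write pulse width */
	      S3C2440_NFCONF_TWRPH1(7);		/* write pulse hold */
	__raw_writel(cfg, S3C2410_NFCONF);	/* NFCONF assumed at offset 0x00 */

	__raw_writel(S3C2440_NFCONT_INITECC |	/* reset the ECC generator */
		     S3C2440_NFCONT_ENABLE,	/* enable the controller */
		     S3C2440_NFCONT);
}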
@@ -114,19 +114,8 @@ extern void __cpu_copy_user_page(void *to, const void *from,
unsigned long user);
#endif
#define clear_user_page(addr,vaddr,pg) \
do { \
preempt_disable(); \
__cpu_clear_user_page(addr, vaddr); \
preempt_enable(); \
} while (0)
#define copy_user_page(to,from,vaddr,pg) \
do { \
preempt_disable(); \
__cpu_copy_user_page(to, from, vaddr); \
preempt_enable(); \
} while (0)
#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr)
#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
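For orientation, a sketch of the indirection behind __cpu_clear_user_page and __cpu_copy_user_page on multi-CPU builds; the MULTI_USER glue below is assumed from the surrounding header, not shown in this hunk:

/* Assumed glue: the __cpu_* names resolve through a function table
 * selected at boot, e.g. v4_mc_user_fns or v6_user_fns above. */
extern struct cpu_user_fns cpu_user;
#define __cpu_clear_user_page	cpu_user.cpu_clear_user_page
#define __cpu_copy_user_page	cpu_user.cpu_copy_user_page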
#define clear_page(page) memzero((void *)(page), PAGE_SIZE)
extern void copy_page(void *to, const void *from);
@@ -171,6 +160,9 @@ typedef unsigned long pgprot_t;
#endif /* STRICT_MM_TYPECHECKS */
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
......