Commit cbd884c9 authored by Len Brown

Merge intel.com:/home/lenb/bk/linux-2.6.2

into intel.com:/home/lenb/src/linux-acpi-test-2.6.2
parents 9da392c9 7508df7c
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 2
-EXTRAVERSION = -rc3
+EXTRAVERSION =
+NAME=Feisty Dunnart
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
...
@@ -121,7 +121,6 @@ void die_if_kernel(char *str, struct pt_regs *regs)
 	    !(((unsigned long) rw) & 0x7)) {
 		printk("Caller[%08lx]", rw->ins[7]);
 		print_symbol(": %s\n", rw->ins[7]);
-		printk("\n");
 		rw = (struct reg_window *)rw->ins[6];
 	}
 }
...
@@ -392,7 +392,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
 			goto bad_area_nosemaphore;
-		pmd_val(*pmd) = pmd_val(*pmd_k);
+		*pmd = *pmd_k;
 		return;
 	}
 }
...
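A side note on the pmd copy above: the pgtsrmmu.h hunk later in this merge describes a software pmd_t that is "16 times larger than normal" because it bundles 16 hardware page-table pointers, so a whole-struct assignment updates every hardware entry, whereas the old pmd_val() form presumably touched only the first word. A minimal user-space sketch of that difference, using a hypothetical four-entry pmd_t and a pmd_val() stand-in rather than the real kernel definitions:

#include <stdio.h>

/* Hypothetical stand-in: a "software" pmd that bundles several
 * hardware page-table pointers, like the enlarged sparc32 pmd_t. */
typedef struct { unsigned long pmdv[4]; } pmd_t;
#define pmd_val(x) ((x).pmdv[0])   /* only the first hardware entry */

int main(void)
{
	pmd_t src = { { 0x100, 0x200, 0x300, 0x400 } };
	pmd_t a = { { 0 } }, b = { { 0 } };

	pmd_val(a) = pmd_val(src);  /* old style: copies one word only  */
	b = src;                    /* new style: copies every entry    */

	printf("a: %lx %lx %lx %lx\n", a.pmdv[0], a.pmdv[1], a.pmdv[2], a.pmdv[3]);
	printf("b: %lx %lx %lx %lx\n", b.pmdv[0], b.pmdv[1], b.pmdv[2], b.pmdv[3]);
	return 0;
}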
@@ -51,26 +51,6 @@
 #include <asm/btfixup.h>
-/*
- * To support pagetables in highmem, Linux introduces APIs which
- * return struct page* and generally manipulate page tables when
- * they are not mapped into kernel space. Our hardware page tables
- * are smaller than pages. We lump hardware tabes into big, page sized
- * software tables.
- *
- * PMD_SHIFT determines the size of the area a second-level page table entry
- * can map, and our pmd_t is 16 times larger than normal.
- */
-#define SRMMU_PTRS_PER_PMD_SOFT 0x4 /* Each pmd_t contains 16 hard PTPs */
-#define SRMMU_PTRS_PER_PTE_SOFT 0x400 /* 16 hard tables per 4K page */
-#define SRMMU_PTE_SZ_SOFT 0x1000 /* same as above, in bytes */
-#define SRMMU_PMD_SHIFT_SOFT 22
-#define SRMMU_PMD_SIZE_SOFT (1UL << SRMMU_PMD_SHIFT_SOFT)
-#define SRMMU_PMD_MASK_SOFT (~(SRMMU_PMD_SIZE_SOFT-1))
-// #define SRMMU_PMD_ALIGN(addr) (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK)
 enum mbus_module srmmu_modtype;
 unsigned int hwbug_bitmask;
 int vac_cache_size;
...
@@ -56,8 +56,8 @@
 #define DRV_MODULE_NAME "tg3"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "2.5"
-#define DRV_MODULE_RELDATE "December 22, 2003"
+#define DRV_MODULE_VERSION "2.6"
+#define DRV_MODULE_RELDATE "February 3, 2004"
 #define TG3_DEF_MAC_MODE 0
 #define TG3_DEF_RX_MODE 0
@@ -5904,7 +5904,8 @@ do { p = orig_p + (reg); \
 	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
 	GET_REG32_1(DMAC_MODE);
 	GET_REG32_LOOP(GRC_MODE, 0x4c);
-	GET_REG32_LOOP(NVRAM_CMD, 0x24);
+	if (tp->tg3_flags & TG3_FLAG_NVRAM)
+		GET_REG32_LOOP(NVRAM_CMD, 0x24);
 #undef __GET_REG32
 #undef GET_REG32_LOOP
@@ -7190,26 +7191,33 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
 	test_desc.addr_lo = buf_dma & 0xffffffff;
 	test_desc.nic_mbuf = 0x00002100;
 	test_desc.len = size;
+	/*
+	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
+	 * the *second* time the tg3 driver was getting loaded after an
+	 * initial scan.
+	 *
+	 * Broadcom tells me:
+	 *    ...the DMA engine is connected to the GRC block and a DMA
+	 *    reset may affect the GRC block in some unpredictable way...
+	 *    The behavior of resets to individual blocks has not been tested.
+	 *
+	 * Broadcom noted the GRC reset will also reset all sub-components.
+	 */
 	if (to_device) {
 		test_desc.cqid_sqid = (13 << 8) | 2;
-		tw32(RDMAC_MODE, RDMAC_MODE_RESET);
-		tr32(RDMAC_MODE);
-		udelay(40);
 		tw32(RDMAC_MODE, RDMAC_MODE_ENABLE);
 		tr32(RDMAC_MODE);
 		udelay(40);
 	} else {
 		test_desc.cqid_sqid = (16 << 8) | 7;
-		tw32(WDMAC_MODE, WDMAC_MODE_RESET);
-		tr32(WDMAC_MODE);
-		udelay(40);
 		tw32(WDMAC_MODE, WDMAC_MODE_ENABLE);
 		tr32(WDMAC_MODE);
 		udelay(40);
 	}
-	test_desc.flags = 0x00000004;
+	test_desc.flags = 0x00000005;
 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
 		u32 val;
@@ -7368,9 +7376,19 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 		/* Remove this if it causes problems for some boards. */
 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
-	}
-	tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
+		/* On 5700/5701 chips, we need to set this bit.
+		 * Otherwise the chip will issue cacheline transactions
+		 * to streamable DMA memory with not all the byte
+		 * enables turned on. This is an error on several
+		 * RISC PCI controllers, in particular sparc64.
+		 *
+		 * On 5703/5704 chips, this bit has been reassigned
+		 * a different meaning. In particular, it is used
+		 * on those chips to enable a PCI-X workaround.
+		 */
+		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
+	}
 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
@@ -7385,28 +7403,38 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 		goto out;
 	while (1) {
-		u32 *p, i;
+		u32 *p = buf, i;
-		p = buf;
 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
 			p[i] = i;
 		/* Send the buffer to the chip. */
 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
-		if (ret)
+		if (ret) {
+			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
 			break;
+		}
-		p = buf;
-		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
+		/* validate data reached card RAM correctly. */
+		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
+			u32 val;
+			tg3_read_mem(tp, 0x2100 + (i*4), &val);
+			if (val != p[i]) {
+				printk(KERN_ERR " tg3_test_dma() Card buffer currupted on write! (%d != %d)\n", val, i);
+				/* ret = -ENODEV here? */
+			}
 			p[i] = 0;
+		}
 		/* Now read it back. */
 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
-		if (ret)
+		if (ret) {
+			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
 			break;
+		}
 		/* Verify it. */
-		p = buf;
 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
 			if (p[i] == i)
 				continue;
@@ -7417,6 +7445,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 				break;
 			} else {
+				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
 				ret = -ENODEV;
 				goto out;
 			}
...
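Taken together, the reworked loop above is a three-stage loopback test: fill a buffer with an incrementing pattern, DMA it to the card, check the card-side copy word by word, zero the host buffer, DMA it back, and compare again, with the new printk calls pinpointing which stage failed. A user-space sketch of that flow, with memcpy standing in for the real DMA engine and an arbitrary TEST_BUFFER_SIZE chosen for the example:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TEST_BUFFER_SIZE 0x400

/* Stand-in "device memory"; the real test reads NIC SRAM at 0x2100. */
static uint32_t card_ram[TEST_BUFFER_SIZE / sizeof(uint32_t)];

/* Stand-in for tg3_do_test_dma(): just a copy in the right direction. */
static int do_test_dma(uint32_t *buf, size_t size, int to_device)
{
	if (to_device)
		memcpy(card_ram, buf, size);
	else
		memcpy(buf, card_ram, size);
	return 0;   /* the real helper can also fail or time out */
}

int main(void)
{
	uint32_t buf[TEST_BUFFER_SIZE / sizeof(uint32_t)];
	size_t i;

	/* Fill with a known incrementing pattern and push it out. */
	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(uint32_t); i++)
		buf[i] = i;
	if (do_test_dma(buf, TEST_BUFFER_SIZE, 1))
		return 1;

	/* Validate the data reached "card RAM", then clear the host copy. */
	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(uint32_t); i++) {
		if (card_ram[i] != buf[i])
			fprintf(stderr, "corrupted on write at %zu\n", i);
		buf[i] = 0;
	}

	/* Pull it back and verify the round trip. */
	if (do_test_dma(buf, TEST_BUFFER_SIZE, 0))
		return 1;
	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(uint32_t); i++) {
		if (buf[i] != i) {
			fprintf(stderr, "corrupted on read back at %zu\n", i);
			return 1;
		}
	}
	printf("DMA loopback pattern verified\n");
	return 0;
}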
@@ -38,10 +38,12 @@ extern void kmap_init(void) __init;
 /*
  * Right now we initialize only a single pte table. It can be extended
  * easily, subsequent pte tables have to be allocated in one physical
- * chunk of RAM.
+ * chunk of RAM. Currently the simplest way to do this is to align the
+ * pkmap region on a pagetable boundary (4MB).
  */
-#define PKMAP_BASE (SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
 #define LAST_PKMAP 1024
+#define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
+#define PKMAP_BASE SRMMU_PMD_ALIGN_SOFT(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
 #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
 #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
...
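A quick sanity check of the numbers behind this hunk, assuming the usual sparc32 4 KB pages (PAGE_SHIFT = 12): LAST_PKMAP is 1024, so PKMAP_SIZE is 1024 << 12 = 4 MB, exactly one software PMD region (SRMMU_PMD_SHIFT_SOFT = 22 in the pgtsrmmu.h hunk below). Aligning PKMAP_BASE with SRMMU_PMD_ALIGN_SOFT therefore keeps the whole pkmap window inside a single pte table, as the comment requires. A small host-side sketch of that arithmetic, with the constants copied in as assumptions:

#include <assert.h>
#include <stdio.h>

/* Assumed values: 4 KB pages plus the SOFT constants from this merge. */
#define PAGE_SHIFT            12
#define LAST_PKMAP            1024
#define PKMAP_SIZE            ((unsigned long)LAST_PKMAP << PAGE_SHIFT)
#define SRMMU_PMD_SHIFT_SOFT  22
#define SRMMU_PMD_SIZE_SOFT   (1UL << SRMMU_PMD_SHIFT_SOFT)
#define SRMMU_PMD_MASK_SOFT   (~(SRMMU_PMD_SIZE_SOFT - 1))
#define SRMMU_PMD_ALIGN_SOFT(addr) \
	(((addr) + SRMMU_PMD_SIZE_SOFT - 1) & SRMMU_PMD_MASK_SOFT)

int main(void)
{
	/* The pkmap window is exactly one software PMD region... */
	assert(PKMAP_SIZE == SRMMU_PMD_SIZE_SOFT);

	/* ...so a PMD-aligned base keeps it inside one pte table. */
	unsigned long base = SRMMU_PMD_ALIGN_SOFT(0xfc000123UL); /* arbitrary example address */
	printf("aligned base = %#lx, end = %#lx\n",
	       base, base + PKMAP_SIZE - 1);
	assert((base & ~SRMMU_PMD_MASK_SOFT) == 0);
	return 0;
}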
@@ -36,6 +36,25 @@
 #define SRMMU_PMD_TABLE_SIZE 0x100 /* 64 entries, 4 bytes a piece */
 #define SRMMU_PGD_TABLE_SIZE 0x400 /* 256 entries, 4 bytes a piece */
+/*
+ * To support pagetables in highmem, Linux introduces APIs which
+ * return struct page* and generally manipulate page tables when
+ * they are not mapped into kernel space. Our hardware page tables
+ * are smaller than pages. We lump hardware tabes into big, page sized
+ * software tables.
+ *
+ * PMD_SHIFT determines the size of the area a second-level page table entry
+ * can map, and our pmd_t is 16 times larger than normal.
+ */
+#define SRMMU_PTRS_PER_PTE_SOFT (PAGE_SIZE/4) /* 16 hard tables per 4K page */
+#define SRMMU_PTRS_PER_PMD_SOFT 4 /* Each pmd_t contains 16 hard PTPs */
+#define SRMMU_PTE_SZ_SOFT PAGE_SIZE /* same as above, in bytes */
+#define SRMMU_PMD_SHIFT_SOFT 22
+#define SRMMU_PMD_SIZE_SOFT (1UL << SRMMU_PMD_SHIFT_SOFT)
+#define SRMMU_PMD_MASK_SOFT (~(SRMMU_PMD_SIZE_SOFT-1))
+#define SRMMU_PMD_ALIGN_SOFT(addr) (((addr)+SRMMU_PMD_SIZE_SOFT-1)&SRMMU_PMD_MASK_SOFT)
 /* Definition of the values in the ET field of PTD's and PTE's */
 #define SRMMU_ET_MASK 0x3
 #define SRMMU_ET_INVALID 0x0
...
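The SOFT constants above encode the lumping described in the comment: a software pte table holds PAGE_SIZE/4 = 1024 entries, i.e. 16 hardware tables of 64 entries each (the 64-entry figure is implied by "16 hard tables per 4K page"), and therefore maps 1024 x 4 KB = 4 MB, which is where SRMMU_PMD_SHIFT_SOFT = 22 comes from. A compile-and-run sketch of that consistency check, under those assumptions:

#include <assert.h>

/* Assumed hardware parameters implied by the comment above. */
#define PAGE_SHIFT               12
#define PAGE_SIZE                (1UL << PAGE_SHIFT)
#define SRMMU_HW_PTRS_PER_PTE    64   /* entries in one hardware pte table */

/* The software ("SOFT") constants from the diff. */
#define SRMMU_PTRS_PER_PTE_SOFT  (PAGE_SIZE / 4)
#define SRMMU_PTE_SZ_SOFT        PAGE_SIZE
#define SRMMU_PMD_SHIFT_SOFT     22
#define SRMMU_PMD_SIZE_SOFT      (1UL << SRMMU_PMD_SHIFT_SOFT)

int main(void)
{
	/* 16 hardware tables are lumped into each page-sized software table. */
	assert(SRMMU_PTRS_PER_PTE_SOFT / SRMMU_HW_PTRS_PER_PTE == 16);

	/* A software pte table maps 1024 * 4 KB = 4 MB, which is exactly
	 * the region size that SRMMU_PMD_SHIFT_SOFT = 22 describes. */
	assert(SRMMU_PTRS_PER_PTE_SOFT * PAGE_SIZE == SRMMU_PMD_SIZE_SOFT);
	return 0;
}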
@@ -254,6 +254,15 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 	/* 'z' support added 23/7/1999 S.H. */
 	/* 'z' changed to 'Z' --davidm 1/25/99 */
+	/* Reject out-of-range values early */
+	if (unlikely((int) size < 0)) {
+		/* There can be only one.. */
+		static int warn = 1;
+		WARN_ON(warn);
+		warn = 0;
+		return 0;
+	}
 	str = buf;
 	end = buf + size - 1;
@@ -498,7 +507,7 @@ EXPORT_SYMBOL(snprintf);
  */
 int vsprintf(char *buf, const char *fmt, va_list args)
 {
-	return vsnprintf(buf, 0xFFFFFFFFUL, fmt, args);
+	return vsnprintf(buf, (~0U)>>1, fmt, args);
 }
 EXPORT_SYMBOL(vsprintf);
...
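The two vsprintf.c changes work together: a huge bound such as the old 0xFFFFFFFFUL makes "end = buf + size - 1" wrap around, so vsprintf now caps its bound at (~0U)>>1, i.e. INT_MAX, and any size whose int interpretation is negative is rejected early with a one-time warning. A minimal user-space sketch of the same guard, with a stub WARN_ON/unlikely standing in for the kernel macros and the formatting itself delegated to the C library:

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel helpers used above. */
#define unlikely(x)  (x)
#define WARN_ON(cond) \
	do { if (cond) fprintf(stderr, "WARN_ON at %s:%d\n", __FILE__, __LINE__); } while (0)

/* Sketch of the guard: reject sizes that are really negative ints. */
static int my_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
	if (unlikely((int) size < 0)) {
		static int warn = 1;
		WARN_ON(warn);
		warn = 0;
		return 0;
	}
	return vsnprintf(buf, size, fmt, args);  /* defer to the C library */
}

static int checked_snprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	va_start(args, fmt);
	n = my_vsnprintf(buf, size, fmt, args);
	va_end(args);
	return n;
}

int main(void)
{
	char buf[64];

	/* Normal call goes through; the bound behaves like (~0U)>>1 would. */
	printf("%d\n", checked_snprintf(buf, sizeof(buf), "value=%d", 42));

	/* A "negative" size (huge size_t) is rejected instead of wrapping. */
	printf("%d\n", checked_snprintf(buf, (size_t)-1, "value=%d", 42));
	return 0;
}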
@@ -414,6 +414,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	to->nfmark = from->nfmark;
 	to->nfcache = from->nfcache;
 	/* Connection association is same as pre-frag packet */
+	nf_conntrack_put(to->nfct);
 	to->nfct = from->nfct;
 	nf_conntrack_get(to->nfct);
 #ifdef CONFIG_BRIDGE_NETFILTER
...
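The added nf_conntrack_put() drops the reference the destination skb already holds before it is overwritten; without it, the previous conntrack entry's refcount would never return to zero. A generic sketch of that put-before-overwrite pattern, with a hypothetical refcounted object standing in for the conntrack entry:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object standing in for the conntrack entry. */
struct ct {
	int refcnt;
};

static void ct_get(struct ct *c) { if (c) c->refcnt++; }
static void ct_put(struct ct *c)
{
	if (c && --c->refcnt == 0) {
		printf("freeing ct %p\n", (void *)c);
		free(c);
	}
}

struct pkt {
	struct ct *nfct;
};

/* Copy metadata from one packet to another, as ip_copy_metadata() does. */
static void copy_metadata(struct pkt *to, const struct pkt *from)
{
	ct_put(to->nfct);       /* drop the reference 'to' already holds */
	to->nfct = from->nfct;  /* share the association of the original */
	ct_get(to->nfct);       /* and take a reference on it            */
}

int main(void)
{
	struct ct *a = calloc(1, sizeof(*a));
	struct ct *b = calloc(1, sizeof(*b));
	struct pkt from = { .nfct = a }, to = { .nfct = b };

	a->refcnt = 1;          /* held by 'from' */
	b->refcnt = 1;          /* held by 'to'   */

	copy_metadata(&to, &from);   /* 'b' is released here; 'a' now has two holders */
	printf("a->refcnt = %d\n", a->refcnt);

	/* tear down */
	ct_put(to.nfct);
	ct_put(from.nfct);
	return 0;
}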