Commit d831e5a2 authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.6
into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 160cc4eb 5209a2ab
......@@ -620,7 +620,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
cpumask_t tmp = CPU_MASK_NONE;
irq = virt_irq_to_real(irq_offset_down(virq));
if (irq == XICS_IPI)
if (irq == XICS_IPI || irq == NO_IRQ)
return;
status = rtas_call(ibm_get_xive, 1, 3, (void *)&xics_status, irq);
......
......@@ -358,13 +358,6 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
nsectors.all = (u16) rq->nr_sectors;
if (drive->using_tcq && idedisk_start_tag(drive, rq)) {
if (!ata_pending_commands(drive))
BUG();
return ide_started;
}
if (IDE_CONTROL_REG)
hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
......@@ -482,7 +475,7 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
((lba48) ? WIN_READ_EXT : WIN_READ));
ide_execute_command(drive, command, &read_intr, WAIT_CMD, NULL);
return ide_started;
} else if (rq_data_dir(rq) == WRITE) {
} else {
ide_startstop_t startstop;
#ifdef CONFIG_BLK_DEV_IDE_TCQ
if (blk_rq_tagged(rq))
......@@ -520,9 +513,6 @@ ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector
}
return ide_started;
}
blk_dump_rq_flags(rq, "__ide_do_rw_disk - bad command");
ide_end_request(drive, 0, 0);
return ide_stopped;
}
EXPORT_SYMBOL_GPL(__ide_do_rw_disk);
......@@ -539,26 +529,11 @@ static ide_startstop_t lba_48_rw_disk(ide_drive_t *, struct request *, unsigned
*/
ide_startstop_t __ide_do_rw_disk (ide_drive_t *drive, struct request *rq, sector_t block)
{
BUG_ON(drive->blocked);
if (!blk_fs_request(rq)) {
blk_dump_rq_flags(rq, "__ide_do_rw_disk - bad command");
ide_end_request(drive, 0, 0);
return ide_stopped;
}
/*
* 268435455 == 137439 MB or 28bit limit
*
* need to add split taskfile operations based on 28bit threshold.
*/
if (drive->using_tcq && idedisk_start_tag(drive, rq)) {
if (!ata_pending_commands(drive))
BUG();
return ide_started;
}
if (drive->addressing == 1) /* 48-bit LBA */
return lba_48_rw_disk(drive, rq, (unsigned long long) block);
if (drive->select.b.lba) /* 28-bit LBA */
......@@ -734,6 +709,21 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s
{
ide_hwif_t *hwif = HWIF(drive);
BUG_ON(drive->blocked);
if (!blk_fs_request(rq)) {
blk_dump_rq_flags(rq, "ide_do_rw_disk - bad command");
ide_end_request(drive, 0, 0);
return ide_stopped;
}
if (drive->using_tcq && idedisk_start_tag(drive, rq)) {
if (!ata_pending_commands(drive))
BUG();
return ide_started;
}
if (hwif->rw_disk)
return hwif->rw_disk(drive, rq, block);
else
......
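The two ide-disk hunks above move the bad-command check and the TCQ tag setup out of __ide_do_rw_disk() and into ide_do_rw_disk(), so they run once before dispatching to an optional host-specific rw_disk hook. A minimal sketch of the resulting entry point, reassembled from the hunks (the final fallthrough to __ide_do_rw_disk() is elided in the hunk and is an assumption here):

/* Sketch only: the consolidated dispatch order after this change. */
static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
				      sector_t block)
{
	ide_hwif_t *hwif = HWIF(drive);

	BUG_ON(drive->blocked);

	if (!blk_fs_request(rq)) {			/* not a filesystem request */
		blk_dump_rq_flags(rq, "ide_do_rw_disk - bad command");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}

	if (drive->using_tcq && idedisk_start_tag(drive, rq)) {
		if (!ata_pending_commands(drive))	/* no tag and nothing queued */
			BUG();
		return ide_started;			/* wait for a queued command to finish */
	}

	if (hwif->rw_disk)				/* host-specific path, e.g. promise_rw_disk */
		return hwif->rw_disk(drive, rq, block);

	return __ide_do_rw_disk(drive, rq, block);	/* assumed: generic taskfile path */
}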
......@@ -767,7 +767,7 @@ int ide_driveid_update (ide_drive_t *drive)
SELECT_MASK(drive, 1);
if (IDE_CONTROL_REG)
hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
ide_delay_50ms();
msleep(50);
hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
timeout = jiffies + WAIT_WORSTCASE;
do {
......@@ -775,9 +775,9 @@ int ide_driveid_update (ide_drive_t *drive)
SELECT_MASK(drive, 0);
return 0; /* drive timed-out */
}
ide_delay_50ms(); /* give drive a breather */
msleep(50); /* give drive a breather */
} while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
ide_delay_50ms(); /* wait for IRQ and DRQ_STAT */
msleep(50); /* wait for IRQ and DRQ_STAT */
if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) {
SELECT_MASK(drive, 0);
printk("%s: CHECK for good STATUS\n", drive->name);
......@@ -827,7 +827,7 @@ int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
u8 stat;
// while (HWGROUP(drive)->busy)
// ide_delay_50ms();
// msleep(50);
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->ide_dma_check) /* check if host supports DMA */
......
......@@ -283,9 +283,10 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
unsigned long timeout;
u8 s = 0, a = 0;
/* take a deep breath */
msleep(50);
if (IDE_CONTROL_REG) {
/* take a deep breath */
ide_delay_50ms();
a = hwif->INB(IDE_ALTSTATUS_REG);
s = hwif->INB(IDE_STATUS_REG);
if ((a ^ s) & ~INDEX_STAT) {
......@@ -297,10 +298,8 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
/* use non-intrusive polling */
hd_status = IDE_ALTSTATUS_REG;
}
} else {
ide_delay_50ms();
} else
hd_status = IDE_STATUS_REG;
}
/* set features register for atapi
* identify command to be sure of reply
......@@ -324,11 +323,11 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
return 1;
}
/* give drive a breather */
ide_delay_50ms();
msleep(50);
} while ((hwif->INB(hd_status)) & BUSY_STAT);
/* wait for IRQ and DRQ_STAT */
ide_delay_50ms();
msleep(50);
if (OK_STAT((hwif->INB(IDE_STATUS_REG)), DRQ_STAT, BAD_R_STAT)) {
unsigned long flags;
......@@ -457,15 +456,15 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
/* needed for some systems
* (e.g. crw9624 as drive0 with disk as slave)
*/
ide_delay_50ms();
msleep(50);
SELECT_DRIVE(drive);
ide_delay_50ms();
msleep(50);
if (hwif->INB(IDE_SELECT_REG) != drive->select.all && !drive->present) {
if (drive->select.b.unit != 0) {
/* exit with drive0 selected */
SELECT_DRIVE(&hwif->drives[0]);
/* allow BUSY_STAT to assert & clear */
ide_delay_50ms();
msleep(50);
}
/* no i/f present: mmm.. this should be a 4 -ml */
return 3;
......@@ -488,14 +487,14 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
printk("%s: no response (status = 0x%02x), "
"resetting drive\n", drive->name,
hwif->INB(IDE_STATUS_REG));
ide_delay_50ms();
msleep(50);
hwif->OUTB(drive->select.all, IDE_SELECT_REG);
ide_delay_50ms();
msleep(50);
hwif->OUTB(WIN_SRST, IDE_COMMAND_REG);
timeout = jiffies;
while (((hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) &&
time_before(jiffies, timeout + WAIT_WORSTCASE))
ide_delay_50ms();
msleep(50);
rc = try_to_identify(drive, cmd);
}
if (rc == 1)
......@@ -510,7 +509,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
if (drive->select.b.unit != 0) {
/* exit with drive0 selected */
SELECT_DRIVE(&hwif->drives[0]);
ide_delay_50ms();
msleep(50);
/* ensure drive irq is clear */
(void) hwif->INB(IDE_STATUS_REG);
}
......@@ -527,7 +526,7 @@ static void enable_nest (ide_drive_t *drive)
printk("%s: enabling %s -- ", hwif->name, drive->id->model);
SELECT_DRIVE(drive);
ide_delay_50ms();
msleep(50);
hwif->OUTB(EXABYTE_ENABLE_NEST, IDE_COMMAND_REG);
timeout = jiffies + WAIT_WORSTCASE;
do {
......@@ -535,10 +534,10 @@ static void enable_nest (ide_drive_t *drive)
printk("failed (timeout)\n");
return;
}
ide_delay_50ms();
msleep(50);
} while ((hwif->INB(IDE_STATUS_REG)) & BUSY_STAT);
ide_delay_50ms();
msleep(50);
if (!OK_STAT((hwif->INB(IDE_STATUS_REG)), 0, BAD_STAT)) {
printk("failed (status = 0x%02x)\n", hwif->INB(IDE_STATUS_REG));
......@@ -781,7 +780,7 @@ void probe_hwif (ide_hwif_t *hwif)
udelay(10);
hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
do {
ide_delay_50ms();
msleep(50);
stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
} while ((stat & BUSY_STAT) && time_after(timeout, jiffies));
......
......@@ -1387,26 +1387,6 @@ void ide_add_generic_settings (ide_drive_t *drive)
ide_add_setting(drive, "ide-scsi", SETTING_RW, -1, HDIO_SET_IDE_SCSI, TYPE_BYTE, 0, 1, 1, 1, &drive->scsi, ide_atapi_to_scsi);
}
/*
* Delay for *at least* 50ms. As we don't know how much time is left
* until the next tick occurs, we wait an extra tick to be safe.
* This is used only during the probing/polling for drives at boot time.
*
* However, its usefullness may be needed in other places, thus we export it now.
* The future may change this to a millisecond setable delay.
*/
void ide_delay_50ms (void)
{
#ifndef CONFIG_BLK_DEV_IDECS
mdelay(50);
#else
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1+HZ/20);
#endif /* CONFIG_BLK_DEV_IDECS */
}
EXPORT_SYMBOL(ide_delay_50ms);
int system_bus_clock (void)
{
return((int) ((!system_bus_speed) ? ide_system_bus_speed() : system_bus_speed ));
......
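Every IDE hunk above converts the removed ide_delay_50ms() helper (an mdelay(50), or a one-jiffy-padded schedule_timeout() when CONFIG_BLK_DEV_IDECS is set) into msleep(50), which sleeps for at least the requested time with no HZ arithmetic. A condensed sketch of the probe/poll idiom after the conversion, following the ide_driveid_update() loop shown earlier:

/* Sketch of the polling idiom after the conversion. */
timeout = jiffies + WAIT_WORSTCASE;
do {
	if (time_after(jiffies, timeout)) {
		SELECT_MASK(drive, 0);
		return 0;			/* drive timed out */
	}
	msleep(50);				/* give the drive a breather */
} while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
msleep(50);					/* wait for IRQ and DRQ_STAT */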
......@@ -282,8 +282,8 @@ int __init detect_pdc4030(ide_hwif_t *hwif)
hwif->OUTB(0xF3, IDE_SECTOR_REG);
hwif->OUTB(0x14, IDE_SELECT_REG);
hwif->OUTB(PROMISE_EXTENDED_COMMAND, IDE_COMMAND_REG);
ide_delay_50ms();
msleep(50);
if (hwif->INB(IDE_ERROR_REG) == 'P' &&
hwif->INB(IDE_NSECTOR_REG) == 'T' &&
......@@ -756,12 +756,6 @@ static ide_startstop_t promise_rw_disk (ide_drive_t *drive, struct request *rq,
BUG_ON(rq->nr_sectors > 127);
if (!blk_fs_request(rq)) {
blk_dump_rq_flags(rq, "promise_rw_disk - bad command");
DRIVER(drive)->end_request(drive, 0, 0);
return ide_stopped;
}
#ifdef DEBUG
printk(KERN_DEBUG "%s: %sing: LBAsect=%lu, sectors=%lu\n",
drive->name, rq_data_dir(rq) ? "writ" : "read",
......
......@@ -2,26 +2,38 @@
#define _ASM_GENERIC_PGTABLE_H
#ifndef __HAVE_ARCH_PTEP_ESTABLISH
#ifndef ptep_update_dirty_accessed
#define ptep_update_dirty_accessed(__ptep, __entry, __dirty) set_pte(__ptep, __entry)
#endif
/*
* Establish a new mapping:
* - flush the old one
* - update the page tables
* - inform the TLB about the new one
*
* We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
* We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
*
* Note: the old pte is known to not be writable, so we don't need to
* worry about dirty bits etc getting lost.
*/
#define ptep_establish(__vma, __address, __ptep, __entry, __dirty) \
do { \
ptep_update_dirty_accessed(__ptep, __entry, __dirty); \
#define ptep_establish(__vma, __address, __ptep, __entry) \
do { \
set_pte(__ptep, __entry); \
flush_tlb_page(__vma, __address); \
} while (0)
#endif
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
* Largely same as above, but only sets the access flags (dirty,
* accessed, and writable). Furthermore, we know it always gets set
* to a "more permissive" setting, which allows most architectures
* to optimize this.
*/
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do { \
set_pte(__ptep, __entry); \
flush_tlb_page(__vma, __address); \
} while (0)
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
......
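The generic header above splits the old interface in two: ptep_establish() installs a brand-new mapping (the old PTE is known not to be writable, so no dirty bit can be lost), while ptep_set_access_flags() only makes an existing mapping more permissive (dirty, accessed, writable) and can therefore skip work on most architectures. A hedged sketch of the caller-side contract, mirroring the mm/memory.c hunks at the end of this diff:

/* Installing a different page (break_cow): full establish, flushes the old PTE. */
entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)), vma);
ptep_establish(vma, address, page_table, entry);

/* Same page, only upgrading permissions (do_wp_page, handle_pte_fault):
 * the cheaper access-flags update is sufficient. */
entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)), vma);
ptep_set_access_flags(vma, address, page_table, entry, 1);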
......@@ -325,9 +325,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* bit at the same time.
*/
#define update_mmu_cache(vma,address,pte) do { } while (0)
#define ptep_update_dirty_accessed(__ptep, __entry, __dirty) \
do { \
if (__dirty) set_pte(__ptep, __entry); \
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do { \
if (__dirty) { \
(__ptep)->pte_low = (__entry).pte_low; \
flush_tlb_page(__vma, __address); \
} \
} while (0)
/* Encode and de-code a swap entry */
......
......@@ -548,6 +548,16 @@ static inline void ptep_mkdirty(pte_t *ptep)
pte_update(ptep, 0, _PAGE_DIRTY);
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
unsigned long bits = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
pte_update(ptep, 0, bits);
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
__ptep_set_access_flags(__ptep, __entry, __dirty)
/*
* Macro to mark a page protection value as "uncacheable".
*/
......
......@@ -306,7 +306,10 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr)
return old;
}
/* PTE updating functions */
/* PTE updating functions, this function puts the PTE in the
* batch, doesn't actually triggers the hash flush immediately,
* you need to call flush_tlb_pending() to do that.
*/
extern void hpte_update(pte_t *ptep, unsigned long pte, int wrprot);
static inline int ptep_test_and_clear_young(pte_t *ptep)
......@@ -318,7 +321,7 @@ static inline int ptep_test_and_clear_young(pte_t *ptep)
old = pte_update(ptep, _PAGE_ACCESSED);
if (old & _PAGE_HASHPTE) {
hpte_update(ptep, old, 0);
flush_tlb_pending(); /* XXX generic code doesn't flush */
flush_tlb_pending();
}
return (old & _PAGE_ACCESSED) != 0;
}
......@@ -396,11 +399,37 @@ static inline void pte_clear(pte_t * ptep)
*/
static inline void set_pte(pte_t *ptep, pte_t pte)
{
if (pte_present(*ptep))
if (pte_present(*ptep)) {
pte_clear(ptep);
flush_tlb_pending();
}
*ptep = __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS;
}
/* Set the dirty and/or accessed bits atomically in a linux PTE, this
* function doesn't need to flush the hash entry
*/
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
unsigned long bits = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
unsigned long old, tmp;
__asm__ __volatile__(
"1: ldarx %0,0,%4\n\
andi. %1,%0,%6\n\
bne- 1b \n\
or %0,%3,%0\n\
stdcx. %0,0,%4\n\
bne- 1b"
:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
:"r" (bits), "r" (ptep), "m" (ptep), "i" (_PAGE_BUSY)
:"cc");
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
__ptep_set_access_flags(__ptep, __entry, __dirty)
/*
* Macro to mark a page protection value as "uncacheable".
*/
......
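The ldarx/stdcx. loop above ORs the dirty/accessed/RW bits into the linux PTE atomically, without going through the hash-flush path, spinning while _PAGE_BUSY is held by a concurrent hash updater. A rough, purely illustrative C rendering of what the assembly does (hypothetical name; the real code needs the inline asm so the update is a single reservation/store sequence):

static inline void sketch_ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
			(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
	unsigned long old;

	for (;;) {
		old = pte_val(*ptep);			/* ldarx: load with reservation */
		if (old & _PAGE_BUSY)			/* hash updater owns the PTE */
			continue;			/* spin and reload */
		if (cmpxchg(&pte_val(*ptep), old,
			    old | bits) == old)		/* stdcx.: store if still reserved */
			break;
	}
}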
......@@ -581,7 +581,7 @@ static inline void ptep_mkdirty(pte_t *ptep)
static inline void
ptep_establish(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
pte_t entry)
{
ptep_clear_flush(vma, address, ptep);
set_pte(ptep, entry);
......
......@@ -1477,7 +1477,6 @@ int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_task_ioctl(ide_drive_t *, unsigned int, unsigned long);
extern void ide_delay_50ms(void);
extern int system_bus_clock(void);
extern u8 ide_auto_reduce_xfer(ide_drive_t *);
......
......@@ -1004,7 +1004,7 @@ static inline void break_cow(struct vm_area_struct * vma, struct page * new_page
flush_cache_page(vma, address);
entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
vma);
ptep_establish(vma, address, page_table, entry, 1);
ptep_establish(vma, address, page_table, entry);
update_mmu_cache(vma, address, entry);
}
......@@ -1056,7 +1056,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
flush_cache_page(vma, address);
entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
vma);
ptep_establish(vma, address, page_table, entry, 1);
ptep_set_access_flags(vma, address, page_table, entry, 1);
update_mmu_cache(vma, address, entry);
pte_unmap(page_table);
spin_unlock(&mm->page_table_lock);
......@@ -1646,7 +1646,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
ptep_establish(vma, address, pte, entry, write_access);
ptep_set_access_flags(vma, address, pte, entry, write_access);
update_mmu_cache(vma, address, entry);
pte_unmap(pte);
spin_unlock(&mm->page_table_lock);
......