Commit ff31dc0b authored by Linus Torvalds

v2.4.5.6 -> v2.4.5.7

  - Patrick Mochel: PCI documentation, and PM cleanups
  - mtd: export nand ECC functions
  - Jes Sorensen: acenic driver update
  - Alan Cox: fix vxfs merge boo-boo
  - me: undo page_launder() LRU changes, they have nasty side effects
  - wanrouter: fix error code
parent 29f279c7
......@@ -1993,6 +1993,13 @@ S: 1805 Marquette
S: Richardson, Texas 75081
S: USA
N: Patrick Mochel
E: mochel@transmeta.com
D: PCI Power Management, ACPI work
S: 3940 Freedom Circle
S: Santa Clara, CA 95054
S: USA
N: Eberhard Moenkeberg
E: emoenke@gwdg.de
D: CDROM driver "sbpcd" (Matsushita/Panasonic/Soundblaster)
......
......@@ -62,8 +62,13 @@ contains:
deregistration of the driver or when it's manually pulled
out of a hot-pluggable slot). This function always gets
called from process context, so it can sleep.
suspend, Power management hooks -- called when the device goes to
resume sleep or is resumed.
save_state Save a device's state before it's suspended.
suspend Put device into low power state.
resume Wake device from low power state.
enable_wake Enable device to generate wake events from a low power state.
(Please see Documentation/power/pci.txt for descriptions
of PCI Power Management and the related functions)
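(Editor's illustration, not part of this patch: a driver wiring up these hooks might look roughly like the sketch below. The foo_* names are placeholders, the id_table/probe/remove members are omitted for brevity, and pci_set_power_state()/pci_enable_wake() are the generic helpers touched elsewhere in this commit.)

#include <linux/pci.h>

static int foo_save_state(struct pci_dev *dev, u32 state)
{
	/* Save whatever device context is needed to resume later. */
	return 0;
}

static int foo_suspend(struct pci_dev *dev, u32 state)
{
	/* Quiesce the hardware, then enter the requested low power state. */
	return pci_set_power_state(dev, state);
}

static int foo_resume(struct pci_dev *dev)
{
	/* Back to D0, then restore the saved context. */
	return pci_set_power_state(dev, 0);
}

static int foo_enable_wake(struct pci_dev *dev, u32 state, int enable)
{
	/* Arm or disarm PME# generation for the given state. */
	return pci_enable_wake(dev, state, enable);
}

static struct pci_driver foo_driver = {
	.name        = "foo",
	/* .id_table, .probe and .remove omitted for brevity */
	.save_state  = foo_save_state,
	.suspend     = foo_suspend,
	.resume      = foo_resume,
	.enable_wake = foo_enable_wake,
};

The driver is registered as usual with pci_module_init(&foo_driver); only the power management members are new here.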
The ID table is an array of struct pci_device_id ending with an all-zero entry.
Each entry consists of:
......
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 6
EXTRAVERSION =-pre6
EXTRAVERSION =-pre7
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
......@@ -63,7 +63,6 @@
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
#include <math.h>
#include <linux/vmalloc.h>
#include "iphase.h"
#include "suni.h"
......
#
# linux/drivers/nand/Makefile
#
# $Id: Makefile,v 1.3 2001/04/19 23:54:48 dwmw2 Exp $
# $Id: Makefile,v 1.4 2001/06/28 10:49:45 dwmw2 Exp $
O_TARGET := nandlink.o
export-objs := nand.o
export-objs := nand.o nand_ecc.o
obj-$(CONFIG_MTD_NAND) += nand.o
obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
......
......@@ -4,7 +4,7 @@
* Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
* Toshiba America Electronics Components, Inc.
*
* $Id: nand_ecc.c,v 1.4 2001/01/03 20:02:20 mgadbois Exp $
* $Id: nand_ecc.c,v 1.6 2001/06/28 10:52:26 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -15,11 +15,13 @@
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
/*
* Pre-calculated 256-way 1 byte column parity
*/
const u_char nand_ecc_precalc_table[] = {
static const u_char nand_ecc_precalc_table[] = {
0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00,
0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
......@@ -42,7 +44,7 @@ const u_char nand_ecc_precalc_table[] = {
/*
* Creates non-inverted ECC code from line parity
*/
void nand_trans_result(u_char reg2, u_char reg3,
static void nand_trans_result(u_char reg2, u_char reg3,
u_char *ecc_code)
{
u_char a, b, i, tmp1, tmp2;
......@@ -202,3 +204,6 @@ int nand_correct_data (u_char *dat, u_char *read_ecc, u_char *calc_ecc)
/* Should never happen */
return -1;
}
EXPORT_SYMBOL(nand_calculate_ecc);
EXPORT_SYMBOL(nand_correct_data);
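(Editor's note: with these exports in place, another MTD module can reuse the software ECC directly. A minimal sketch of a hypothetical caller follows; only nand_correct_data()'s signature appears in the hunk above, so the nand_calculate_ecc() prototype and the 3-byte ECC per data block are assumptions.)

#include <linux/types.h>

/* Prototypes of the newly exported helpers; nand_correct_data() matches the
 * hunk above, the nand_calculate_ecc() declaration is assumed. Normally both
 * would come from an MTD header rather than being declared by hand. */
extern void nand_calculate_ecc(const u_char *dat, u_char *ecc_code);
extern int nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc);

/* Check, and if possible repair, one data block against the ECC bytes read
 * back from flash. nand_correct_data() returns -1 when uncorrectable. */
static int foo_check_block(u_char *data, u_char *stored_ecc)
{
	u_char calc_ecc[3];

	nand_calculate_ecc(data, calc_ecc);	/* recompute ECC over the data */
	return nand_correct_data(data, stored_ecc, calc_ecc);
}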
......@@ -157,6 +157,9 @@ MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
#define __devinit __init
#endif
#ifndef min
#define min(a,b) (((a)<(b))?(a):(b))
#endif
#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
......@@ -260,6 +263,11 @@ static inline void tasklet_init(struct tasklet_struct *tasklet,
#define ace_if_down(dev) {do{} while(0);}
#endif
#ifndef pci_set_dma_mask
#define pci_set_dma_mask(dev, mask) dev->dma_mask = mask;
#endif
#if (LINUX_VERSION_CODE >= 0x02031b)
#define NEW_NETINIT
#define ACE_PROBE_ARG void
......@@ -510,7 +518,7 @@ static int tx_ratio[ACE_MAX_MOD_PARMS];
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
static char version[] __initdata =
"acenic.c: v0.80 03/08/2001 Jes Sorensen, linux-acenic@SunSITE.dk\n"
"acenic.c: v0.81 04/20/2001 Jes Sorensen, linux-acenic@SunSITE.dk\n"
" http://home.cern.ch/~jes/gige/acenic.html\n";
static struct net_device *root_dev;
......@@ -740,7 +748,8 @@ int __devinit acenic_probe (ACE_PROBE_ARG)
}
MODULE_AUTHOR("Jes Sorensen <jes@linuxcare.com>");
#ifdef MODULE
MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(trace, "1-" __MODULE_STRING(8) "i");
......@@ -748,12 +757,7 @@ MODULE_PARM(tx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
MODULE_PARM_DESC(link, "Acenic/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "Acenic/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "Acenic/3C985/NetGear maximum clock ticks to wait for packets");
MODULE_PARM_DESC(max_tx_desc, "Acenic/3C985/NetGear maximum number of transmit descriptors");
MODULE_PARM_DESC(rx_coal_tick, "Acenic/3C985/NetGear maximum clock ticks to wait for packets");
MODULE_PARM_DESC(max_rx_desc, "Acenic/3C985/NetGear maximum number of receive descriptors");
#endif
static void __exit ace_module_cleanup(void)
......@@ -1040,7 +1044,7 @@ static int __init ace_init(struct net_device *dev)
u32 tig_ver, mac1, mac2, tmp, pci_state;
int board_idx, ecode = 0;
short i;
unsigned char cache;
unsigned char cache_size;
ap = dev->priv;
regs = ap->regs;
......@@ -1173,14 +1177,19 @@ static int __init ace_init(struct net_device *dev)
* Ie. having two NICs in the machine, one will have the cache
* line set at boot time, the other will not.
*/
pci_read_config_byte(ap->pdev, PCI_CACHE_LINE_SIZE, &cache);
if ((cache << 2) != SMP_CACHE_BYTES) {
pci_read_config_byte(ap->pdev, PCI_CACHE_LINE_SIZE, &cache_size);
cache_size <<= 2;
if (cache_size != SMP_CACHE_BYTES) {
printk(KERN_INFO " PCI cache line size set incorrectly "
"(%i bytes) by BIOS/FW, correcting to %i\n",
(cache << 2), SMP_CACHE_BYTES);
"(%i bytes) by BIOS/FW, ", cache_size);
if (cache_size > SMP_CACHE_BYTES)
printk("expecting %i\n", SMP_CACHE_BYTES);
else {
printk("correcting to %i\n", SMP_CACHE_BYTES);
pci_write_config_byte(ap->pdev, PCI_CACHE_LINE_SIZE,
SMP_CACHE_BYTES >> 2);
}
}
pci_state = readl(&regs->PciState);
printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
......@@ -1189,6 +1198,12 @@ static int __init ace_init(struct net_device *dev)
(pci_state & PCI_66MHZ) ? 66 : 33,
ap->pci_latency);
/*
* Make sure to enable the 64 bit DMA mask if we're in a 64bit slot
*/
if (!(pci_state & PCI_32BIT))
pci_set_dma_mask(ap->pdev, (dma_addr_t)~0ULL);
/*
* Set the max DMA transfer size. Seems that for most systems
* the performance is better when no MAX parameter is
......@@ -1214,21 +1229,10 @@ static int __init ace_init(struct net_device *dev)
printk(KERN_INFO " Disabling PCI memory "
"write and invalidate\n");
}
#ifdef __alpha__
/* This maximizes throughput on my alpha. */
tmp |= DMA_WRITE_MAX_128;
#endif
} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
printk(KERN_INFO " PCI memory write & invalidate "
"enabled by BIOS, enabling counter measures\n");
#ifdef __alpha__
/* All the docs sy MUST NOT. Well, I did.
* Nothing terrible happens, if we load wrong size.
* Bit w&i still works better!
*/
tmp |= DMA_WRITE_MAX_128;
#else
switch(SMP_CACHE_BYTES) {
case 16:
tmp |= DMA_WRITE_MAX_16;
......@@ -1239,6 +1243,9 @@ static int __init ace_init(struct net_device *dev)
case 64:
tmp |= DMA_WRITE_MAX_64;
break;
case 128:
tmp |= DMA_WRITE_MAX_128;
break;
default:
printk(KERN_INFO " Cache line size %i not "
"supported, PCI write and invalidate "
......@@ -1247,7 +1254,6 @@ static int __init ace_init(struct net_device *dev)
pci_write_config_word(ap->pdev, PCI_COMMAND,
ap->pci_command);
}
#endif
}
}
......@@ -1263,12 +1269,19 @@ static int __init ace_init(struct net_device *dev)
* set will give the PCI controller proper hints about
* prefetching.
*/
tmp = tmp & ~DMA_READ_WRITE_MASK;
tmp &= ~DMA_READ_WRITE_MASK;
tmp |= DMA_READ_MAX_64;
tmp |= DMA_WRITE_MAX_64;
#endif
#ifdef __alpha__
tmp &= ~DMA_READ_WRITE_MASK;
tmp |= DMA_READ_MAX_128;
/*
* All the docs say MUST NOT. Well, I did.
* Nothing terrible happens if we load the wrong size.
* But w&i still works better!
*/
tmp |= DMA_WRITE_MAX_128;
#endif
writel(tmp, &regs->PciState);
......@@ -3322,6 +3335,6 @@ static int __init read_eeprom_byte(struct net_device *dev,
/*
* Local variables:
* compile-command: "gcc -D__KERNEL__ -DMODULE -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -DMODVERSIONS -include ../../include/linux/modversions.h -c -o acenic.o acenic.c"
* compile-command: "gcc -D__SMP__ -D__KERNEL__ -DMODULE -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -DMODVERSIONS -include ../../include/linux/modversions.h -c -o acenic.o acenic.c"
* End:
*/
......@@ -207,8 +207,8 @@ typedef struct {
/*
* udelay() values for when clocking the eeprom
*/
#define ACE_SHORT_DELAY 1
#define ACE_LONG_DELAY 2
#define ACE_SHORT_DELAY 2
#define ACE_LONG_DELAY 4
/*
......
......@@ -275,8 +275,9 @@ pci_set_power_state(struct pci_dev *dev, int state)
else if (state == 2 && !(pmc & PCI_PM_CAP_D2)) return -EIO;
}
/* If we're in D3, force entire word to 0, since we can't access the
* PCI config space for the device
/* If we're in D3, force entire word to 0.
* This doesn't affect PME_Status, disables PME_En, and
* sets PowerState to 0.
*/
if (dev->current_state == 3)
pmcsr = 0;
......@@ -369,10 +370,8 @@ pci_enable_device(struct pci_dev *dev)
* @dev: PCI device to be disabled
*
* Signal to the system that the PCI device is not in use by the system
* anymore. Currently this only involves disabling PCI busmastering,
* if active.
* anymore. This only involves disabling PCI bus-mastering, if active.
*/
void
pci_disable_device(struct pci_dev *dev)
{
......@@ -405,18 +404,23 @@ int pci_enable_wake(struct pci_dev *dev, u32 state, int enable)
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
if (!pm) return -EIO; /* this device cannot poweroff - up to bridge to cut power */
/* make sure device supports wake events (from any state) */
/* If device doesn't support PM Capabilities, but request is to disable
* wake events, it's a nop; otherwise fail */
if (!pm)
return enable ? -EIO : 0;
/* Check device's ability to generate PME# */
pci_read_config_word(dev,pm+PCI_PM_PMC,&value);
if (!(value & PCI_PM_CAP_PME_MASK)) return -EINVAL; /* doesn't support wake events */
value &= PCI_PM_CAP_PME_MASK;
value >>= ffs(value); /* First bit of mask */
/*
* XXX - We're assuming that device can generate wake events from whatever
* state it may be entering.
* We're not actually checking what state we're going into to.
*/
/* Check if it can generate PME# from requested state. */
if (!value || !(value & (1 << state)))
return enable ? -EINVAL : 0;
/* Enable PME# Generation */
pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);
if (enable) value |= PCI_PM_CTRL_PME_STATUS;
......@@ -1385,6 +1389,18 @@ struct pci_bus * __init pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata
* easily implement them (ie just have a suspend function that calls
* the pci_set_power_state() function).
*/
static int pci_pm_save_state_device(struct pci_dev *dev, u32 state)
{
int error = 0;
if (dev) {
struct pci_driver *driver = dev->driver;
if (driver && driver->save_state)
error = driver->save_state(dev,state);
}
return error;
}
static int pci_pm_suspend_device(struct pci_dev *dev, u32 state)
{
int error = 0;
......@@ -1407,7 +1423,21 @@ static int pci_pm_resume_device(struct pci_dev *dev)
return error;
}
/* take care to suspend/resume bridges only once */
static int pci_pm_save_state_bus(struct pci_bus *bus, u32 state)
{
struct list_head *list;
int error = 0;
list_for_each(list, &bus->children) {
error = pci_pm_save_state_bus(pci_bus_b(list),state);
if (error) return error;
}
list_for_each(list, &bus->devices) {
error = pci_pm_save_state_device(pci_dev_b(list),state);
if (error) return error;
}
return 0;
}
static int pci_pm_suspend_bus(struct pci_bus *bus, u32 state)
{
......@@ -1437,6 +1467,21 @@ static int pci_pm_resume_bus(struct pci_bus *bus)
return 0;
}
static int pci_pm_save_state(u32 state)
{
struct list_head *list;
struct pci_bus *bus;
int error = 0;
list_for_each(list, &pci_root_buses) {
bus = pci_bus_b(list);
error = pci_pm_save_state_bus(bus,state);
if (!error)
error = pci_pm_save_state_device(bus->self,state);
}
return error;
}
static int pci_pm_suspend(u32 state)
{
struct list_head *list;
......@@ -1466,15 +1511,23 @@ static int pci_pm_resume(void)
static int
pci_pm_callback(struct pm_dev *pm_device, pm_request_t rqst, void *data)
{
int error = 0;
switch (rqst) {
case PM_SAVE_STATE:
error = pci_pm_save_state((u32)data);
break;
case PM_SUSPEND:
return pci_pm_suspend((u32)data);
error = pci_pm_suspend((u32)data);
break;
case PM_RESUME:
return pci_pm_resume();
error = pci_pm_resume();
break;
default: break;
}
return 0;
return error;
}
#endif
/*
......
......@@ -47,7 +47,6 @@ extern struct address_space_operations vxfs_immed_aops;
extern struct inode_operations vxfs_immed_symlink_iops;
static struct file_operations vxfs_file_operations = {
.llseek = generic_file_llseek,
.read = generic_file_read,
.mmap = generic_file_mmap,
};
......
......@@ -478,6 +478,7 @@ struct pci_driver {
const struct pci_device_id *id_table; /* NULL if wants all devices */
int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
int (*save_state) (struct pci_dev *dev, u32 state); /* Save Device Context */
int (*suspend)(struct pci_dev *dev, u32 state); /* Device suspended */
int (*resume) (struct pci_dev *dev); /* Device woken up */
int (*enable_wake) (struct pci_dev *dev, u32 state, int enable); /* Enable wake event */
......@@ -647,6 +648,12 @@ static inline int scsi_to_pci_dma_dir(unsigned char scsi_dir) { return scsi_dir;
static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; }
static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; }
/* Power management related routines */
static inline int pci_save_state(struct pci_dev *dev, u32 *buffer) { return 0; }
static inline int pci_restore_state(struct pci_dev *dev, u32 *buffer) { return 0; }
static inline int pci_set_power_state(struct pci_dev *dev, int state) { return 0; }
static inline int pci_enable_wake(struct pci_dev *dev, u32 state, int enable) { return 0; }
#define pci_for_each_dev(dev) \
for(dev = NULL; 0; )
......
......@@ -34,6 +34,8 @@ enum
PM_SUSPEND, /* enter D1-D3 */
PM_RESUME, /* enter D0 */
PM_SAVE_STATE, /* save device's state */
/* enable wake-on */
PM_SET_WAKEUP,
......
......@@ -205,16 +205,6 @@ extern spinlock_t pagemap_lru_lock;
page->zone->inactive_dirty_pages++; \
}
/* Like the above, but add us after the bookmark. */
#define add_page_to_inactive_dirty_list_marker(page) { \
DEBUG_ADD_PAGE \
ZERO_PAGE_BUG \
SetPageInactiveDirty(page); \
list_add(&(page)->lru, marker_lru); \
nr_inactive_dirty_pages++; \
page->zone->inactive_dirty_pages++; \
}
#define add_page_to_inactive_clean_list(page) { \
DEBUG_ADD_PAGE \
ZERO_PAGE_BUG \
......
......@@ -426,61 +426,23 @@ struct page * reclaim_page(zone_t * zone)
#define MAX_LAUNDER (4 * (1 << page_cluster))
#define CAN_DO_IO (gfp_mask & __GFP_IO)
#define CAN_DO_BUFFERS (gfp_mask & __GFP_BUFFER)
#define marker_lru (&marker_page_struct.lru)
int page_launder(int gfp_mask, int sync)
{
static int cannot_free_pages;
int launder_loop, maxscan, cleaned_pages, maxlaunder;
struct list_head * page_lru;
struct page * page;
/* Our bookmark of where we are in the inactive_dirty list. */
struct page marker_page_struct = { zone: NULL };
launder_loop = 0;
maxlaunder = 0;
cleaned_pages = 0;
dirty_page_rescan:
spin_lock(&pagemap_lru_lock);
/*
* By not scanning all inactive dirty pages we'll write out
* really old dirty pages before evicting newer clean pages.
* This should cause some LRU behaviour if we have a large
* amount of inactive pages (due to eg. drop behind).
*
* It also makes us accumulate dirty pages until we have enough
* to be worth writing to disk without causing excessive disk
* seeks and eliminates the infinite penalty clean pages incurred
* vs. dirty pages.
*/
maxscan = nr_inactive_dirty_pages / 4;
if (launder_loop)
maxscan *= 2;
list_add_tail(marker_lru, &inactive_dirty_list);
for (;;) {
page_lru = marker_lru->prev;
if (page_lru == &inactive_dirty_list)
break;
if (--maxscan < 0)
break;
if (!free_shortage())
break;
maxscan = nr_inactive_dirty_pages;
while ((page_lru = inactive_dirty_list.prev) != &inactive_dirty_list &&
maxscan-- > 0) {
page = list_entry(page_lru, struct page, lru);
/* Move the bookmark backwards.. */
list_del(marker_lru);
list_add_tail(marker_lru, page_lru);
/* Don't waste CPU if chances are we cannot free anything. */
if (launder_loop && maxlaunder < 0 && cannot_free_pages)
break;
/* Skip other people's marker pages. */
if (!page->zone)
continue;
/* Wrong page on list?! (list corruption, should not happen) */
if (!PageInactiveDirty(page)) {
printk("VM: page_launder, wrong page on list.\n");
......@@ -501,9 +463,11 @@ int page_launder(int gfp_mask, int sync)
/*
* The page is locked. IO in progress?
* Skip the page, we'll take a look when it unlocks.
* Move it to the back of the list.
*/
if (TryLockPage(page)) {
list_del(page_lru);
list_add(page_lru, &inactive_dirty_list);
continue;
}
......@@ -517,8 +481,10 @@ int page_launder(int gfp_mask, int sync)
if (!writepage)
goto page_active;
/* First time through? Skip the page. */
/* First time through? Move it to the back of the list */
if (!launder_loop || !CAN_DO_IO) {
list_del(page_lru);
list_add(page_lru, &inactive_dirty_list);
UnlockPage(page);
continue;
}
......@@ -531,8 +497,6 @@ int page_launder(int gfp_mask, int sync)
writepage(page);
page_cache_release(page);
maxlaunder--;
/* And re-start the thing.. */
spin_lock(&pagemap_lru_lock);
continue;
......@@ -560,12 +524,13 @@ int page_launder(int gfp_mask, int sync)
spin_unlock(&pagemap_lru_lock);
/* Will we do (asynchronous) IO? */
if (launder_loop && maxlaunder-- == 0 && sync)
wait = 0; /* No IO */
if (launder_loop) {
if (maxlaunder == 0 && sync)
wait = 2; /* Synchronous IO */
else if (launder_loop && maxlaunder > 0)
else if (maxlaunder-- > 0)
wait = 1; /* Async IO */
else
wait = 0; /* No IO */
}
/* Try to free the page buffers. */
clearedbuf = try_to_free_buffers(page, wait);
......@@ -579,7 +544,7 @@ int page_launder(int gfp_mask, int sync)
/* The buffers were not freed. */
if (!clearedbuf) {
add_page_to_inactive_dirty_list_marker(page);
add_page_to_inactive_dirty_list(page);
/* The page was only in the buffer cache. */
} else if (!page->mapping) {
......@@ -635,8 +600,6 @@ int page_launder(int gfp_mask, int sync)
UnlockPage(page);
}
}
/* Remove our marker. */
list_del(marker_lru);
spin_unlock(&pagemap_lru_lock);
/*
......@@ -652,29 +615,16 @@ int page_launder(int gfp_mask, int sync)
*/
if ((CAN_DO_IO || CAN_DO_BUFFERS) && !launder_loop && free_shortage()) {
launder_loop = 1;
/*
* If we, or the previous process running page_launder(),
* managed to free any pages we never do synchronous IO.
*/
if (cleaned_pages || !cannot_free_pages)
/* If we cleaned pages, never do synchronous IO. */
if (cleaned_pages)
sync = 0;
/* Else, do synchronous IO (if we are allowed to). */
else if (sync)
sync = 1;
/* We only do a few "out of order" flushes. */
maxlaunder = MAX_LAUNDER;
/* Let bdflush take care of the rest. */
/* Kflushd takes care of the rest. */
wakeup_bdflush(0);
goto dirty_page_rescan;
}
/*
* If we failed to free pages (because all pages are dirty)
* we remember this for the next time. This will prevent us
* from wasting too much CPU here.
*/
cannot_free_pages = !cleaned_pages;
/* Return the number of pages moved to the inactive_clean list. */
return cleaned_pages;
}
......@@ -902,7 +852,7 @@ static int do_try_to_free_pages(unsigned int gfp_mask, int user)
* list, so this is a relatively cheap operation.
*/
if (free_shortage()) {
ret += page_launder(gfp_mask, 1);
ret += page_launder(gfp_mask, user);
shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
shrink_icache_memory(DEF_PRIORITY, gfp_mask);
}
......
......@@ -2633,7 +2633,7 @@ static int wanpipe_connect(struct socket *sock, struct sockaddr *uaddr, int addr
return -EINVAL;
if (sk->state == WANSOCK_CONNECTED)
return EISCONN; /* No reconnect on a seqpacket socket */
return -EISCONN; /* No reconnect on a seqpacket socket */
if (sk->state != WAN_DISCONNECTED){
printk(KERN_INFO "wansock: Trying to connect on channel NON DISCONNECT\n");
......