Commit c613bbba authored by Russell King, committed by Russell King

Merge branch 'mxc-pu-imxfb' of git://pasiphae.extern.pengutronix.de/git/imx/linux-2.6 into devel

parents cd434833 80eee6bc
......@@ -24,7 +24,7 @@ real bad - it changes the behaviour of all unaligned instructions in user
space, and might cause programs to fail unexpectedly.
To change the alignment trap behavior, simply echo a number into
/proc/sys/debug/alignment. The number is made up from various bits:
/proc/cpu/alignment. The number is made up from various bits:
bit behavior when set
--- -----------------
......
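The hunk above only corrects the documented path from /proc/sys/debug/alignment to /proc/cpu/alignment. A minimal user-space sketch of driving that interface; the bit values used (1 = warn, 2 = fixup, 4 = signal) are assumed from the ARM alignment documentation and are not shown in the truncated table above:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/cpu/alignment", "w");

	if (!f) {
		perror("fopen /proc/cpu/alignment");
		return EXIT_FAILURE;
	}
	/* Assumed bit values: 1 = warn, 2 = fixup, 4 = signal. */
	fprintf(f, "%d\n", 1 | 2);
	fclose(f);
	return EXIT_SUCCESS;
}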
......@@ -1527,10 +1527,10 @@ W: http://ebtables.sourceforge.net/
S: Maintained
ECRYPT FILE SYSTEM
P: Mike Halcrow, Phillip Hellewell
M: mhalcrow@us.ibm.com, phillip@hellewell.homeip.net
L: ecryptfs-devel@lists.sourceforge.net
W: http://ecryptfs.sourceforge.net/
P: Tyler Hicks, Dustin Kirkland
M: tyhicks@linux.vnet.ibm.com, kirkland@canonical.com
L: ecryptfs-devel@lists.launchpad.net
W: https://launchpad.net/ecryptfs
S: Supported
EDAC-CORE
......@@ -1760,6 +1760,13 @@ L: linuxppc-dev@ozlabs.org
L: linux-i2c@vger.kernel.org
S: Maintained
FREESCALE IMX / MXC FRAMEBUFFER DRIVER
P: Sascha Hauer
M: kernel@pengutronix.de
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
S: Maintained
FREESCALE SOC FS_ENET DRIVER
P: Pantelis Antoniou
M: pantelis.antoniou@gmail.com
......
......@@ -630,7 +630,7 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq)
return -ENOMEM;
sachip->clk = clk_get(me, "SA1111_CLK");
if (!sachip->clk) {
if (IS_ERR(sachip->clk)) {
ret = PTR_ERR(sachip->clk);
goto err_free;
}
......
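The sa1111 fix above reflects that clk_get() reports failure through ERR_PTR() encodings rather than NULL, so the result must be tested with IS_ERR() and decoded with PTR_ERR(). A minimal sketch of that pattern, with a hypothetical clock name:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe(struct device *dev)
{
	/* "EXAMPLE_CLK" is a hypothetical clock name. */
	struct clk *clk = clk_get(dev, "EXAMPLE_CLK");

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* a NULL test would miss ERR_PTR() values */

	clk_enable(clk);
	/* ... use the device ... */
	clk_disable(clk);
	clk_put(clk);
	return 0;
}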
......@@ -116,6 +116,8 @@ EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__strncpy_from_user);
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
......@@ -182,8 +184,6 @@ EXPORT_SYMBOL(_find_first_bit_be);
EXPORT_SYMBOL(_find_next_bit_be);
#endif
EXPORT_SYMBOL(copy_page);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(mcount);
#endif
......@@ -18,6 +18,7 @@
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/uaccess.h>
......
/*
* This structure describes the machine which we are running on.
*/
struct imxfb_mach_info {
#define PCR_TFT (1 << 31)
#define PCR_COLOR (1 << 30)
#define PCR_PBSIZ_1 (0 << 28)
#define PCR_PBSIZ_2 (1 << 28)
#define PCR_PBSIZ_4 (2 << 28)
#define PCR_PBSIZ_8 (3 << 28)
#define PCR_BPIX_1 (0 << 25)
#define PCR_BPIX_2 (1 << 25)
#define PCR_BPIX_4 (2 << 25)
#define PCR_BPIX_8 (3 << 25)
#define PCR_BPIX_12 (4 << 25)
#define PCR_BPIX_16 (4 << 25)
#define PCR_PIXPOL (1 << 24)
#define PCR_FLMPOL (1 << 23)
#define PCR_LPPOL (1 << 22)
#define PCR_CLKPOL (1 << 21)
#define PCR_OEPOL (1 << 20)
#define PCR_SCLKIDLE (1 << 19)
#define PCR_END_SEL (1 << 18)
#define PCR_END_BYTE_SWAP (1 << 17)
#define PCR_REV_VS (1 << 16)
#define PCR_ACD_SEL (1 << 15)
#define PCR_ACD(x) (((x) & 0x7f) << 8)
#define PCR_SCLK_SEL (1 << 7)
#define PCR_SHARP (1 << 6)
#define PCR_PCD(x) ((x) & 0x3f)
#define PWMR_CLS(x) (((x) & 0x1ff) << 16)
#define PWMR_LDMSK (1 << 15)
#define PWMR_SCR1 (1 << 10)
#define PWMR_SCR0 (1 << 9)
#define PWMR_CC_EN (1 << 8)
#define PWMR_PW(x) ((x) & 0xff)
#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26)
#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16)
#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8)
#define LSCR1_GRAY2(x) (((x) & 0xf) << 4)
#define LSCR1_GRAY1(x) (((x) & 0xf))
#define DMACR_BURST (1 << 31)
#define DMACR_HM(x) (((x) & 0xf) << 16)
#define DMACR_TM(x) ((x) & 0xf)
struct imx_fb_platform_data {
u_long pixclock;
u_short xres;
......@@ -34,4 +79,5 @@ struct imxfb_mach_info {
void (*lcd_power)(int);
void (*backlight_power)(int);
};
void set_imx_fb_info(struct imxfb_mach_info *hard_imx_fb_info);
void set_imx_fb_info(struct imx_fb_platform_data *);
......@@ -12,9 +12,8 @@ extern void clear_reset_status(unsigned int mask);
/**
* init_gpio_reset() - register GPIO as reset generator
*
* @gpio - gpio nr
* @output - set gpio as out/low instead of input during normal work
* @gpio: gpio nr
* @output: set gpio as out/low instead of input during normal work
*/
extern int init_gpio_reset(int gpio, int output);
......
......@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
......
......@@ -4,6 +4,7 @@ menu "HP Simulator drivers"
config HP_SIMETH
bool "Simulated Ethernet "
depends on NET
config HP_SIMSERIAL
bool "Simulated serial driver support"
......
......@@ -48,7 +48,7 @@ config RUNTIME_DEBUG
help
If you say Y here, some debugging macros will do run-time checking.
If you say N here, those macros will mostly turn to no-ops. See
include/asm-mips/debug.h for debuging macros.
arch/mips/include/asm/debug.h for debugging macros.
If unsure, say N.
endmenu
......@@ -35,6 +35,16 @@
mtc0 \reg, CP0_TCSTATUS
_ehb
.endm
#elif defined(CONFIG_CPU_MIPSR2)
.macro local_irq_enable reg=t0
ei
irq_enable_hazard
.endm
.macro local_irq_disable reg=t0
di
irq_disable_hazard
.endm
#else
.macro local_irq_enable reg=t0
mfc0 \reg, CP0_STATUS
......
......@@ -111,6 +111,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
plat_unmap_dma_mem(dma_handle);
free_pages((unsigned long) vaddr, get_order(size));
}
......@@ -121,6 +122,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
{
unsigned long addr = (unsigned long) vaddr;
plat_unmap_dma_mem(dma_handle);
if (!plat_device_is_coherent(dev))
addr = CAC_ADDR(addr);
......
......@@ -41,6 +41,7 @@ $(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
......
......@@ -479,6 +479,8 @@ _GLOBAL(_tlbil_pid)
* (no broadcast)
*/
_GLOBAL(_tlbil_va)
mfmsr r10
wrteei 0
slwi r4,r4,16
mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
tlbsx 0,r3
......@@ -490,6 +492,7 @@ _GLOBAL(_tlbil_va)
tlbwe
msync
isync
wrtee r10
blr
#endif /* CONFIG_FSL_BOOKE */
......
......@@ -223,9 +223,15 @@ struct pci_header {
} __attribute__((packed));
/* Function prototypes for bootstrapping */
#ifdef CONFIG_VMI
extern void vmi_init(void);
extern void vmi_activate(void);
extern void vmi_bringup(void);
extern void vmi_apply_boot_page_allocations(void);
#else
static inline void vmi_init(void) {}
static inline void vmi_activate(void) {}
static inline void vmi_bringup(void) {}
#endif
/* State needed to start an application processor in an SMP system. */
struct vmi_ap_state {
......
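The vmi.h hunk above replaces #ifdef CONFIG_VMI blocks at the call sites with empty static inline stubs when VMI is disabled, so setup.c and smpboot.c (below) can call vmi_init(), vmi_activate() and vmi_bringup() unconditionally. A minimal sketch of that header pattern, using a hypothetical CONFIG_FOO feature:

/* Sketch of the "stub out when disabled" header pattern; CONFIG_FOO,
 * foo_init() and foo_activate() are hypothetical names.
 */
#ifdef CONFIG_FOO
extern void foo_init(void);
extern void foo_activate(void);
#else
static inline void foo_init(void) {}
static inline void foo_activate(void) {}
#endif

/* Callers then need no #ifdef of their own:
 *
 *	foo_init();
 *	foo_activate();
 */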
......@@ -794,6 +794,9 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif
/* VMI may relocate the fixmap; do this before touching ioremap area */
vmi_init();
early_cpu_init();
early_ioremap_init();
......@@ -880,13 +883,8 @@ void __init setup_arch(char **cmdline_p)
check_efer();
#endif
#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
/*
* Must be before kernel pagetables are setup
* or fixmap area is touched.
*/
vmi_init();
#endif
/* Must be before kernel pagetables are setup */
vmi_activate();
/* after early param, so could get panic from serial */
reserve_early_setup_data();
......
......@@ -294,9 +294,7 @@ static void __cpuinit start_secondary(void *unused)
* fragile that we want to limit the things done here to the
* most necessary things.
*/
#ifdef CONFIG_VMI
vmi_bringup();
#endif
cpu_init();
preempt_disable();
smp_callin();
......
......@@ -960,8 +960,6 @@ static inline int __init activate_vmi(void)
void __init vmi_init(void)
{
unsigned long flags;
if (!vmi_rom)
probe_vmi_rom();
else
......@@ -973,13 +971,21 @@ void __init vmi_init(void)
reserve_top_address(-vmi_rom->virtual_top);
local_irq_save(flags);
activate_vmi();
#ifdef CONFIG_X86_IO_APIC
/* This is virtual hardware; timer routing is wired correctly */
no_timer_check = 1;
#endif
}
void vmi_activate(void)
{
unsigned long flags;
if (!vmi_rom)
return;
local_irq_save(flags);
activate_vmi();
local_irq_restore(flags & X86_EFLAGS_IF);
}
......
......@@ -824,12 +824,12 @@ static int __init toshiba_acpi_init(void)
toshiba_acpi_exit();
return -ENOMEM;
}
}
/* Register input device for kill switch */
toshiba_acpi.poll_dev = input_allocate_polled_device();
if (!toshiba_acpi.poll_dev) {
printk(MY_ERR "unable to allocate kill-switch input device\n");
printk(MY_ERR
"unable to allocate kill-switch input device\n");
toshiba_acpi_exit();
return -ENOMEM;
}
......@@ -839,18 +839,22 @@ static int __init toshiba_acpi_init(void)
toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
toshiba_acpi.poll_dev->input->id.vendor = 0x0930; /* Toshiba USB ID */
/* Toshiba USB ID */
toshiba_acpi.poll_dev->input->id.vendor = 0x0930;
set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
input_report_switch(toshiba_acpi.poll_dev->input, SW_RFKILL_ALL, TRUE);
input_report_switch(toshiba_acpi.poll_dev->input,
SW_RFKILL_ALL, TRUE);
input_sync(toshiba_acpi.poll_dev->input);
ret = input_register_polled_device(toshiba_acpi.poll_dev);
if (ret) {
printk(MY_ERR "unable to register kill-switch input device\n");
printk(MY_ERR
"unable to register kill-switch input device\n");
toshiba_acpi_exit();
return ret;
}
}
return 0;
}
......
......@@ -2081,10 +2081,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
if (!q)
return -ENXIO;
rq = blk_get_request(q, READ, GFP_KERNEL);
if (!rq)
return -ENOMEM;
cdi->last_sense = 0;
while (nframes) {
......@@ -2096,9 +2092,17 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
len = nr * CD_FRAMESIZE_RAW;
rq = blk_get_request(q, READ, GFP_KERNEL);
if (!rq) {
ret = -ENOMEM;
break;
}
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret)
if (ret) {
blk_put_request(rq);
break;
}
rq->cmd[0] = GPCMD_READ_CD;
rq->cmd[1] = 1 << 2;
......@@ -2124,6 +2128,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
if (blk_rq_unmap_user(bio))
ret = -EFAULT;
blk_put_request(rq);
if (ret)
break;
......@@ -2133,7 +2138,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
ubuf += len;
}
blk_put_request(rq);
return ret;
}
......
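The cdrom change above moves blk_get_request() inside the per-chunk loop and pairs every allocation with blk_put_request(), including on the blk_rq_map_user() error path, so a failed mapping no longer leaks the request. A minimal sketch of that loop shape, with a hypothetical process_chunk() helper standing in for the command setup, blk_execute_rq() call, copy-out and blk_rq_unmap_user():

#include <linux/blkdev.h>

/* Hypothetical helper: execute the mapped request and unmap it. */
extern int process_chunk(struct request *rq);

static int read_in_chunks(struct request_queue *q, char __user *ubuf,
			  int nframes, int len)
{
	struct request *rq;
	int ret = 0;

	while (nframes--) {
		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!rq) {
			ret = -ENOMEM;
			break;
		}

		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
		if (ret) {
			blk_put_request(rq);	/* don't leak on map failure */
			break;
		}

		ret = process_chunk(rq);	/* hypothetical */

		blk_put_request(rq);		/* paired with blk_get_request() */
		if (ret)
			break;

		ubuf += len;
	}
	return ret;
}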
......@@ -27,7 +27,7 @@
0x0c U+2640
0x0d U+266a
0x0e U+266b
0x0f U+263c
0x0f U+263c U+00a4
0x10 U+25b6 U+25ba
0x11 U+25c0 U+25c4
0x12 U+2195
......@@ -55,7 +55,7 @@
0x24 U+0024
0x25 U+0025
0x26 U+0026
0x27 U+0027
0x27 U+0027 U+00b4
0x28 U+0028
0x29 U+0029
0x2a U+002a
......@@ -84,7 +84,7 @@
0x41 U+0041 U+00c0 U+00c1 U+00c2 U+00c3
0x42 U+0042
0x43 U+0043 U+00a9
0x44 U+0044
0x44 U+0044 U+00d0
0x45 U+0045 U+00c8 U+00ca U+00cb
0x46 U+0046
0x47 U+0047
......@@ -140,7 +140,7 @@
0x79 U+0079 U+00fd
0x7a U+007a
0x7b U+007b
0x7c U+007c U+00a5
0x7c U+007c U+00a6
0x7d U+007d
0x7e U+007e
#
......@@ -263,10 +263,10 @@
0xe8 U+03a6 U+00d8
0xe9 U+0398
0xea U+03a9 U+2126
0xeb U+03b4
0xeb U+03b4 U+00f0
0xec U+221e
0xed U+03c6 U+00f8
0xee U+03b5
0xee U+03b5 U+2208
0xef U+2229
0xf0 U+2261
0xf1 U+00b1
......
......@@ -2274,7 +2274,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
continue; /* nothing to display */
}
/* Glyph not found */
if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) {
/* In legacy mode use the glyph we get by a 1:1 mapping.
This would make absolutely no sense with Unicode in mind,
but do this for ASCII characters since a font may lack
......
......@@ -92,7 +92,7 @@ static void highlander_i2c_setup(struct highlander_i2c_dev *dev)
static void smbus_write_data(u8 *src, u16 *dst, int len)
{
for (; len > 1; len -= 2) {
*dst++ = be16_to_cpup((u16 *)src);
*dst++ = be16_to_cpup((__be16 *)src);
src += 2;
}
......@@ -103,7 +103,7 @@ static void smbus_write_data(u8 *src, u16 *dst, int len)
static void smbus_read_data(u16 *src, u8 *dst, int len)
{
for (; len > 1; len -= 2) {
*(u16 *)dst = cpu_to_be16p(src++);
*(__be16 *)dst = cpu_to_be16p(src++);
dst += 2;
}
......
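The highlander change above only adds __be16 annotations so sparse can verify that be16_to_cpup()/cpu_to_be16p() are applied to big-endian data; the generated code is unchanged. A minimal sketch of the annotated conversion, assuming a byte buffer holding big-endian 16-bit words:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Copy big-endian 16-bit words from a byte buffer into CPU order.
 * The __be16 cast exists for sparse; it generates no extra code.
 */
static void copy_be16_words(const u8 *src, u16 *dst, int len)
{
	for (; len > 1; len -= 2) {
		*dst++ = be16_to_cpup((const __be16 *)src);
		src += 2;
	}
}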
......@@ -486,7 +486,7 @@ static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
if (cmd->type == MSP_TWI_CMD_WRITE ||
cmd->type == MSP_TWI_CMD_WRITE_READ) {
__be64 tmp = cpu_to_be64p((u64 *)cmd->write_data);
u64 tmp = be64_to_cpup((__be64 *)cmd->write_data);
tmp >>= (MSP_MAX_BYTES_PER_RW - cmd->write_len) * 8;
dev_dbg(&pmcmsptwi_adapter.dev, "Writing 0x%016llx\n", tmp);
pmcmsptwi_writel(tmp & 0x00000000ffffffffLL,
......
......@@ -1893,12 +1893,17 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl |= E1000_CTRL_PHY_RST;
}
ret_val = e1000_acquire_swflag_ich8lan(hw);
/* Whether or not the swflag was acquired, we need to reset the part */
hw_dbg(hw, "Issuing a global reset to ich8lan");
ew32(CTRL, (ctrl | E1000_CTRL_RST));
msleep(20);
/* release the swflag because it is not reset by hardware reset */
if (!ret_val) {
/* release the swflag because it is not reset by
* hardware reset
*/
e1000_release_swflag_ich8lan(hw);
}
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val) {
......
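The e1000e change above issues the global reset whether or not the software flag (an ownership semaphore between driver and firmware) could be acquired, and releases the flag afterwards only if it was actually taken, since the hardware reset does not clear it. A minimal sketch of that "best-effort acquire, unconditional operation, conditional release" shape, with hypothetical helpers:

/* Hypothetical helpers standing in for the acquire/release/reset calls;
 * example_acquire_flag() returns 0 on success.
 */
extern int example_acquire_flag(void);
extern void example_release_flag(void);
extern void example_issue_reset(void);

static void example_reset(void)
{
	int acquired = example_acquire_flag();

	/* Reset regardless: the part must be reset even if the flag
	 * could not be taken.
	 */
	example_issue_reset();

	/* The flag is not cleared by the hardware reset, so release it
	 * only when we actually own it.
	 */
	if (!acquired)
		example_release_flag();
}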
......@@ -1142,6 +1142,70 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
static void gem_pcs_reset(struct gem *gp)
{
int limit;
u32 val;
/* Reset PCS unit. */
val = readl(gp->regs + PCS_MIICTRL);
val |= PCS_MIICTRL_RST;
writel(val, gp->regs + PCS_MIICTRL);
limit = 32;
while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
udelay(100);
if (limit-- <= 0)
break;
}
if (limit <= 0)
printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
gp->dev->name);
}
static void gem_pcs_reinit_adv(struct gem *gp)
{
u32 val;
/* Make sure PCS is disabled while changing advertisement
* configuration.
*/
val = readl(gp->regs + PCS_CFG);
val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
writel(val, gp->regs + PCS_CFG);
/* Advertise all capabilities except assymetric
* pause.
*/
val = readl(gp->regs + PCS_MIIADV);
val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
PCS_MIIADV_SP | PCS_MIIADV_AP);
writel(val, gp->regs + PCS_MIIADV);
/* Enable and restart auto-negotiation, disable wrapback/loopback,
* and re-enable PCS.
*/
val = readl(gp->regs + PCS_MIICTRL);
val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
val &= ~PCS_MIICTRL_WB;
writel(val, gp->regs + PCS_MIICTRL);
val = readl(gp->regs + PCS_CFG);
val |= PCS_CFG_ENABLE;
writel(val, gp->regs + PCS_CFG);
/* Make sure serialink loopback is off. The meaning
* of this bit is logically inverted based upon whether
* you are in Serialink or SERDES mode.
*/
val = readl(gp->regs + PCS_SCTRL);
if (gp->phy_type == phy_serialink)
val &= ~PCS_SCTRL_LOOP;
else
val |= PCS_SCTRL_LOOP;
writel(val, gp->regs + PCS_SCTRL);
}
#define STOP_TRIES 32
/* Must be invoked under gp->lock and gp->tx_lock. */
......@@ -1168,6 +1232,9 @@ static void gem_reset(struct gem *gp)
if (limit <= 0)
printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
gem_pcs_reinit_adv(gp);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
......@@ -1324,7 +1391,7 @@ static int gem_set_link_modes(struct gem *gp)
gp->phy_type == phy_serdes) {
u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
if (pcs_lpa & PCS_MIIADV_FD)
if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
full_duplex = 1;
speed = SPEED_1000;
}
......@@ -1488,6 +1555,9 @@ static void gem_link_timer(unsigned long data)
val = readl(gp->regs + PCS_MIISTAT);
if ((val & PCS_MIISTAT_LS) != 0) {
if (gp->lstate == link_up)
goto restart;
gp->lstate = link_up;
netif_carrier_on(gp->dev);
(void)gem_set_link_modes(gp);
......@@ -1708,61 +1778,8 @@ static void gem_init_phy(struct gem *gp)
if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
gp->phy_mii.def->ops->init(&gp->phy_mii);
} else {
u32 val;
int limit;
/* Reset PCS unit. */
val = readl(gp->regs + PCS_MIICTRL);
val |= PCS_MIICTRL_RST;
writel(val, gp->regs + PCS_MIICTRL);
limit = 32;
while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
udelay(100);
if (limit-- <= 0)
break;
}
if (limit <= 0)
printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
gp->dev->name);
/* Make sure PCS is disabled while changing advertisement
* configuration.
*/
val = readl(gp->regs + PCS_CFG);
val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
writel(val, gp->regs + PCS_CFG);
/* Advertise all capabilities except assymetric
* pause.
*/
val = readl(gp->regs + PCS_MIIADV);
val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
PCS_MIIADV_SP | PCS_MIIADV_AP);
writel(val, gp->regs + PCS_MIIADV);
/* Enable and restart auto-negotiation, disable wrapback/loopback,
* and re-enable PCS.
*/
val = readl(gp->regs + PCS_MIICTRL);
val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
val &= ~PCS_MIICTRL_WB;
writel(val, gp->regs + PCS_MIICTRL);
val = readl(gp->regs + PCS_CFG);
val |= PCS_CFG_ENABLE;
writel(val, gp->regs + PCS_CFG);
/* Make sure serialink loopback is off. The meaning
* of this bit is logically inverted based upon whether
* you are in Serialink or SERDES mode.
*/
val = readl(gp->regs + PCS_SCTRL);
if (gp->phy_type == phy_serialink)
val &= ~PCS_SCTRL_LOOP;
else
val |= PCS_SCTRL_LOOP;
writel(val, gp->regs + PCS_SCTRL);
gem_pcs_reset(gp);
gem_pcs_reinit_adv(gp);
}
/* Default aneg parameters */
......@@ -2680,6 +2697,21 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = 0;
cmd->duplex = cmd->port = cmd->phy_address =
cmd->transceiver = cmd->autoneg = 0;
/* serdes means usually a Fibre connector, with most fixed */
if (gp->phy_type == phy_serdes) {
cmd->port = PORT_FIBRE;
cmd->supported = (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE | SUPPORTED_Autoneg |
SUPPORTED_Pause | SUPPORTED_Asym_Pause);
cmd->advertising = cmd->supported;
cmd->transceiver = XCVR_INTERNAL;
if (gp->lstate == link_up)
cmd->speed = SPEED_1000;
cmd->duplex = DUPLEX_FULL;
cmd->autoneg = 1;
}
}
cmd->maxtxpkt = cmd->maxrxpkt = 0;
......
......@@ -334,6 +334,6 @@ static void __exit bfin_cf_exit(void)
module_init(bfin_cf_init);
module_exit(bfin_cf_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>")
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver");
MODULE_LICENSE("GPL");
......@@ -107,6 +107,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
struct request *req;
int ret;
retry:
req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
if (!req)
return SCSI_DH_RES_TEMP_UNAVAIL;
......@@ -121,7 +122,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
req->sense_len = 0;
retry:
ret = blk_execute_rq(req->q, NULL, req, 1);
if (ret == -EIO) {
if (req->sense_len > 0) {
......@@ -136,8 +136,10 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
h->path_state = HP_SW_PATH_ACTIVE;
ret = SCSI_DH_OK;
}
if (ret == SCSI_DH_IMM_RETRY)
if (ret == SCSI_DH_IMM_RETRY) {
blk_put_request(req);
goto retry;
}
if (ret == SCSI_DH_DEV_OFFLINED) {
h->path_state = HP_SW_PATH_PASSIVE;
ret = SCSI_DH_OK;
......@@ -200,6 +202,7 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
struct request *req;
int ret, retry;
retry:
req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
if (!req)
return SCSI_DH_RES_TEMP_UNAVAIL;
......@@ -216,7 +219,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
req->sense_len = 0;
retry = h->retries;
retry:
ret = blk_execute_rq(req->q, NULL, req, 1);
if (ret == -EIO) {
if (req->sense_len > 0) {
......@@ -231,8 +233,10 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
ret = SCSI_DH_OK;
if (ret == SCSI_DH_RETRY) {
if (--retry)
if (--retry) {
blk_put_request(req);
goto retry;
}
ret = SCSI_DH_IO;
}
......
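In the hp_sw hunks above the retry: label is moved in front of blk_get_request(), and the old request is released with blk_put_request() before jumping back, so each retry issues a freshly allocated request instead of reusing one that has already been executed. A minimal sketch of that retry shape, with a hypothetical issue_tur() helper standing in for the command setup and blk_execute_rq() call:

#include <linux/blkdev.h>

/* Hypothetical helper: build and execute the command, return 0 on
 * success or -EAGAIN when the target asks for an immediate retry.
 */
extern int issue_tur(struct request *req);

static int send_with_retry(struct request_queue *q, int retries)
{
	struct request *req;
	int ret;

retry:
	req = blk_get_request(q, WRITE, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ret = issue_tur(req);

	if (ret == -EAGAIN && --retries) {
		blk_put_request(req);	/* release before allocating again */
		goto retry;
	}

	blk_put_request(req);
	return ret;
}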
......@@ -66,7 +66,7 @@ static int radeon_bl_update_status(struct backlight_device *bd)
level = bd->props.brightness;
del_timer_sync(&rinfo->lvds_timer);
radeon_engine_idle(rinfo);
radeon_engine_idle();
lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
if (level > 0) {
......
......@@ -282,8 +282,6 @@ static int backlight = 1;
static int backlight = 0;
#endif
int accel_cexp = 0;
/*
* prototypes
*/
......@@ -854,6 +852,7 @@ static int radeonfb_pan_display (struct fb_var_screeninfo *var,
if (rinfo->asleep)
return 0;
radeon_fifo_wait(2);
OUTREG(CRTC_OFFSET, ((var->yoffset * var->xres_virtual + var->xoffset)
* var->bits_per_pixel / 8) & ~7);
return 0;
......@@ -883,6 +882,7 @@ static int radeonfb_ioctl (struct fb_info *info, unsigned int cmd,
if (rc)
return rc;
radeon_fifo_wait(2);
if (value & 0x01) {
tmp = INREG(LVDS_GEN_CNTL);
......@@ -940,7 +940,7 @@ int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch)
if (rinfo->lock_blank)
return 0;
radeon_engine_idle(rinfo);
radeon_engine_idle();
val = INREG(CRTC_EXT_CNTL);
val &= ~(CRTC_DISPLAY_DIS | CRTC_HSYNC_DIS |
......@@ -1074,6 +1074,8 @@ static int radeon_setcolreg (unsigned regno, unsigned red, unsigned green,
pindex = regno;
if (!rinfo->asleep) {
radeon_fifo_wait(9);
if (rinfo->bpp == 16) {
pindex = regno * 8;
......@@ -1242,6 +1244,8 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
{
int i;
radeon_fifo_wait(20);
/* Workaround from XFree */
if (rinfo->is_mobility) {
/* A temporal workaround for the occational blanking on certain laptop
......@@ -1337,7 +1341,7 @@ static void radeon_lvds_timer_func(unsigned long data)
{
struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
radeon_engine_idle(rinfo);
radeon_engine_idle();
OUTREG(LVDS_GEN_CNTL, rinfo->pending_lvds_gen_cntl);
}
......@@ -1355,11 +1359,10 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
if (nomodeset)
return;
radeon_engine_idle(rinfo);
if (!regs_only)
radeon_screen_blank(rinfo, FB_BLANK_NORMAL, 0);
radeon_fifo_wait(31);
for (i=0; i<10; i++)
OUTREG(common_regs[i].reg, common_regs[i].val);
......@@ -1387,6 +1390,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
radeon_write_pll_regs(rinfo, mode);
if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
radeon_fifo_wait(10);
OUTREG(FP_CRTC_H_TOTAL_DISP, mode->fp_crtc_h_total_disp);
OUTREG(FP_CRTC_V_TOTAL_DISP, mode->fp_crtc_v_total_disp);
OUTREG(FP_H_SYNC_STRT_WID, mode->fp_h_sync_strt_wid);
......@@ -1401,6 +1405,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
if (!regs_only)
radeon_screen_blank(rinfo, FB_BLANK_UNBLANK, 0);
radeon_fifo_wait(2);
OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);
return;
......@@ -1551,7 +1556,7 @@ static int radeonfb_set_par(struct fb_info *info)
/* We always want engine to be idle on a mode switch, even
* if we won't actually change the mode
*/
radeon_engine_idle(rinfo);
radeon_engine_idle();
hSyncStart = mode->xres + mode->right_margin;
hSyncEnd = hSyncStart + mode->hsync_len;
......@@ -1846,6 +1851,7 @@ static int radeonfb_set_par(struct fb_info *info)
return 0;
}
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = radeonfb_check_var,
......@@ -1869,7 +1875,6 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
info->par = rinfo;
info->pseudo_palette = rinfo->pseudo_palette;
info->flags = FBINFO_DEFAULT
| FBINFO_HWACCEL_IMAGEBLIT
| FBINFO_HWACCEL_COPYAREA
| FBINFO_HWACCEL_FILLRECT
| FBINFO_HWACCEL_XPAN
......@@ -1877,7 +1882,6 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
info->fbops = &radeonfb_ops;
info->screen_base = rinfo->fb_base;
info->screen_size = rinfo->mapped_vram;
/* Fill fix common fields */
strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
info->fix.smem_start = rinfo->fb_base_phys;
......@@ -1892,25 +1896,8 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
info->fix.mmio_len = RADEON_REGSIZE;
info->fix.accel = FB_ACCEL_ATI_RADEON;
/* Allocate colormap */
fb_alloc_cmap(&info->cmap, 256, 0);
/* Setup pixmap used for acceleration */
#define PIXMAP_SIZE (2048 * 4)
info->pixmap.addr = kmalloc(PIXMAP_SIZE, GFP_KERNEL);
if (!info->pixmap.addr) {
printk(KERN_ERR "radeonfb: Failed to allocate pixmap !\n");
noaccel = 1;
goto bail;
}
info->pixmap.size = PIXMAP_SIZE;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 4;
info->pixmap.buf_align = 4;
info->pixmap.access_align = 32;
bail:
if (noaccel)
info->flags |= FBINFO_HWACCEL_DISABLED;
......@@ -2019,6 +2006,7 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
u32 tom = INREG(NB_TOM);
tmp = ((((tom >> 16) - (tom & 0xffff) + 1) << 6) * 1024);
radeon_fifo_wait(6);
OUTREG(MC_FB_LOCATION, tom);
OUTREG(DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
OUTREG(CRTC2_DISPLAY_BASE_ADDR, (tom & 0xffff) << 16);
......@@ -2522,8 +2510,6 @@ static int __init radeonfb_setup (char *options)
} else if (!strncmp(this_opt, "ignore_devlist", 14)) {
ignore_devlist = 1;
#endif
} else if (!strncmp(this_opt, "accel_cexp", 12)) {
accel_cexp = 1;
} else
mode_option = this_opt;
}
......@@ -2571,8 +2557,6 @@ module_param(monitor_layout, charp, 0);
MODULE_PARM_DESC(monitor_layout, "Specify monitor mapping (like XFree86)");
module_param(force_measure_pll, bool, 0);
MODULE_PARM_DESC(force_measure_pll, "Force measurement of PLL (debug)");
module_param(accel_cexp, bool, 0);
MODULE_PARM_DESC(accel_cexp, "Use acceleration engine for color expansion");
#ifdef CONFIG_MTRR
module_param(nomtrr, bool, 0);
MODULE_PARM_DESC(nomtrr, "bool: disable use of MTRR registers");
......
......@@ -2653,9 +2653,9 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
if (!(info->flags & FBINFO_HWACCEL_DISABLED)) {
/* Make sure engine is reset */
radeon_engine_idle(rinfo);
radeon_engine_idle();
radeonfb_engine_reset(rinfo);
radeon_engine_idle(rinfo);
radeon_engine_idle();
}
/* Blank display and LCD */
......@@ -2767,7 +2767,7 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
rinfo->asleep = 0;
} else
radeon_engine_idle(rinfo);
radeon_engine_idle();
/* Restore display & engine */
radeon_write_mode (rinfo, &rinfo->state, 1);
......
......@@ -336,15 +336,7 @@ struct radeonfb_info {
int mon2_type;
u8 *mon2_EDID;
/* accel bits */
u32 dp_gui_mc_base;
u32 dp_gui_mc_cache;
u32 dp_cntl_cache;
u32 dp_brush_fg_cache;
u32 dp_brush_bg_cache;
u32 dp_src_fg_cache;
u32 dp_src_bg_cache;
u32 fifo_free;
u32 dp_gui_master_cntl;
struct pll_info pll;
......@@ -356,7 +348,6 @@ struct radeonfb_info {
int lock_blank;
int dynclk;
int no_schedule;
int gfx_mode;
enum radeon_pm_mode pm_mode;
reinit_function_ptr reinit_func;
......@@ -401,14 +392,8 @@ static inline void _radeon_msleep(struct radeonfb_info *rinfo, unsigned long ms)
#define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr)
#define INREG16(addr) readw((rinfo->mmio_base)+addr)
#define OUTREG16(addr,val) writew(val, (rinfo->mmio_base)+addr)
#ifdef CONFIG_PPC
#define INREG(addr) ({ eieio(); ld_le32(rinfo->mmio_base+(addr)); })
#define OUTREG(addr,val) do { eieio(); st_le32(rinfo->mmio_base+(addr),(val)); } while(0)
#else
#define INREG(addr) readl((rinfo->mmio_base)+addr)
#define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr)
#endif
static inline void _OUTREGP(struct radeonfb_info *rinfo, u32 addr,
u32 val, u32 mask)
......@@ -550,7 +535,17 @@ static inline u32 radeon_get_dstbpp(u16 depth)
* 2D Engine helper routines
*/
extern void radeon_fifo_update_and_wait(struct radeonfb_info *rinfo, int entries);
static inline void _radeon_fifo_wait(struct radeonfb_info *rinfo, int entries)
{
int i;
for (i=0; i<2000000; i++) {
if ((INREG(RBBM_STATUS) & 0x7f) >= entries)
return;
udelay(1);
}
printk(KERN_ERR "radeonfb: FIFO Timeout !\n");
}
static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
{
......@@ -563,7 +558,7 @@ static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
/* Ensure FIFO is empty, ie, make sure the flush commands
* has reached the cache
*/
radeon_fifo_update_and_wait(rinfo, 64);
_radeon_fifo_wait (rinfo, 64);
/* Wait for the flush to complete */
for (i=0; i < 2000000; i++) {
......@@ -575,12 +570,12 @@ static inline void radeon_engine_flush (struct radeonfb_info *rinfo)
}
static inline void radeon_engine_idle(struct radeonfb_info *rinfo)
static inline void _radeon_engine_idle(struct radeonfb_info *rinfo)
{
int i;
/* ensure FIFO is empty before waiting for idle */
radeon_fifo_update_and_wait (rinfo, 64);
_radeon_fifo_wait (rinfo, 64);
for (i=0; i<2000000; i++) {
if (((INREG(RBBM_STATUS) & GUI_ACTIVE)) == 0) {
......@@ -593,6 +588,8 @@ static inline void radeon_engine_idle(struct radeonfb_info *rinfo)
}
#define radeon_engine_idle() _radeon_engine_idle(rinfo)
#define radeon_fifo_wait(entries) _radeon_fifo_wait(rinfo,entries)
#define radeon_msleep(ms) _radeon_msleep(rinfo,ms)
......@@ -622,7 +619,6 @@ extern void radeonfb_imageblit(struct fb_info *p, const struct fb_image *image);
extern int radeonfb_sync(struct fb_info *info);
extern void radeonfb_engine_init (struct radeonfb_info *rinfo);
extern void radeonfb_engine_reset(struct radeonfb_info *rinfo);
extern void radeon_fixup_mem_offset(struct radeonfb_info *rinfo);
/* Other functions */
extern int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_switch);
......@@ -638,6 +634,4 @@ static inline void radeonfb_bl_init(struct radeonfb_info *rinfo) {}
static inline void radeonfb_bl_exit(struct radeonfb_info *rinfo) {}
#endif
extern int accel_cexp;
#endif /* __RADEONFB_H__ */
/*
* linux/drivers/video/imxfb.h
*
* Freescale i.MX Frame Buffer device driver
*
* Copyright (C) 2004 S.Hauer, Pengutronix
*
* Copyright (C) 1999 Eric A. Thomas
* Based on acornfb.c Copyright (C) Russell King.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* These are the bitfields for each
* display depth that we support.
*/
struct imxfb_rgb {
struct fb_bitfield red;
struct fb_bitfield green;
struct fb_bitfield blue;
struct fb_bitfield transp;
};
#define RGB_16 (0)
#define RGB_8 (1)
#define NR_RGB 2
struct imxfb_info {
struct device *dev;
struct imxfb_rgb *rgb[NR_RGB];
u_int max_bpp;
u_int max_xres;
u_int max_yres;
/*
* These are the addresses we mapped
* the framebuffer memory region to.
*/
dma_addr_t map_dma;
u_char * map_cpu;
u_int map_size;
u_char * screen_cpu;
dma_addr_t screen_dma;
u_int palette_size;
dma_addr_t dbar1;
dma_addr_t dbar2;
u_int pcr;
u_int pwmr;
u_int lscr1;
u_int dmacr;
u_int cmap_inverse:1,
cmap_static:1,
unused:30;
void (*lcd_power)(int);
void (*backlight_power)(int);
};
#define IMX_NAME "IMX"
/*
* Minimum X and Y resolutions
*/
#define MIN_XRES 64
#define MIN_YRES 64
......@@ -319,6 +319,7 @@ enum
{
NAPI_STATE_SCHED, /* Poll is scheduled */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
};
extern void __napi_schedule(struct napi_struct *n);
......@@ -1497,6 +1498,12 @@ static inline void netif_rx_complete(struct net_device *dev,
{
unsigned long flags;
/*
* don't let napi dequeue from the cpu poll list
* just in case its running on a different cpu
*/
if (unlikely(test_bit(NAPI_STATE_NPSVC, &napi->state)))
return;
local_irq_save(flags);
__netif_rx_complete(dev, napi);
local_irq_restore(flags);
......
......@@ -146,6 +146,8 @@ static inline void smp_send_reschedule(int cpu) { }
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))
#define smp_call_function_many(mask, func, info, wait) \
(up_smp_call_function(func, info))
static inline void init_call_single_data(void)
{
}
......
......@@ -525,9 +525,6 @@
#define CRTC_DISPLAY_DIS (1 << 10)
#define CRTC_CRT_ON (1 << 15)
/* DSTCACHE_MODE bits constants */
#define RB2D_DC_AUTOFLUSH_ENABLE (1 << 8)
#define RB2D_DC_DC_DISABLE_IGNORE_PE (1 << 17)
/* DSTCACHE_CTLSTAT bit constants */
#define RB2D_DC_FLUSH_2D (1 << 0)
......@@ -869,10 +866,15 @@
#define GMC_DST_16BPP_YVYU422 0x00000c00
#define GMC_DST_32BPP_AYUV444 0x00000e00
#define GMC_DST_16BPP_ARGB4444 0x00000f00
#define GMC_SRC_MONO 0x00000000
#define GMC_SRC_MONO_LBKGD 0x00001000
#define GMC_SRC_DSTCOLOR 0x00003000
#define GMC_BYTE_ORDER_MSB_TO_LSB 0x00000000
#define GMC_BYTE_ORDER_LSB_TO_MSB 0x00004000
#define GMC_DP_CONVERSION_TEMP_9300 0x00008000
#define GMC_DP_CONVERSION_TEMP_6500 0x00000000
#define GMC_DP_SRC_RECT 0x02000000
#define GMC_DP_SRC_HOST 0x03000000
#define GMC_DP_SRC_HOST_BYTEALIGN 0x04000000
#define GMC_3D_FCN_EN_CLR 0x00000000
#define GMC_3D_FCN_EN_SET 0x08000000
......@@ -883,9 +885,6 @@
#define GMC_WRITE_MASK_LEAVE 0x00000000
#define GMC_WRITE_MASK_SET 0x40000000
#define GMC_CLR_CMP_CNTL_DIS (1 << 28)
#define GMC_SRC_DATATYPE_MASK (3 << 12)
#define GMC_SRC_DATATYPE_MONO_FG_BG (0 << 12)
#define GMC_SRC_DATATYPE_MONO_FG_LA (1 << 12)
#define GMC_SRC_DATATYPE_COLOR (3 << 12)
#define ROP3_S 0x00cc0000
#define ROP3_SRCCOPY 0x00cc0000
......@@ -894,7 +893,6 @@
#define DP_SRC_SOURCE_MASK (7 << 24)
#define GMC_BRUSH_NONE (15 << 4)
#define DP_SRC_SOURCE_MEMORY (2 << 24)
#define DP_SRC_SOURCE_HOST_DATA (3 << 24)
#define GMC_BRUSH_SOLIDCOLOR 0x000000d0
/* DP_MIX bit constants */
......@@ -980,12 +978,6 @@
#define DISP_PWR_MAN_TV_ENABLE_RST (1 << 25)
#define DISP_PWR_MAN_AUTO_PWRUP_EN (1 << 26)
/* RBBM_GUICNTL constants */
#define RBBM_GUICNTL_HOST_DATA_SWAP_NONE (0 << 0)
#define RBBM_GUICNTL_HOST_DATA_SWAP_16BIT (1 << 0)
#define RBBM_GUICNTL_HOST_DATA_SWAP_32BIT (2 << 0)
#define RBBM_GUICNTL_HOST_DATA_SWAP_HDW (3 << 0)
/* masks */
#define CONFIG_MEMSIZE_MASK 0x1f000000
......
......@@ -702,7 +702,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
* any child cgroups exist. This is theoretically supportable
* but involves complex error handling, so it's being left until
* later */
if (!list_empty(&cgrp->children))
if (root->number_of_cgroups > 1)
return -EBUSY;
/* Process each subsystem */
......
......@@ -119,12 +119,12 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
/*
* scd->clock = clamp(scd->tick_gtod + delta,
* max(scd->tick_gtod, scd->clock),
* max(scd->clock, scd->tick_gtod + TICK_NSEC));
* scd->tick_gtod + TICK_NSEC);
*/
clock = scd->tick_gtod + delta;
min_clock = wrap_max(scd->tick_gtod, scd->clock);
max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
max_clock = scd->tick_gtod + TICK_NSEC;
clock = wrap_max(clock, min_clock);
clock = wrap_min(clock, max_clock);
......
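The sched_clock change above tightens the upper clamp to scd->tick_gtod + TICK_NSEC (previously max(scd->clock, scd->tick_gtod + TICK_NSEC)), so a clock value that has run ahead can no longer keep dragging the bound forward with it. A minimal sketch of the wraparound-safe clamp, assuming the usual signed-difference definitions of wrap_max()/wrap_min():

#include <linux/types.h>

/* Wraparound-safe min/max: compare via signed difference so values that
 * have wrapped around u64 still order correctly (assumed definitions).
 */
static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static u64 clamp_clock(u64 clock, u64 tick_gtod, u64 prev_clock, u64 tick_nsec)
{
	u64 min_clock = wrap_max(tick_gtod, prev_clock);
	u64 max_clock = tick_gtod + tick_nsec;	/* tighter upper bound */

	clock = wrap_max(clock, min_clock);
	return wrap_min(clock, max_clock);
}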
......@@ -535,7 +535,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
struct kmem_cache *c;
c = slob_alloc(sizeof(struct kmem_cache),
flags, ARCH_KMALLOC_MINALIGN, -1);
GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
if (c) {
c->name = name;
......
......@@ -133,9 +133,11 @@ static int poll_one_napi(struct netpoll_info *npinfo,
npinfo->rx_flags |= NETPOLL_RX_DROP;
atomic_inc(&trapped);
set_bit(NAPI_STATE_NPSVC, &napi->state);
work = napi->poll(napi, budget);
clear_bit(NAPI_STATE_NPSVC, &napi->state);
atomic_dec(&trapped);
npinfo->rx_flags &= ~NETPOLL_RX_DROP;
......
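Together with the netdevice.h hunk earlier, the netpoll change above brackets the ->poll() call with the NAPI_STATE_NPSVC bit, and netif_rx_complete() bails out while the bit is set, so netpoll's polling of a NAPI context on one CPU cannot race with a completion dequeue on another. A minimal sketch of that state-bit guard, with hypothetical names:

#include <linux/bitops.h>

/* EXAMPLE_STATE_BUSY and struct example_ctx are hypothetical. */
#define EXAMPLE_STATE_BUSY	0

struct example_ctx {
	unsigned long state;
};

static void example_poll(struct example_ctx *ctx)
{
	set_bit(EXAMPLE_STATE_BUSY, &ctx->state);
	/* ... run the polled work; other paths must leave ctx alone ... */
	clear_bit(EXAMPLE_STATE_BUSY, &ctx->state);
}

static void example_complete(struct example_ctx *ctx)
{
	/* Mirrors netif_rx_complete(): skip while the poller owns ctx. */
	if (test_bit(EXAMPLE_STATE_BUSY, &ctx->state))
		return;
	/* ... normal completion path ... */
}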
......@@ -61,7 +61,7 @@ static struct
static struct xt_table nat_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
.lock = __RW_LOCK_UNLOCKED(__nat_table.lock),
.lock = __RW_LOCK_UNLOCKED(nat_table.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
......
......@@ -40,18 +40,14 @@
#include "tcp_vegas.h"
/* Default values of the Vegas variables, in fixed-point representation
* with V_PARAM_SHIFT bits to the right of the binary point.
*/
#define V_PARAM_SHIFT 1
static int alpha = 2<<V_PARAM_SHIFT;
static int beta = 4<<V_PARAM_SHIFT;
static int gamma = 1<<V_PARAM_SHIFT;
static int alpha = 2;
static int beta = 4;
static int gamma = 1;
module_param(alpha, int, 0644);
MODULE_PARM_DESC(alpha, "lower bound of packets in network (scale by 2)");
MODULE_PARM_DESC(alpha, "lower bound of packets in network");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "upper bound of packets in network (scale by 2)");
MODULE_PARM_DESC(beta, "upper bound of packets in network");
module_param(gamma, int, 0644);
MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
......@@ -172,49 +168,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
return;
}
/* The key players are v_beg_snd_una and v_beg_snd_nxt.
*
* These are so named because they represent the approximate values
* of snd_una and snd_nxt at the beginning of the current RTT. More
* precisely, they represent the amount of data sent during the RTT.
* At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
* we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
* bytes of data have been ACKed during the course of the RTT, giving
* an "actual" rate of:
*
* (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
*
* Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
* because delayed ACKs can cover more than one segment, so they
* don't line up nicely with the boundaries of RTTs.
*
* Another unfortunate fact of life is that delayed ACKs delay the
* advance of the left edge of our send window, so that the number
* of bytes we send in an RTT is often less than our cwnd will allow.
* So we keep track of our cwnd separately, in v_beg_snd_cwnd.
*/
if (after(ack, vegas->beg_snd_nxt)) {
/* Do the Vegas once-per-RTT cwnd adjustment. */
u32 old_wnd, old_snd_cwnd;
/* Here old_wnd is essentially the window of data that was
* sent during the previous RTT, and has all
* been acknowledged in the course of the RTT that ended
* with the ACK we just received. Likewise, old_snd_cwnd
* is the cwnd during the previous RTT.
*/
old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
tp->mss_cache;
old_snd_cwnd = vegas->beg_snd_cwnd;
/* Save the extent of the current window so we can use this
* at the end of the next RTT.
*/
vegas->beg_snd_una = vegas->beg_snd_nxt;
vegas->beg_snd_nxt = tp->snd_nxt;
vegas->beg_snd_cwnd = tp->snd_cwnd;
/* We do the Vegas calculations only if we got enough RTT
* samples that we can be reasonably sure that we got
......@@ -252,22 +212,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
*
* This is:
* (actual rate in segments) * baseRTT
* We keep it as a fixed point number with
* V_PARAM_SHIFT bits to the right of the binary point.
*/
target_cwnd = ((u64)old_wnd * vegas->baseRTT);
target_cwnd <<= V_PARAM_SHIFT;
do_div(target_cwnd, rtt);
target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
/* Calculate the difference between the window we had,
* and the window we would like to have. This quantity
* is the "Diff" from the Arizona Vegas papers.
*
* Again, this is a fixed point number with
* V_PARAM_SHIFT bits to the right of the binary
* point.
*/
diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
if (diff > gamma && tp->snd_ssthresh > 2 ) {
/* Going too fast. Time to slow down
......@@ -282,16 +234,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
* truncation robs us of full link
* utilization.
*/
tp->snd_cwnd = min(tp->snd_cwnd,
((u32)target_cwnd >>
V_PARAM_SHIFT)+1);
tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
/* Slow start. */
tcp_slow_start(tp);
} else {
/* Congestion avoidance. */
u32 next_snd_cwnd;
/* Figure out where we would like cwnd
* to be.
......@@ -300,26 +249,17 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
/* The old window was too fast, so
* we slow down.
*/
next_snd_cwnd = old_snd_cwnd - 1;
tp->snd_cwnd--;
} else if (diff < alpha) {
/* We don't have enough extra packets
* in the network, so speed up.
*/
next_snd_cwnd = old_snd_cwnd + 1;
tp->snd_cwnd++;
} else {
/* Sending just as fast as we
* should be.
*/
next_snd_cwnd = old_snd_cwnd;
}
/* Adjust cwnd upward or downward, toward the
* desired value.
*/
if (next_snd_cwnd > tp->snd_cwnd)
tp->snd_cwnd++;
else if (next_snd_cwnd < tp->snd_cwnd)
tp->snd_cwnd--;
}
if (tp->snd_cwnd < 2)
......
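The Vegas rework above drops the V_PARAM_SHIFT fixed-point scaling and measures the window directly from snd_cwnd, so the two key quantities become target_cwnd = snd_cwnd * baseRTT / rtt and Diff = snd_cwnd * (rtt - baseRTT) / baseRTT, both in whole segments, and alpha/beta/gamma become plain segment counts. A minimal arithmetic sketch of the per-RTT decision in that unscaled form; names are hypothetical and the real code also adjusts ssthresh:

/* All values in segments and microseconds; assumes base_rtt_us <= rtt_us
 * and positive alpha/beta/gamma.
 */
static unsigned int vegas_next_cwnd(unsigned int cwnd, unsigned int rtt_us,
				    unsigned int base_rtt_us,
				    unsigned int alpha, unsigned int beta,
				    unsigned int gamma, unsigned int ssthresh)
{
	/* Expected window at the base RTT, and the "Diff" of the Vegas papers. */
	unsigned int target_cwnd = cwnd * base_rtt_us / rtt_us;
	unsigned int diff = cwnd * (rtt_us - base_rtt_us) / base_rtt_us;

	if (diff > gamma && ssthresh > 2)
		return target_cwnd + 1 < cwnd ? target_cwnd + 1 : cwnd;	/* slow down */
	if (cwnd <= ssthresh)
		return cwnd + 1;		/* slow start (simplified) */
	if (diff > beta)
		return cwnd - 1;		/* too many packets queued: back off */
	if (diff < alpha)
		return cwnd + 1;		/* too few packets in flight: speed up */
	return cwnd;				/* sending at about the right rate */
}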
......@@ -912,7 +912,12 @@ static void ndisc_recv_na(struct sk_buff *skb)
is invalid, but ndisc specs say nothing
about it. It could be misconfiguration, or
an smart proxy agent tries to help us :-)
We should not print the error if NA has been
received from loopback - it is just our own
unsolicited advertisement.
*/
if (skb->pkt_type != PACKET_LOOPBACK)
ND_PRINTK1(KERN_WARNING
"ICMPv6 NA: someone advertises our address on %s!\n",
ifp->idev->dev->name);
......
......@@ -562,7 +562,6 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
const struct in_addr *mask,
struct netlbl_audit *audit_info)
{
int ret_val = 0;
struct netlbl_af4list *list_entry;
struct netlbl_unlhsh_addr4 *entry;
struct audit_buffer *audit_buf;
......@@ -577,7 +576,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
if (list_entry != NULL)
entry = netlbl_unlhsh_addr4_entry(list_entry);
else
ret_val = -ENOENT;
entry = NULL;
audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
audit_info);
......@@ -588,19 +587,21 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
addr->s_addr, mask->s_addr);
if (dev != NULL)
dev_put(dev);
if (entry && security_secid_to_secctx(entry->secid,
&secctx,
&secctx_len) == 0) {
if (entry != NULL &&
security_secid_to_secctx(entry->secid,
&secctx, &secctx_len) == 0) {
audit_log_format(audit_buf, " sec_obj=%s", secctx);
security_release_secctx(secctx, secctx_len);
}
audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
audit_log_end(audit_buf);
}
if (ret_val == 0)
if (entry == NULL)
return -ENOENT;
call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
return ret_val;
return 0;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
......@@ -624,7 +625,6 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
const struct in6_addr *mask,
struct netlbl_audit *audit_info)
{
int ret_val = 0;
struct netlbl_af6list *list_entry;
struct netlbl_unlhsh_addr6 *entry;
struct audit_buffer *audit_buf;
......@@ -638,7 +638,7 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
if (list_entry != NULL)
entry = netlbl_unlhsh_addr6_entry(list_entry);
else
ret_val = -ENOENT;
entry = NULL;
audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
audit_info);
......@@ -649,19 +649,21 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
addr, mask);
if (dev != NULL)
dev_put(dev);
if (entry && security_secid_to_secctx(entry->secid,
&secctx,
&secctx_len) == 0) {
if (entry != NULL &&
security_secid_to_secctx(entry->secid,
&secctx, &secctx_len) == 0) {
audit_log_format(audit_buf, " sec_obj=%s", secctx);
security_release_secctx(secctx, secctx_len);
}
audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
audit_log_end(audit_buf);
}
if (ret_val == 0)
if (entry == NULL)
return -ENOENT;
call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
return ret_val;
return 0;
}
#endif /* IPv6 */
......
......@@ -155,12 +155,13 @@ static void gprs_data_ready(struct sock *sk, int len)
static void gprs_write_space(struct sock *sk)
{
struct gprs_dev *dev = sk->sk_user_data;
struct net_device *net = dev->net;
unsigned credits = pep_writeable(sk);
spin_lock_bh(&dev->tx_lock);
dev->tx_max = credits;
if (credits > skb_queue_len(&dev->tx_queue))
netif_wake_queue(dev->net);
if (credits > skb_queue_len(&dev->tx_queue) && netif_running(net))
netif_wake_queue(net);
spin_unlock_bh(&dev->tx_lock);
}
......@@ -168,6 +169,23 @@ static void gprs_write_space(struct sock *sk)
* Network device callbacks
*/
static int gprs_open(struct net_device *dev)
{
struct gprs_dev *gp = netdev_priv(dev);
gprs_write_space(gp->sk);
return 0;
}
static int gprs_close(struct net_device *dev)
{
struct gprs_dev *gp = netdev_priv(dev);
netif_stop_queue(dev);
flush_work(&gp->tx_work);
return 0;
}
static int gprs_xmit(struct sk_buff *skb, struct net_device *net)
{
struct gprs_dev *dev = netdev_priv(net);
......@@ -254,6 +272,8 @@ static void gprs_setup(struct net_device *net)
net->tx_queue_len = 10;
net->destructor = free_netdev;
net->open = gprs_open;
net->stop = gprs_close;
net->hard_start_xmit = gprs_xmit; /* mandatory */
net->change_mtu = gprs_set_mtu;
net->get_stats = gprs_get_stats;
......@@ -318,7 +338,6 @@ int gprs_attach(struct sock *sk)
dev->sk = sk;
printk(KERN_DEBUG"%s: attached\n", net->name);
gprs_write_space(sk); /* kick off TX */
return net->ifindex;
out_rel:
......@@ -341,7 +360,5 @@ void gprs_detach(struct sock *sk)
printk(KERN_DEBUG"%s: detached\n", net->name);
unregister_netdev(net);
flush_scheduled_work();
sock_put(sk);
skb_queue_purge(&dev->tx_queue);
}
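The phonet hunks above add open/stop handlers so the TX queue is only woken while the interface is running, wired up through the old-style net_device fields. A minimal sketch of that callback wiring, with hypothetical names:

#include <linux/netdevice.h>

static int example_xmit(struct sk_buff *skb, struct net_device *net)
{
	/* Hypothetical: hand skb to the real transmit path. */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int example_open(struct net_device *net)
{
	netif_wake_queue(net);		/* TX allowed only once the device is up */
	return 0;
}

static int example_close(struct net_device *net)
{
	netif_stop_queue(net);		/* quiesce TX before teardown */
	return 0;
}

static void example_setup(struct net_device *net)
{
	net->open		= example_open;
	net->stop		= example_close;
	net->hard_start_xmit	= example_xmit;	/* mandatory */
}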
......@@ -46,9 +46,6 @@
layering other disciplines. It does not need to do bandwidth
control either since that can be handled by using token
bucket or other rate control.
The simulator is limited by the Linux timer resolution
and will create packet bursts on the HZ boundary (1ms).
*/
struct netem_sched_data {
......