Commit 647df518 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] SN specific version of dma_get_required_mask()
  [IA64] generic_defconfig: Enable SATA_VITESSE
  [IA64] dump stack on kernel unaligned warnings
  [IA64] Turn on CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  [IA64] Update to use account_{steal,idle}_ticks
parents cbbc4987 175add19
@@ -170,16 +170,15 @@ Returns: 0 if successful and a negative error if not.
 u64
 dma_get_required_mask(struct device *dev)
 
-After setting the mask with dma_set_mask(), this API returns the
-actual mask (within that already set) that the platform actually
-requires to operate efficiently. Usually this means the returned mask
+This API returns the mask that the platform requires to
+operate efficiently. Usually this means the returned mask
 is the minimum required to cover all of memory. Examining the
 required mask gives drivers with variable descriptor sizes the
 opportunity to use smaller descriptors as necessary.
 
 Requesting the required mask does not alter the current mask. If you
-wish to take advantage of it, you should issue another dma_set_mask()
-call to lower the mask again.
+wish to take advantage of it, you should issue a dma_set_mask()
+call to set the mask to the value returned.
 
 Part Id - Streaming DMA mappings
 
...
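The documentation text above describes the intended calling pattern. As a hedged illustration (a hypothetical driver probe fragment, not part of this commit), the required mask can steer the choice of descriptor format:

	/* Hypothetical driver fragment, for illustration only.
	 * Assumes <linux/dma-mapping.h>; example_probe() and the
	 * descriptor comments are invented. */
	static int example_probe(struct device *dev)
	{
		u64 required = dma_get_required_mask(dev);

		/* If all of memory is reachable through 32-bit addresses,
		 * the smaller descriptor format suffices. */
		if (required <= DMA_32BIT_MASK) {
			if (dma_set_mask(dev, DMA_32BIT_MASK))
				return -EIO;
			/* ... configure 32-bit descriptors ... */
		} else {
			if (dma_set_mask(dev, DMA_64BIT_MASK))
				return -EIO;
			/* ... configure 64-bit descriptors ... */
		}
		return 0;
	}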
@@ -17,6 +17,7 @@ config IA64
 	select ACPI if (!IA64_HP_SIM)
 	select PM if (!IA64_HP_SIM)
 	select ARCH_SUPPORTS_MSI
+	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
...
@@ -578,7 +578,7 @@ CONFIG_ATA_PIIX=y
 # CONFIG_SATA_SIS is not set
 # CONFIG_SATA_ULI is not set
 # CONFIG_SATA_VIA is not set
-# CONFIG_SATA_VITESSE is not set
+CONFIG_SATA_VITESSE=y
 # CONFIG_SATA_INIC162X is not set
 # CONFIG_PATA_ACPI is not set
 # CONFIG_PATA_ALI is not set
...
@@ -9,6 +9,8 @@
 #include <linux/scatterlist.h>
 #include <asm/swiotlb.h>
 
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
 struct dma_mapping_ops {
 	int (*mapping_error)(struct device *dev,
 			     dma_addr_t dma_addr);
...
@@ -62,6 +62,7 @@ typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t
 typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
 typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef u64 ia64_mv_dma_get_required_mask (struct device *);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -159,6 +160,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
 #  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
 #  define platform_dma_supported		ia64_mv.dma_supported
+#  define platform_dma_get_required_mask	ia64_mv.dma_get_required_mask
 #  define platform_irq_to_vector		ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq		ia64_mv.local_vector_to_irq
 #  define platform_pci_get_legacy_mem		ia64_mv.pci_get_legacy_mem
@@ -213,6 +215,7 @@ struct ia64_machine_vector {
 	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
 	ia64_mv_dma_mapping_error *dma_mapping_error;
 	ia64_mv_dma_supported *dma_supported;
+	ia64_mv_dma_get_required_mask *dma_get_required_mask;
 	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -263,6 +266,7 @@ struct ia64_machine_vector {
 	platform_dma_sync_sg_for_device,	\
 	platform_dma_mapping_error,		\
 	platform_dma_supported,			\
+	platform_dma_get_required_mask,		\
 	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
@@ -366,6 +370,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 #ifndef platform_dma_supported
 # define platform_dma_supported		swiotlb_dma_supported
 #endif
+#ifndef platform_dma_get_required_mask
+# define platform_dma_get_required_mask	ia64_dma_get_required_mask
+#endif
 #ifndef platform_irq_to_vector
 # define platform_irq_to_vector	__ia64_irq_to_vector
 #endif
...
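The machvec hunks above follow the usual ia64 machine-vector pattern: a typedef for the operation, a slot in struct ia64_machine_vector, a platform_* macro that resolves to the boot-selected vector entry, and an #ifndef fallback to the generic implementation. A minimal standalone sketch of the same dispatch idea (names and values invented for illustration; the real definitions live in the headers patched above):

	/* Illustration of the machvec-style dispatch pattern only. */
	#include <stdio.h>

	typedef unsigned long long op_required_mask_t(void *dev);

	struct machine_vector {
		op_required_mask_t *dma_get_required_mask;
	};

	/* Generic fallback, used when a platform does not override. */
	static unsigned long long generic_required_mask(void *dev)
	{
		return 0xffffffffULL;	/* e.g. RAM fits below 4GB */
	}

	/* SN2-style override: all of memory is 64-bit DMA addressable. */
	static unsigned long long sn2_required_mask(void *dev)
	{
		return 0xffffffffffffffffULL;
	}

	static struct machine_vector mv = {
		.dma_get_required_mask = generic_required_mask,
	};

	int main(void)
	{
		/* At boot, the vector is filled in for the detected platform. */
		mv.dma_get_required_mask = sn2_required_mask;
		printf("required mask: %#llx\n", mv.dma_get_required_mask(NULL));
		return 0;
	}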
@@ -3,6 +3,7 @@
 
 extern ia64_mv_send_ipi_t ia64_send_ipi;
 extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
+extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
 extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
 extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
...
@@ -67,6 +67,7 @@ extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
 extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
 extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
 extern ia64_mv_dma_supported sn_dma_supported;
+extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask;
 extern ia64_mv_migrate_t sn_migrate;
 extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
 extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
@@ -123,6 +124,7 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_dma_sync_sg_for_device	sn_dma_sync_sg_for_device
 #define platform_dma_mapping_error	sn_dma_mapping_error
 #define platform_dma_supported		sn_dma_supported
+#define platform_dma_get_required_mask	sn_dma_get_required_mask
 #define platform_migrate		sn_migrate
 #define platform_kernel_launch_event	sn_kernel_launch_event
 #ifdef CONFIG_PCI_MSI
...
@@ -59,6 +59,7 @@ dump (const char *str, void *vp, size_t len)
  * (i.e. don't allow attacker to fill up logs with unaligned accesses).
  */
 int no_unaligned_warning;
+int unaligned_dump_stack;
 static int noprint_warning;
 
 /*
@@ -1371,9 +1372,12 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 			}
 		}
 	} else {
-		if (within_logging_rate_limit())
+		if (within_logging_rate_limit()) {
 			printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
 			       ifa, regs->cr_iip + ipsr->ri);
+			if (unaligned_dump_stack)
+				dump_stack();
+		}
 
 		set_fs(KERNEL_DS);
 	}
...
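For context (not part of the patch), the kind of access this handler fixes up is a load whose address is not naturally aligned for its size; on ia64 such a load traps into ia64_handle_unaligned(), which now optionally dumps the stack. A hypothetical sketch:

	/* Illustration only: an 8-byte load from an address that is not
	 * 8-byte aligned traps on ia64 and reaches the handler above. */
	unsigned long misaligned_load(const void *base)
	{
		/* base + 1 is deliberately not 8-byte aligned */
		return *(const unsigned long *)((const char *)base + 1);
	}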
@@ -19,6 +19,7 @@
 #include <linux/ioport.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/bootmem.h>
 
 #include <asm/machvec.h>
 #include <asm/page.h>
@@ -748,6 +749,32 @@ static void __init set_pci_cacheline_size(void)
 	pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
 }
 
+u64 ia64_dma_get_required_mask(struct device *dev)
+{
+	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
+	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
+	u64 mask;
+
+	if (!high_totalram) {
+		/* convert to mask just covering totalram */
+		low_totalram = (1 << (fls(low_totalram) - 1));
+		low_totalram += low_totalram - 1;
+		mask = low_totalram;
+	} else {
+		high_totalram = (1 << (fls(high_totalram) - 1));
+		high_totalram += high_totalram - 1;
+		mask = (((u64)high_totalram) << 32) + 0xffffffff;
+	}
+	return mask;
+}
+EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
+
+u64 dma_get_required_mask(struct device *dev)
+{
+	return platform_dma_get_required_mask(dev);
+}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
+
 static int __init pcibios_init(void)
 {
 	set_pci_cacheline_size();
...
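The arithmetic above keeps only the highest set bit of the last-page address (1 << (fls(x) - 1)) and then widens it into an all-ones value (x += x - 1), i.e. the smallest 2^n - 1 mask covering that address; the high/low split handles machines with more than 4GB of RAM. A standalone userspace sketch of the same arithmetic (the 16KB PAGE_SHIFT and the max_pfn values are assumptions for illustration):

	/* Demo of the rounding in ia64_dma_get_required_mask() above. */
	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 14			/* assumed: 16KB pages */

	static int fls32(uint32_t x)		/* highest set bit, 1-based */
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	static uint64_t required_mask(uint64_t max_pfn)
	{
		uint32_t low = (uint32_t)((max_pfn - 1) << PAGE_SHIFT);
		uint32_t high = (uint32_t)((max_pfn - 1) >> (32 - PAGE_SHIFT));
		uint64_t mask;

		if (!high) {
			/* all RAM below 4GB: smallest 2^n - 1 covering it */
			low = 1u << (fls32(low) - 1);
			low += low - 1;
			mask = low;
		} else {
			high = 1u << (fls32(high) - 1);
			high += high - 1;
			mask = ((uint64_t)high << 32) + 0xffffffff;
		}
		return mask;
	}

	int main(void)
	{
		/* 3GB of RAM -> 0xffffffff; 8GB -> 0x1ffffffff */
		printf("%#llx\n", (unsigned long long)required_mask(3ULL << (30 - PAGE_SHIFT)));
		printf("%#llx\n", (unsigned long long)required_mask(8ULL << (30 - PAGE_SHIFT)));
		return 0;
	}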
@@ -356,6 +356,12 @@ int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 }
 EXPORT_SYMBOL(sn_dma_mapping_error);
 
+u64 sn_dma_get_required_mask(struct device *dev)
+{
+	return DMA_64BIT_MASK;
+}
+EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
+
 char *sn_pci_get_legacy_mem(struct pci_bus *bus)
 {
 	if (!SN_PCIBUS_BUSSOFT(bus))
...
@@ -129,8 +129,8 @@ consider_steal_time(unsigned long new_itm)
 		blocked = stolentick;
 
 	if (stolen > 0 || blocked > 0) {
-		account_steal_time(NULL, jiffies_to_cputime(stolen));
-		account_steal_time(idle_task(cpu), jiffies_to_cputime(blocked));
+		account_steal_ticks(stolen);
+		account_idle_ticks(blocked);
 		run_local_timers();
 
 		if (rcu_pending(cpu))
...
@@ -144,6 +144,7 @@ extern int acct_parm[];
 
 #ifdef CONFIG_IA64
 extern int no_unaligned_warning;
+extern int unaligned_dump_stack;
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
@@ -781,6 +782,14 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "unaligned-dump-stack",
+		.data		= &unaligned_dump_stack,
+		.maxlen		= sizeof (int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
 #endif
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 	{
...
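Since the entry above is registered in kern_table with mode 0644 and a plain proc_dointvec handler, the new flag should appear as /proc/sys/kernel/unaligned-dump-stack: writing a non-zero value (e.g. echo 1 > /proc/sys/kernel/unaligned-dump-stack) enables the dump_stack() call in ia64_handle_unaligned(), and writing 0 turns it back off.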