Commit ec0e6bd3 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Martin Schwidefsky:
 "One performance optimization for page_clear and a couple of bug fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/mm: fix incorrect ASCE after crst_table_downgrade
  s390/ftrace: fix crashes when switching tracers / add notrace to cpu_relax()
  s390/pci: unify pci_iomap symbol exports
  s390/pci: fix [un]map_resources sequence
  s390: let the compiler do page clearing
  s390/pci: fix possible information leak in mmio syscall
  s390/dcss: array index 'i' is used before limits check.
  s390/scm_block: fix off by one during cluster reservation
  s390/jump label: improve and fix sanity check
  s390/jump label: add missing jump_label_apply_nops() call
parents e7901af1 691d5264
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
+	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	atomic_dec(&prev->context.attach_count);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
-	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
...
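Note on the mmu_context.h hunk above: the user_asce update moves ahead of the prev == next early exit, so the lowcore copy of the ASCE is refreshed even when the scheduler re-attaches the same mm whose page table crst_table_downgrade has just shrunk. A minimal, self-contained C sketch of the general pattern; ctx, shadow_asce and switch_ctx are hypothetical names, not kernel symbols:

/* Hedged sketch, hypothetical names: state that must always be
 * republished belongs before any "nothing changed" early return. */
struct ctx {
	unsigned long asce;		/* address-space control element */
};

static unsigned long shadow_asce;	/* stand-in for the lowcore copy */

void switch_ctx(struct ctx *prev, struct ctx *next)
{
	/* always refresh, even if prev == next: the ASCE itself may have changed */
	shadow_asce = next->asce;
	if (prev == next)
		return;
	/* ... attach/flush work needed only for a real context change ... */
}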
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
 #endif
 }
 
-static inline void clear_page(void *page)
-{
-	register unsigned long reg1 asm ("1") = 0;
-	register void *reg2 asm ("2") = page;
-	register unsigned long reg3 asm ("3") = 4096;
-	asm volatile(
-		"	mvcl	2,0"
-		: "+d" (reg2), "+d" (reg3) : "d" (reg1)
-		: "memory", "cc");
-}
+#define clear_page(page)	memset((page), 0, PAGE_SIZE)
 
 /*
  * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
...
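The page.h hunk drops the hand-coded MVCL sequence in favour of a plain memset(), matching the shortlog entry "s390: let the compiler do page clearing": the compiler is then free to pick whatever clearing sequence it considers best for the target. A self-contained sketch of the resulting macro shape; PAGE_SIZE is defined locally here only for illustration (in the kernel it comes from the architecture headers):

/* Hedged sketch, userspace-compilable illustration of the new macro form. */
#include <string.h>

#define PAGE_SIZE	4096UL
#define clear_page(page)	memset((page), 0, PAGE_SIZE)

static char page_buf[PAGE_SIZE];

int main(void)
{
	clear_page(page_buf);	/* expands to memset(page_buf, 0, 4096UL) */
	return 0;
}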
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
 	insn->offset = (entry->target - entry->code) >> 1;
 }
 
-static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+			   struct insn *new)
 {
 	unsigned char *ipc = (unsigned char *)entry->code;
-	unsigned char *ipe = (unsigned char *)insn;
+	unsigned char *ipe = (unsigned char *)expected;
+	unsigned char *ipn = (unsigned char *)new;
 
 	pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
 	pr_emerg("Found:    %02x %02x %02x %02x %02x %02x\n",
 		 ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
 	pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
 		 ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+	pr_emerg("New:      %02x %02x %02x %02x %02x %02x\n",
+		 ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
 	panic("Corrupted kernel text");
 }
 
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry,
 	}
 	if (init) {
 		if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
-			jump_label_bug(entry, &old);
+			jump_label_bug(entry, &orignop, &new);
 	} else {
 		if (memcmp((void *)entry->code, &old, sizeof(old)))
-			jump_label_bug(entry, &old);
+			jump_label_bug(entry, &old, &new);
 	}
 	probe_kernel_write((void *)entry->code, &new, sizeof(new));
 }
...
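The jump_label.c changes give the sanity check a third operand, so a mismatch report shows the bytes found at the patch site, the bytes expected there, and the bytes about to be written. A self-contained sketch of that check pattern; patch_site, report and INSN_LEN are hypothetical names, not the kernel implementation, and the sketch returns an error where the kernel panics:

/* Hedged sketch: verify a patch site holds the expected instruction
 * before overwriting it; dump found/expected/new bytes on mismatch. */
#include <stdio.h>
#include <string.h>

#define INSN_LEN 6	/* s390 jump-label sites are 6 bytes wide */

static void report(const char *tag, const unsigned char *p)
{
	printf("%-9s %02x %02x %02x %02x %02x %02x\n",
	       tag, p[0], p[1], p[2], p[3], p[4], p[5]);
}

int patch_site(unsigned char *site, const unsigned char *expected,
	       const unsigned char *new_insn)
{
	if (memcmp(site, expected, INSN_LEN)) {
		report("Found:", site);
		report("Expected:", expected);
		report("New:", new_insn);
		return -1;	/* the kernel panics ("Corrupted kernel text") instead */
	}
	memcpy(site, new_insn, INSN_LEN);
	return 0;
}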
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
+	jump_label_apply_nops(me);
 	vfree(me->arch.syminfo);
 	me->arch.syminfo = NULL;
 	return 0;
...
@@ -18,7 +18,7 @@
 
 static DEFINE_PER_CPU(struct cpuid, cpu_id);
 
-void cpu_relax(void)
+void notrace cpu_relax(void)
 {
 	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
 		asm volatile("diag 0,0,0x44");
...
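cpu_relax() is spun in very tight busy-wait loops throughout the kernel; per the shortlog, tracing it could crash the machine when switching tracers, so it is now marked notrace and left alone by ftrace. A minimal sketch, assuming the kernel's definition of notrace as __attribute__((no_instrument_function)); my_cpu_relax is a hypothetical stand-in:

/* Hedged sketch: exclude a hot busy-wait helper from function instrumentation. */
#define notrace __attribute__((no_instrument_function))

void notrace my_cpu_relax(void)
{
	__asm__ volatile("" ::: "memory");	/* compiler barrier only, in this sketch */
}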
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
 	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
 	return (void __iomem *) addr + offset;
 }
-EXPORT_SYMBOL_GPL(pci_iomap_range);
+EXPORT_SYMBOL(pci_iomap_range);
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 	}
 	spin_unlock(&zpci_iomap_lock);
 }
-EXPORT_SYMBOL_GPL(pci_iounmap);
+EXPORT_SYMBOL(pci_iounmap);
 
 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
 		    int size, u32 *val)
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
 		airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
 }
 
-static void zpci_map_resources(struct zpci_dev *zdev)
+static void zpci_map_resources(struct pci_dev *pdev)
 {
-	struct pci_dev *pdev = zdev->pdev;
 	resource_size_t len;
 	int i;
 
@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
 	}
 }
 
-static void zpci_unmap_resources(struct zpci_dev *zdev)
+static void zpci_unmap_resources(struct pci_dev *pdev)
 {
-	struct pci_dev *pdev = zdev->pdev;
 	resource_size_t len;
 	int i;
 
@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev)
 	zdev->pdev = pdev;
 	pdev->dev.groups = zpci_attr_groups;
-	zpci_map_resources(zdev);
+	zpci_map_resources(pdev);
 
 	for (i = 0; i < PCI_BAR_COUNT; i++) {
 		res = &pdev->resource[i];
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev)
 	return 0;
 }
 
+void pcibios_release_device(struct pci_dev *pdev)
+{
+	zpci_unmap_resources(pdev);
+}
+
 int pcibios_enable_device(struct pci_dev *pdev, int mask)
 {
 	struct zpci_dev *zdev = get_zdev(pdev);
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
 	zdev->pdev = pdev;
 	zpci_debug_init_device(zdev);
 	zpci_fmb_enable_device(zdev);
-	zpci_map_resources(zdev);
 
 	return pci_enable_resources(pdev, mask);
 }
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev)
 {
 	struct zpci_dev *zdev = get_zdev(pdev);
 
-	zpci_unmap_resources(zdev);
 	zpci_fmb_disable_device(zdev);
 	zpci_debug_exit_device(zdev);
 	zdev->pdev = NULL;
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev)
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 static int zpci_restore(struct device *dev)
 {
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct zpci_dev *zdev = get_zdev(pdev);
 	int ret = 0;
 
 	if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev)
 	if (ret)
 		goto out;
 
-	zpci_map_resources(zdev);
+	zpci_map_resources(pdev);
 	zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
 			   zdev->start_dma + zdev->iommu_size - 1,
 			   (u64) zdev->dma_table);
@@ -709,12 +711,14 @@ static int zpci_restore(struct device *dev)
 
 static int zpci_freeze(struct device *dev)
 {
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct zpci_dev *zdev = get_zdev(pdev);
 
 	if (zdev->state != ZPCI_FN_STATE_ONLINE)
 		return 0;
 
 	zpci_unregister_ioat(zdev, 0);
+	zpci_unmap_resources(pdev);
 	return clp_disable_fh(zdev);
 }
...
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 	if (copy_from_user(buf, user_buffer, length))
 		goto out;
 
-	memcpy_toio(io_addr, buf, length);
-	ret = 0;
+	ret = zpci_memcpy_toio(io_addr, buf, length);
 out:
 	if (buf != local_buf)
 		kfree(buf);
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 		goto out;
 
 	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
 
-	ret = -EFAULT;
-	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
+		ret = -EFAULT;
 		goto out;
+	}
 
-	memcpy_fromio(buf, io_addr, length);
-	if (copy_to_user(user_buffer, buf, length))
+	ret = zpci_memcpy_fromio(buf, io_addr, length);
+	if (ret)
 		goto out;
-
-	ret = 0;
+	if (copy_to_user(user_buffer, buf, length))
+		ret = -EFAULT;
 out:
 	if (buf != local_buf)
 		kfree(buf);
...
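The pci_mmio.c change fixes the possible information leak named in the shortlog: the read path now uses zpci_memcpy_fromio() and checks its return value before copy_to_user(), so a failed device read can no longer result in uninitialized kernel memory being handed to user space. A self-contained sketch of the ordering that matters; device_read() and give_to_caller() are hypothetical stand-ins for the kernel helpers:

/* Hedged sketch: never expose a buffer whose fill step may have failed. */
#include <errno.h>
#include <stddef.h>

int device_read(void *dst, size_t len);			/* may fail, leaving dst untouched */
int give_to_caller(void *dst, const void *src, size_t len);	/* copy out, 0 on success */

int read_op(void *user_dst, size_t len)
{
	char buf[64];			/* uninitialized scratch buffer */
	int ret;

	if (len > sizeof(buf))
		return -EINVAL;
	ret = device_read(buf, len);
	if (ret)
		return ret;		/* bail out before buf's stale contents can leak */
	if (give_to_caller(user_dst, buf, len))
		return -EFAULT;
	return 0;
}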
@@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 	 * parse input
 	 */
 	num_of_segments = 0;
-	for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
+	for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
 		for (j = i; (buf[j] != ':') &&
 			(buf[j] != '\0') &&
 			(buf[j] != '\n') &&
...
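The dcssblk fix only reorders the loop condition: with &&'s left-to-right short-circuit evaluation, putting i < count first guarantees buf[i] is never read past the end of the buffer. A self-contained sketch of the two orderings; count_until_newline is a hypothetical helper, not the driver code:

/* Hedged sketch: the bounds check must come before the dereference. */
#include <stddef.h>

size_t count_until_newline(const char *buf, size_t count)
{
	size_t i;

	/* fixed form: i < count is evaluated before buf[i] is touched */
	for (i = 0; i < count && buf[i] != '\0' && buf[i] != '\n'; i++)
		;

	/* buggy form the patch removes:
	 *   for (i = 0; buf[i] != '\0' && buf[i] != '\n' && i < count; i++)
	 * reads buf[count] before noticing that i has reached the limit. */
	return i;
}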
@@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq)
 			add = 0;
 			continue;
 		}
-		for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
+		for (pos = 0; pos < iter->aob->request.msb_count; pos++) {
 			if (clusters_intersect(req, iter->request[pos]) &&
 			    (rq_data_dir(req) == WRITE ||
 			     rq_data_dir(iter->request[pos]) == WRITE)) {
...
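The scm_block fix is a classic off-by-one: msb_count is the number of populated entries, so the valid indices run from 0 to msb_count - 1 and the loop has to use < rather than <=. A self-contained sketch; request and msb_count mirror the field names from the hunk, but the code is only an illustration:

/* Hedged sketch: "pos <= count" visits index count, one past the last
 * populated entry, and compares against data that was never filled in. */
#include <stdio.h>

#define MAX_MSB 8

int main(void)
{
	int request[MAX_MSB] = { 11, 22, 33 };	/* only 3 entries are in use */
	unsigned int msb_count = 3;
	unsigned int pos;

	for (pos = 0; pos < msb_count; pos++)	/* fixed: stops at msb_count - 1 */
		printf("entry %u = %d\n", pos, request[pos]);

	return 0;
}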