Commit 286e050b authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Martin Schwidefsky:
 "Recent bug fixes, one of them touches a common code file.

  It adds two #ifndef/#endif pairs to asm-generic/io.h to be able to
  override xlate_dev_kmem_ptr and xlate_dev_mem_ptr."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/pgtable: Fix gmap notifier address
  s390/dasd: fix handling of gone paths
  s390/pgtable: Fix check for pgste/storage key handling
  arch: s390: appldata: using strncpy() and strnlen() instead of sprintf()
  s390/smp: lost IPIs on cpu hotplug
  kernel: Fix s390 absolute memory access for /dev/mem
  s390/dma: do not call debug_dma after free
parents 7d80fea4 e86cbd87
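
The asm-generic/io.h change referenced in the pull message follows the usual override convention: the architecture header defines the symbol as a macro (to its own name, for a real function), and the generic header supplies its fallback only when that macro is absent. A minimal sketch of the pattern, with the tree layout and surrounding code simplified for illustration:

	/* arch header (arch/s390/include/asm/io.h in this series): declare the real
	 * translation function and define the macro to its own name so the generic
	 * fallback below is skipped. */
	void *xlate_dev_mem_ptr(unsigned long phys);
	#define xlate_dev_mem_ptr xlate_dev_mem_ptr

	/* generic header (include/asm-generic/io.h): default used by every
	 * architecture that does not override the hook. */
	#ifndef xlate_dev_mem_ptr
	#define xlate_dev_mem_ptr(p)	__va(p)	/* default: direct-mapped kernel address */
	#endif

The same pairing is applied to xlate_dev_kmem_ptr, whose generic default simply returns the pointer unchanged.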
@@ -212,7 +212,9 @@ appldata_timer_handler(ctl_table *ctl, int write,
 		return 0;
 	}
 	if (!write) {
-		len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
+		strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
+			ARRAY_SIZE(buf));
+		len = strnlen(buf, ARRAY_SIZE(buf));
 		if (len > *lenp)
 			len = *lenp;
 		if (copy_to_user(buffer, buf, len))
@@ -317,7 +317,8 @@ appldata_generic_handler(ctl_table *ctl, int write,
 		return 0;
 	}
 	if (!write) {
-		len = sprintf(buf, ops->active ? "1\n" : "0\n");
+		strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
+		len = strnlen(buf, ARRAY_SIZE(buf));
 		if (len > *lenp)
 			len = *lenp;
 		if (copy_to_user(buffer, buf, len)) {
...
@@ -71,8 +71,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
 }
 
 #endif /* _ASM_S390_DMA_MAPPING_H */
@@ -36,6 +36,7 @@ static inline void * phys_to_virt(unsigned long address)
 }
 
 void *xlate_dev_mem_ptr(unsigned long phys);
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
 void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 /*
...
@@ -646,7 +646,7 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 	unsigned long address, bits;
 	unsigned char skey;
 
-	if (!pte_present(*ptep))
+	if (pte_val(*ptep) & _PAGE_INVALID)
 		return pgste;
 	address = pte_val(*ptep) & PAGE_MASK;
 	skey = page_get_storage_key(address);
@@ -680,7 +680,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 #ifdef CONFIG_PGSTE
 	int young;
 
-	if (!pte_present(*ptep))
+	if (pte_val(*ptep) & _PAGE_INVALID)
 		return pgste;
 	/* Get referenced bit from storage key */
 	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
@@ -706,7 +706,7 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 	unsigned long address;
 	unsigned long okey, nkey;
 
-	if (!pte_present(entry))
+	if (pte_val(entry) & _PAGE_INVALID)
 		return;
 	address = pte_val(entry) & PAGE_MASK;
 	okey = nkey = page_get_storage_key(address);
@@ -1098,6 +1098,9 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 	pte = *ptep;
 	if (!mm_exclusive(mm))
 		__ptep_ipte(address, ptep);
+
+	if (mm_has_pgste(mm))
+		pgste = pgste_update_all(&pte, pgste);
 	return pte;
 }
@@ -1105,9 +1108,13 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 					   unsigned long address,
 					   pte_t *ptep, pte_t pte)
 {
+	pgste_t pgste;
+
 	if (mm_has_pgste(mm)) {
+		pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
+		pgste_set_key(ptep, pgste, pte);
 		pgste_set_pte(ptep, pte);
-		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
+		pgste_set_unlock(ptep, pgste);
 	} else
 		*ptep = pte;
 }
...
@@ -428,34 +428,27 @@ void smp_stop_cpu(void)
  * This is the main routine where commands issued by other
  * cpus are handled.
  */
-static void do_ext_call_interrupt(struct ext_code ext_code,
-				  unsigned int param32, unsigned long param64)
+static void smp_handle_ext_call(void)
 {
 	unsigned long bits;
-	int cpu;
 
-	cpu = smp_processor_id();
-	if (ext_code.code == 0x1202)
-		inc_irq_stat(IRQEXT_EXC);
-	else
-		inc_irq_stat(IRQEXT_EMS);
-	/*
-	 * handle bit signal external calls
-	 */
-	bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
+	/* handle bit signal external calls */
+	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
 	if (test_bit(ec_stop_cpu, &bits))
 		smp_stop_cpu();
 	if (test_bit(ec_schedule, &bits))
 		scheduler_ipi();
 	if (test_bit(ec_call_function, &bits))
 		generic_smp_call_function_interrupt();
 	if (test_bit(ec_call_function_single, &bits))
 		generic_smp_call_function_single_interrupt();
+}
+
+static void do_ext_call_interrupt(struct ext_code ext_code,
+				  unsigned int param32, unsigned long param64)
+{
+	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
+	smp_handle_ext_call();
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -760,6 +753,8 @@ int __cpu_disable(void)
 {
 	unsigned long cregs[16];
 
+	/* Handle possible pending IPIs */
+	smp_handle_ext_call();
 	set_cpu_online(smp_processor_id(), false);
 	/* Disable pseudo page faults on this cpu. */
 	pfault_fini();
...
@@ -492,7 +492,7 @@ static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
 	mp = (struct gmap_pgtable *) page->index;
 	rmap->gmap = gmap;
 	rmap->entry = segment_ptr;
-	rmap->vmaddr = address;
+	rmap->vmaddr = address & PMD_MASK;
 	spin_lock(&mm->page_table_lock);
 	if (*segment_ptr == segment) {
 		list_add(&rmap->list, &mp->mapper);
...
@@ -3440,8 +3440,16 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
 			device->path_data.opm &= ~eventlpm;
 			device->path_data.ppm &= ~eventlpm;
 			device->path_data.npm &= ~eventlpm;
-			if (oldopm && !device->path_data.opm)
-				dasd_generic_last_path_gone(device);
+			if (oldopm && !device->path_data.opm) {
+				dev_warn(&device->cdev->dev,
+					 "No verified channel paths remain "
+					 "for the device\n");
+				DBF_DEV_EVENT(DBF_WARNING, device,
+					      "%s", "last verified path gone");
+				dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+				dasd_device_set_stop_bits(device,
+							  DASD_STOPPED_DC_WAIT);
+			}
 		}
 		if (path_event[chp] & PE_PATH_AVAILABLE) {
 			device->path_data.opm &= ~eventlpm;
...
@@ -343,8 +343,12 @@ extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_GENERIC_IOMAP */
 #endif /* CONFIG_HAS_IOPORT */
 
+#ifndef xlate_dev_kmem_ptr
 #define xlate_dev_kmem_ptr(p)	p
+#endif
+#ifndef xlate_dev_mem_ptr
 #define xlate_dev_mem_ptr(p)	__va(p)
+#endif
 
 #ifdef CONFIG_VIRT_TO_BUS
 #ifndef virt_to_bus
...
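
The point of making these hooks overridable shows up in how /dev/mem consumes them: drivers/char/mem.c translates the target physical address through xlate_dev_mem_ptr() before copying and releases any temporary mapping with unxlate_dev_mem_ptr() afterwards, which is the hook the "Fix s390 absolute memory access for /dev/mem" patch in this series relies on. Roughly, as a paraphrased sketch of that read path (error handling trimmed, not the exact mem.c code):

	/* inside the read loop of read_mem(), for a chunk of sz bytes at physical p */
	void *ptr = xlate_dev_mem_ptr(p);	/* arch override, or generic __va(p) */
	if (!ptr)
		return -EFAULT;
	if (copy_to_user(buf, ptr, sz)) {
		unxlate_dev_mem_ptr(p, ptr);	/* drop any temporary mapping */
		return -EFAULT;
	}
	unxlate_dev_mem_ptr(p, ptr);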