Commit ebbc9570 authored by Alexander Gordeev

s390/crash: support multi-segment iterators

Make it possible to handle not only single-, but also multi-
segment iterators in copy_oldmem_iter() callback. Change the
semantics of called functions to match the iterator model -
instead of an error code the exact number of bytes copied is
returned.

The swap page used to copy data to user space is now used for
kernel space too. That does not introduce any performance impact.
Suggested-by: Matthew Wilcox <willy@infradead.org>
Fixes: cc02e6e2 ("s390/crash: add missing iterator advance in copy_oldmem_page()")
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Tested-by: Alexander Egorenkov <egorenar@linux.ibm.com>
Link: https://lore.kernel.org/r/5af6da3a0bffe48a90b0b7139ecf6a818b2d18e8.1658206891.git.agordeev@linux.ibm.com
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent 6d2e5a4a
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
#ifndef _ASM_S390_OS_INFO_H #ifndef _ASM_S390_OS_INFO_H
#define _ASM_S390_OS_INFO_H #define _ASM_S390_OS_INFO_H
#include <linux/uio.h>
#define OS_INFO_VERSION_MAJOR 1 #define OS_INFO_VERSION_MAJOR 1
#define OS_INFO_VERSION_MINOR 1 #define OS_INFO_VERSION_MINOR 1
#define OS_INFO_MAGIC 0x4f53494e464f535aULL /* OSINFOSZ */ #define OS_INFO_MAGIC 0x4f53494e464f535aULL /* OSINFOSZ */
...@@ -39,7 +41,20 @@ u32 os_info_csum(struct os_info *os_info); ...@@ -39,7 +41,20 @@ u32 os_info_csum(struct os_info *os_info);
#ifdef CONFIG_CRASH_DUMP #ifdef CONFIG_CRASH_DUMP
void *os_info_old_entry(int nr, unsigned long *size); void *os_info_old_entry(int nr, unsigned long *size);
int copy_oldmem_kernel(void *dst, unsigned long src, size_t count); size_t copy_oldmem_iter(struct iov_iter *iter, unsigned long src, size_t count);
/*
 * Copy memory of the old, dumped system into a kernel-space buffer.
 *
 * @dst:   destination buffer in kernel space
 * @src:   start address within the old memory image
 * @count: number of bytes to copy
 *
 * Wraps the destination in a single-segment kvec iterator and delegates
 * to copy_oldmem_iter(). Returns 0 on success, or -EFAULT when fewer
 * than @count bytes could be copied.
 */
static inline int copy_oldmem_kernel(void *dst, unsigned long src, size_t count)
{
	struct kvec kvec = {
		.iov_base = dst,
		.iov_len  = count,
	};
	struct iov_iter iter;

	iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
	/* A short copy means part of the old image was unreachable. */
	return copy_oldmem_iter(&iter, src, count) < count ? -EFAULT : 0;
}
#else #else
static inline void *os_info_old_entry(int nr, unsigned long *size) static inline void *os_info_old_entry(int nr, unsigned long *size)
{ {
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#define EXT_SCCB_READ_CPU (3 * PAGE_SIZE) #define EXT_SCCB_READ_CPU (3 * PAGE_SIZE)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/uio.h>
#include <asm/chpid.h> #include <asm/chpid.h>
#include <asm/cpu.h> #include <asm/cpu.h>
...@@ -142,8 +143,7 @@ int sclp_pci_deconfigure(u32 fid); ...@@ -142,8 +143,7 @@ int sclp_pci_deconfigure(u32 fid);
int sclp_ap_configure(u32 apid); int sclp_ap_configure(u32 apid);
int sclp_ap_deconfigure(u32 apid); int sclp_ap_deconfigure(u32 apid);
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid); int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid);
int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count); size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count);
int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
void sclp_ocf_cpc_name_copy(char *dst); void sclp_ocf_cpc_name_copy(char *dst);
static inline int sclp_get_core_info(struct sclp_core_info *info, int early) static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
......
...@@ -116,102 +116,35 @@ void __init save_area_add_vxrs(struct save_area *sa, __vector128 *vxrs) ...@@ -116,102 +116,35 @@ void __init save_area_add_vxrs(struct save_area *sa, __vector128 *vxrs)
memcpy(sa->vxrs_high, vxrs + 16, 16 * sizeof(__vector128)); memcpy(sa->vxrs_high, vxrs + 16, 16 * sizeof(__vector128));
} }
/* static size_t copy_to_iter_real(struct iov_iter *iter, unsigned long src, size_t count)
* Return physical address for virtual address
*/
static inline void *load_real_addr(void *addr)
{
unsigned long real_addr;
asm volatile(
" lra %0,0(%1)\n"
" jz 0f\n"
" la %0,0\n"
"0:"
: "=a" (real_addr) : "a" (addr) : "cc");
return (void *)real_addr;
}
/*
* Copy memory of the old, dumped system to a kernel space virtual address
*/
int copy_oldmem_kernel(void *dst, unsigned long src, size_t count)
{
unsigned long len;
void *ra;
int rc;
while (count) {
if (!oldmem_data.start && src < sclp.hsa_size) {
/* Copy from zfcp/nvme dump HSA area */
len = min(count, sclp.hsa_size - src);
rc = memcpy_hsa_kernel(dst, src, len);
if (rc)
return rc;
} else {
/* Check for swapped kdump oldmem areas */
if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
src -= oldmem_data.start;
len = min(count, oldmem_data.size - src);
} else if (oldmem_data.start && src < oldmem_data.size) {
len = min(count, oldmem_data.size - src);
src += oldmem_data.start;
} else {
len = count;
}
if (is_vmalloc_or_module_addr(dst)) {
ra = load_real_addr(dst);
len = min(PAGE_SIZE - offset_in_page(ra), len);
} else {
ra = dst;
}
if (memcpy_real(ra, src, len))
return -EFAULT;
}
dst += len;
src += len;
count -= len;
}
return 0;
}
/*
* Copy memory from kernel (real) to user (virtual)
*/
static int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count)
{ {
unsigned long offs = 0, size; size_t len, copied, res = 0;
mutex_lock(&memcpy_real_mutex); mutex_lock(&memcpy_real_mutex);
while (offs < count) { while (count) {
size = min(PAGE_SIZE, count - offs); len = min(PAGE_SIZE, count);
if (memcpy_real(memcpy_real_buf, src + offs, size)) if (memcpy_real(memcpy_real_buf, src, len))
break; break;
if (copy_to_user(dest + offs, memcpy_real_buf, size)) copied = copy_to_iter(memcpy_real_buf, len, iter);
count -= copied;
src += copied;
res += copied;
if (copied < len)
break; break;
offs += size;
} }
mutex_unlock(&memcpy_real_mutex); mutex_unlock(&memcpy_real_mutex);
if (offs < count) return res;
return -EFAULT;
return 0;
} }
/* size_t copy_oldmem_iter(struct iov_iter *iter, unsigned long src, size_t count)
* Copy memory of the old, dumped system to a user space virtual address
*/
static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
{ {
unsigned long len; size_t len, copied, res = 0;
int rc;
while (count) { while (count) {
if (!oldmem_data.start && src < sclp.hsa_size) { if (!oldmem_data.start && src < sclp.hsa_size) {
/* Copy from zfcp/nvme dump HSA area */ /* Copy from zfcp/nvme dump HSA area */
len = min(count, sclp.hsa_size - src); len = min(count, sclp.hsa_size - src);
rc = memcpy_hsa_user(dst, src, len); copied = memcpy_hsa_iter(iter, src, len);
if (rc)
return rc;
} else { } else {
/* Check for swapped kdump oldmem areas */ /* Check for swapped kdump oldmem areas */
if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) { if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) {
...@@ -223,15 +156,15 @@ static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count) ...@@ -223,15 +156,15 @@ static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count)
} else { } else {
len = count; len = count;
} }
rc = copy_to_user_real(dst, src, len); copied = copy_to_iter_real(iter, src, len);
if (rc)
return rc;
} }
dst += len; count -= copied;
src += len; src += copied;
count -= len; res += copied;
if (copied < len)
break;
} }
return 0; return res;
} }
/* /*
...@@ -241,26 +174,9 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, ...@@ -241,26 +174,9 @@ ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
unsigned long offset) unsigned long offset)
{ {
unsigned long src; unsigned long src;
int rc;
if (!(iter_is_iovec(iter) || iov_iter_is_kvec(iter)))
return -EINVAL;
/* Multi-segment iterators are not supported */
if (iter->nr_segs > 1)
return -EINVAL;
if (!csize)
return 0;
src = pfn_to_phys(pfn) + offset; src = pfn_to_phys(pfn) + offset;
return copy_oldmem_iter(iter, src, csize);
/* XXX: pass the iov_iter down to a common function */
if (iter_is_iovec(iter))
rc = copy_oldmem_user(iter->iov->iov_base, src, csize);
else
rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize);
if (rc < 0)
return rc;
iov_iter_advance(iter, csize);
return csize;
} }
/* /*
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/panic_notifier.h> #include <linux/panic_notifier.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/uio.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/ipl.h> #include <asm/ipl.h>
...@@ -54,38 +55,37 @@ static DEFINE_MUTEX(hsa_buf_mutex); ...@@ -54,38 +55,37 @@ static DEFINE_MUTEX(hsa_buf_mutex);
static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE); static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE);
/* /*
* Copy memory from HSA to user memory (not reentrant): * Copy memory from HSA to iterator (not reentrant):
* *
* @dest: User buffer where memory should be copied to * @iter: Iterator where memory should be copied to
* @src: Start address within HSA where data should be copied * @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied * @count: Size of buffer, which should be copied
*/ */
int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count) size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
{ {
unsigned long offset, bytes; size_t bytes, copied, res = 0;
unsigned long offset;
if (!hsa_available) if (!hsa_available)
return -ENODATA; return 0;
mutex_lock(&hsa_buf_mutex); mutex_lock(&hsa_buf_mutex);
while (count) { while (count) {
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) { if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n"); TRACE("sclp_sdias_copy() failed\n");
mutex_unlock(&hsa_buf_mutex); break;
return -EIO;
} }
offset = src % PAGE_SIZE; offset = src % PAGE_SIZE;
bytes = min(PAGE_SIZE - offset, count); bytes = min(PAGE_SIZE - offset, count);
if (copy_to_user(dest, hsa_buf + offset, bytes)) { copied = copy_to_iter(hsa_buf + offset, bytes, iter);
mutex_unlock(&hsa_buf_mutex); count -= copied;
return -EFAULT; src += copied;
} res += copied;
src += bytes; if (copied < bytes)
dest += bytes; break;
count -= bytes;
} }
mutex_unlock(&hsa_buf_mutex); mutex_unlock(&hsa_buf_mutex);
return 0; return res;
} }
/* /*
...@@ -95,28 +95,16 @@ int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count) ...@@ -95,28 +95,16 @@ int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
* @src: Start address within HSA where data should be copied * @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied * @count: Size of buffer, which should be copied
*/ */
int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) static inline int memcpy_hsa_kernel(void *dst, unsigned long src, size_t count)
{ {
unsigned long offset, bytes; struct iov_iter iter;
struct kvec kvec;
if (!hsa_available) kvec.iov_base = dst;
return -ENODATA; kvec.iov_len = count;
iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
mutex_lock(&hsa_buf_mutex); if (memcpy_hsa_iter(&iter, src, count) < count)
while (count) { return -EIO;
if (sclp_sdias_copy(hsa_buf, src / PAGE_SIZE + 2, 1)) {
TRACE("sclp_sdias_copy() failed\n");
mutex_unlock(&hsa_buf_mutex);
return -EIO;
}
offset = src % PAGE_SIZE;
bytes = min(PAGE_SIZE - offset, count);
memcpy(dest, hsa_buf + offset, bytes);
src += bytes;
dest += bytes;
count -= bytes;
}
mutex_unlock(&hsa_buf_mutex);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment