Commit 302d1858 authored by Linus Torvalds

Merge tag 's390-6.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Alexander Gordeev:

 - do not enable support for 31-bit Enterprise Systems Architecture
   (ESA) ELF binaries by default

 - drop automatic CONFIG_KEXEC selection and instead set CONFIG_KEXEC=y
   explicitly for defconfig and debug_defconfig only

 - fix zpci_get_max_io_size() to allow PCI block stores where plain PCI
   stores would otherwise be used

 - remove unneeded tsk variable in do_exception() fault handler

 - remove the unneeded EXPORT_SYMBOL for __load_fpu_regs(), which is
   only called from core kernel code

 - remove leftover comment from s390_fpregs_set() callback

 - a few cleanups to the Processor Activity Instrumentation (PAI) code
   (which is built on the perf framework)

 - replace Wenjia Zhang with Thorsten Winkler as s390 Inter-User
   Communication Vehicle (IUCV) networking maintainer

 - fix all scenarios where queues previously removed from a guest's
   Adjunct-Processor (AP) configuration fail to re-appear in a reset
   state when they are subsequently made available to the guest again

* tag 's390-6.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/vfio-ap: do not reset queue removed from host config
  s390/vfio-ap: reset queues associated with adapter for queue unbound from driver
  s390/vfio-ap: reset queues filtered from the guest's AP config
  s390/vfio-ap: let on_scan_complete() callback filter matrix and update guest's APCB
  s390/vfio-ap: loop over the shadow APCB when filtering guest's AP configuration
  s390/vfio-ap: always filter entire AP matrix
  s390/net: add Thorsten Winkler as maintainer
  s390/pai_ext: split function paiext_push_sample
  s390/pai_ext: rework function paiext_copy arguments
  s390/pai: rework paiXXX_start and paiXXX_stop functions
  s390/pai_crypto: split function paicrypt_push_sample
  s390/pai: rework paixxxx_getctr interface
  s390/ptrace: remove leftover comment
  s390/fpu: remove __load_fpu_regs() export
  s390/mm,fault: remove not needed tsk variable
  s390/pci: fix max size calculation in zpci_memcpy_toio()
  s390/kexec: do not automatically select KEXEC option
  s390/compat: change default for CONFIG_COMPAT to "n"
parents b4442cad b9bd10c4
@@ -19124,7 +19124,7 @@ F: drivers/iommu/s390-iommu.c
S390 IUCV NETWORK LAYER
M: Alexandra Winter <wintera@linux.ibm.com>
-M: Wenjia Zhang <wenjia@linux.ibm.com>
+M: Thorsten Winkler <twinkler@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@@ -19143,7 +19143,7 @@ F: arch/s390/mm
S390 NETWORK DRIVERS
M: Alexandra Winter <wintera@linux.ibm.com>
-M: Wenjia Zhang <wenjia@linux.ibm.com>
+M: Thorsten Winkler <twinkler@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@@ -216,7 +216,6 @@ config S390
select HAVE_VIRT_CPU_ACCOUNTING_IDLE
select IOMMU_HELPER if PCI
select IOMMU_SUPPORT if PCI
-select KEXEC
select MMU_GATHER_MERGE_VMAS
select MMU_GATHER_NO_GATHER
select MMU_GATHER_RCU_TABLE_FREE
@@ -443,7 +442,7 @@ config COMMAND_LINE_SIZE
line.
config COMPAT
-def_bool y
+def_bool n
prompt "Kernel support for 31 bit emulation"
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
@@ -454,7 +453,9 @@ config COMPAT
Select this option if you want to enable your system kernel to
handle system-calls from ELF binaries for 31 bit ESA. This option
(and some other stuff like libraries and such) is needed for
-executing 31 bit applications. It is safe to say "Y".
+executing 31 bit applications.
+If unsure say N.
config SMP
def_bool y
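Since COMPAT now defaults to "n", systems that still need to run 31-bit ELF binaries have to opt back in explicitly, e.g. with a config fragment like this (illustrative):

CONFIG_COMPAT=y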
@@ -40,6 +40,7 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
+CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
@@ -38,6 +38,7 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
+CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
@@ -10,7 +10,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_CRASH_DUMP=y
CONFIG_MARCH_Z13=y
-# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y
# CONFIG_CHSC_SCH is not set
@@ -11,6 +11,8 @@
/* I/O size constraints */
#define ZPCI_MAX_READ_SIZE 8
#define ZPCI_MAX_WRITE_SIZE 128
+#define ZPCI_BOUNDARY_SIZE (1 << 12)
+#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1)
/* I/O Map */
#define ZPCI_IOMAP_SHIFT 48
@@ -125,16 +127,18 @@ static inline int zpci_read_single(void *dst, const volatile void __iomem *src,
int zpci_write_block(volatile void __iomem *dst, const void *src,
unsigned long len);
-static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
+static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max)
{
-int count = len > max ? max : len, size = 1;
+int offset = dst & ZPCI_BOUNDARY_MASK;
+int size;
-while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
-dst = dst >> 1;
-src = src >> 1;
-size = size << 1;
-}
-return size;
+size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);
+if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8))
+return size;
+if (size >= 8)
+return 8;
+return rounddown_pow_of_two(size);
}
static inline int zpci_memcpy_fromio(void *dst,
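To make the new size calculation concrete, here is a minimal user-space model of zpci_get_max_io_size() (a sketch for illustration only; min3(), IS_ALIGNED() and rounddown_pow_of_two() are open-coded stand-ins for the kernel helpers):

#include <stdio.h>

#define ZPCI_BOUNDARY_SIZE (1 << 12)
#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1)

static int min3(int a, int b, int c)
{
	int m = a < b ? a : b;

	return m < c ? m : c;
}

static int max_io_size(unsigned long long src, unsigned long long dst,
		       int len, int max)
{
	int offset = dst & ZPCI_BOUNDARY_MASK;
	/* never cross a 4K boundary, never exceed len or max */
	int size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max);

	/* 8-byte aligned source, destination and size: use the chunk as is */
	if (!(src % 8) && !(dst % 8) && !(size % 8))
		return size;
	/* otherwise at most one 8-byte access ... */
	if (size >= 8)
		return 8;
	/* ... or the largest power of two that still fits (4, 2 or 1) */
	while (size & (size - 1))
		size &= size - 1;
	return size;
}

int main(void)
{
	/* dst sits 8 bytes below a 4K boundary: chunk is capped at 8 */
	printf("%d\n", max_io_size(0x1000, 0x2ff8, 100, 128));
	/* 96 aligned bytes below the cap of 128: full chunk of 96 */
	printf("%d\n", max_io_size(0x1000, 0x2000, 96, 128));
	return 0;
}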
@@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst,
int size, rc = 0;
while (n > 0) {
-size = zpci_get_max_write_size((u64 __force) src,
-(u64) dst, n,
-ZPCI_MAX_READ_SIZE);
+size = zpci_get_max_io_size((u64 __force) src,
+(u64) dst, n,
+ZPCI_MAX_READ_SIZE);
rc = zpci_read_single(dst, src, size);
if (rc)
break;
@@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
return -EINVAL;
while (n > 0) {
-size = zpci_get_max_write_size((u64 __force) dst,
-(u64) src, n,
-ZPCI_MAX_WRITE_SIZE);
+size = zpci_get_max_io_size((u64 __force) dst,
+(u64) src, n,
+ZPCI_MAX_WRITE_SIZE);
if (size > 8) /* main path */
rc = zpci_write_block(dst, src, size);
else
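A hypothetical walk-through of the copy loop using the max_io_size() model above: a 10-byte write starting 4 bytes below a 4K boundary is split so that no single access crosses that boundary, and chunks larger than 8 bytes would take the PCISTB "main path" via zpci_write_block():

int main(void)
{
	unsigned long long src = 0x1000, dst = 0x2ffc;
	int n = 10;

	while (n > 0) {
		int size = max_io_size(src, dst, n, 128); /* ZPCI_MAX_WRITE_SIZE */

		printf("%d bytes at 0x%llx\n", size, dst);
		src += size;
		dst += size;
		n -= size;
	}
	/* prints: 4 bytes at 0x2ffc, 4 bytes at 0x3000, 2 bytes at 0x3004 */
	return 0;
}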
@@ -208,7 +208,6 @@ void __load_fpu_regs(void)
}
clear_cpu_flag(CIF_FPU);
}
-EXPORT_SYMBOL(__load_fpu_regs);
void load_fpu_regs(void)
{
@@ -111,11 +111,11 @@ static void paicrypt_event_destroy(struct perf_event *event)
mutex_unlock(&pai_reserve_mutex);
}
-static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
+static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
{
if (kernel)
nr += PAI_CRYPTO_MAXCTR;
-return cpump->page[nr];
+return page[nr];
}
/* Read the counter values. Return value from location in CMP. For event
@@ -129,13 +129,13 @@ static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
int i;
if (event->attr.config != PAI_CRYPTO_BASE) {
-return paicrypt_getctr(cpump,
+return paicrypt_getctr(cpump->page,
event->attr.config - PAI_CRYPTO_BASE,
kernel);
}
for (i = 1; i <= paicrypt_cnt; i++) {
-u64 val = paicrypt_getctr(cpump, i, kernel);
+u64 val = paicrypt_getctr(cpump->page, i, kernel);
if (!val)
continue;
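The page passed to the reworked paicrypt_getctr() holds two counter sets: user-space counters at index nr and kernel counters PAI_CRYPTO_MAXCTR entries above them, which is why callers can sum both views of a counter. A kernel-style sketch of that indexing (the helper name is made up for illustration; u64 as in <linux/types.h>):

/* total (user + kernel) value of counter nr in one PAI crypto page */
static u64 paicrypt_total(unsigned long *page, int nr)
{
	return page[nr] + page[nr + PAI_CRYPTO_MAXCTR];
}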
@@ -317,10 +317,14 @@ static void paicrypt_start(struct perf_event *event, int flags)
* Events are added, deleted and re-added when 2 or more events
* are active at the same time.
*/
-if (!event->hw.last_tag) {
-event->hw.last_tag = 1;
-sum = paicrypt_getall(event); /* Get current value */
-local64_set(&event->hw.prev_count, sum);
+if (!event->attr.sample_period) { /* Counting */
+if (!event->hw.last_tag) {
+event->hw.last_tag = 1;
+sum = paicrypt_getall(event); /* Get current value */
+local64_set(&event->hw.prev_count, sum);
+}
+} else { /* Sampling */
+perf_sched_cb_inc(event->pmu);
}
}
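In counting mode, paicrypt_start() latches the current counter sum in event->hw.prev_count; subsequent reads fold the difference into the event count using the usual perf read pattern. A condensed sketch of that pattern (not the literal kernel code):

static void pai_read_delta(struct perf_event *event)
{
	u64 prev, new;

	do {
		prev = local64_read(&event->hw.prev_count);
		new = paicrypt_getall(event);	/* current accumulated value */
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
	local64_add(new - prev, &event->count);
}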
@@ -336,19 +340,18 @@ static int paicrypt_add(struct perf_event *event, int flags)
local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
}
cpump->event = event;
-if (flags & PERF_EF_START && !event->attr.sample_period) {
-/* Only counting needs initial counter value */
+if (flags & PERF_EF_START)
paicrypt_start(event, PERF_EF_RELOAD);
-}
event->hw.state = 0;
-if (event->attr.sample_period)
-perf_sched_cb_inc(event->pmu);
return 0;
}
static void paicrypt_stop(struct perf_event *event, int flags)
{
-paicrypt_read(event);
+if (!event->attr.sample_period) /* Counting */
+paicrypt_read(event);
+else /* Sampling */
+perf_sched_cb_dec(event->pmu);
event->hw.state = PERF_HES_STOPPED;
}
@@ -357,11 +360,7 @@ static void paicrypt_del(struct perf_event *event, int flags)
struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
struct paicrypt_map *cpump = mp->mapptr;
-if (event->attr.sample_period)
-perf_sched_cb_dec(event->pmu);
-if (!event->attr.sample_period)
-/* Only counting needs to read counter */
-paicrypt_stop(event, PERF_EF_UPDATE);
+paicrypt_stop(event, PERF_EF_UPDATE);
if (--cpump->active_events == 0) {
local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
WRITE_ONCE(S390_lowcore.ccd, 0);
@@ -373,8 +372,7 @@ static void paicrypt_del(struct perf_event *event, int flags)
* 2 bytes: Number of counter
* 8 bytes: Value of counter
*/
-static size_t paicrypt_copy(struct pai_userdata *userdata,
-struct paicrypt_map *cpump,
+static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
bool exclude_user, bool exclude_kernel)
{
int i, outidx = 0;
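The raw sample format described in the comment above ("2 bytes: Number of counter, 8 bytes: Value of counter") corresponds to a packed entry along these lines (a sketch matching that description, not copied from the source):

struct pai_userdata {
	u16 num;	/* counter number */
	u64 value;	/* counter value */
} __packed;		/* 10 bytes per entry, no padding */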
@@ -383,9 +381,9 @@ static size_t paicrypt_copy(struct pai_userdata *userdata,
u64 val = 0;
if (!exclude_kernel)
-val += paicrypt_getctr(cpump, i, true);
+val += paicrypt_getctr(page, i, true);
if (!exclude_user)
-val += paicrypt_getctr(cpump, i, false);
+val += paicrypt_getctr(page, i, false);
if (val) {
userdata[outidx].num = i;
userdata[outidx].value = val;
@@ -395,25 +393,14 @@ static size_t paicrypt_copy(struct pai_userdata *userdata,
return outidx * sizeof(struct pai_userdata);
}
-static int paicrypt_push_sample(void)
+static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
+struct perf_event *event)
{
-struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
-struct paicrypt_map *cpump = mp->mapptr;
-struct perf_event *event = cpump->event;
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;
-size_t rawsize;
int overflow;
-if (!cpump->event) /* No event active */
-return 0;
-rawsize = paicrypt_copy(cpump->save, cpump,
-cpump->event->attr.exclude_user,
-cpump->event->attr.exclude_kernel);
-if (!rawsize) /* No incremented counters */
-return 0;
/* Setup perf sample */
memset(&regs, 0, sizeof(regs));
memset(&raw, 0, sizeof(raw));
@@ -444,6 +431,25 @@ static int paicrypt_push_sample(void)
return overflow;
}
+/* Check if there is data to be saved on schedule out of a task. */
+static int paicrypt_have_sample(void)
+{
+struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+struct paicrypt_map *cpump = mp->mapptr;
+struct perf_event *event = cpump->event;
+size_t rawsize;
+int rc = 0;
+if (!event) /* No event active */
+return 0;
+rawsize = paicrypt_copy(cpump->save, cpump->page,
+cpump->event->attr.exclude_user,
+cpump->event->attr.exclude_kernel);
+if (rawsize) /* Incremented counters */
+rc = paicrypt_push_sample(rawsize, cpump, event);
+return rc;
+}
/* Called on schedule-in and schedule-out. No access to event structure,
* but for sampling only event CRYPTO_ALL is allowed.
*/
@@ -453,7 +459,7 @@ static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
* results on schedule_out and if page was dirty, clear values.
*/
if (!sched_in)
-paicrypt_push_sample();
+paicrypt_have_sample();
}
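For sampling, the rework relies on the generic perf scheduling callback: perf_sched_cb_inc() in paicrypt_start() makes perf invoke the PMU's sched_task() callback around task switches, and perf_sched_cb_dec() in paicrypt_stop() disables it again. The resulting flow, condensed (a sketch of the call chain, not literal code):

/*
 * paicrypt_start()   -> perf_sched_cb_inc(pmu)    enable callbacks
 * task switched out  -> paicrypt_sched_task(.., sched_in=false)
 *                          -> paicrypt_have_sample()
 *                               -> paicrypt_copy()         gather counters
 *                               -> paicrypt_push_sample()  emit raw sample
 * paicrypt_stop()    -> perf_sched_cb_dec(pmu)    disable callbacks
 */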
/* Attribute definitions for paicrypt interface. As with other CPU
@@ -276,9 +276,9 @@ static int paiext_event_init(struct perf_event *event)
return 0;
}
-static u64 paiext_getctr(struct paiext_map *cpump, int nr)
+static u64 paiext_getctr(unsigned long *area, int nr)
{
-return cpump->area[nr];
+return area[nr];
}
/* Read the counter values. Return value from location in buffer. For event
@@ -292,10 +292,11 @@ static u64 paiext_getdata(struct perf_event *event)
int i;
if (event->attr.config != PAI_NNPA_BASE)
-return paiext_getctr(cpump, event->attr.config - PAI_NNPA_BASE);
+return paiext_getctr(cpump->area,
+event->attr.config - PAI_NNPA_BASE);
for (i = 1; i <= paiext_cnt; i++)
-sum += paiext_getctr(cpump, i);
+sum += paiext_getctr(cpump->area, i);
return sum;
}
@@ -320,11 +321,15 @@ static void paiext_start(struct perf_event *event, int flags)
{
u64 sum;
-if (event->hw.last_tag)
-return;
-event->hw.last_tag = 1;
-sum = paiext_getall(event); /* Get current value */
-local64_set(&event->hw.prev_count, sum);
+if (!event->attr.sample_period) { /* Counting */
+if (!event->hw.last_tag) {
+event->hw.last_tag = 1;
+sum = paiext_getall(event); /* Get current value */
+local64_set(&event->hw.prev_count, sum);
+}
+} else { /* Sampling */
+perf_sched_cb_inc(event->pmu);
+}
}
static int paiext_add(struct perf_event *event, int flags)
@@ -341,21 +346,19 @@ static int paiext_add(struct perf_event *event, int flags)
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
__func__, S390_lowcore.aicd, pcb->acc);
}
-if (flags & PERF_EF_START && !event->attr.sample_period) {
-/* Only counting needs initial counter value */
+cpump->event = event;
+if (flags & PERF_EF_START)
paiext_start(event, PERF_EF_RELOAD);
-}
event->hw.state = 0;
-if (event->attr.sample_period) {
-cpump->event = event;
-perf_sched_cb_inc(event->pmu);
-}
return 0;
}
static void paiext_stop(struct perf_event *event, int flags)
{
-paiext_read(event);
+if (!event->attr.sample_period) /* Counting */
+paiext_read(event);
+else /* Sampling */
+perf_sched_cb_dec(event->pmu);
event->hw.state = PERF_HES_STOPPED;
}
@@ -365,12 +368,7 @@ static void paiext_del(struct perf_event *event, int flags)
struct paiext_map *cpump = mp->mapptr;
struct paiext_cb *pcb = cpump->paiext_cb;
-if (event->attr.sample_period)
-perf_sched_cb_dec(event->pmu);
-if (!event->attr.sample_period) {
-/* Only counting needs to read counter */
-paiext_stop(event, PERF_EF_UPDATE);
-}
+paiext_stop(event, PERF_EF_UPDATE);
if (--cpump->active_events == 0) {
/* Disable CPU instruction lookup for PAIE1 control block */
local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
@@ -386,13 +384,12 @@ static void paiext_del(struct perf_event *event, int flags)
* 2 bytes: Number of counter
* 8 bytes: Value of counter
*/
-static size_t paiext_copy(struct paiext_map *cpump)
+static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area)
{
-struct pai_userdata *userdata = cpump->save;
int i, outidx = 0;
for (i = 1; i <= paiext_cnt; i++) {
-u64 val = paiext_getctr(cpump, i);
+u64 val = paiext_getctr(area, i);
if (val) {
userdata[outidx].num = i;
@@ -418,21 +415,14 @@ static size_t paiext_copy(struct paiext_map *cpump)
* sched_task() callback. That callback is not active after paiext_del()
* returns and has deleted the event on that CPU.
*/
-static int paiext_push_sample(void)
+static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump,
+struct perf_event *event)
{
-struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
-struct paiext_map *cpump = mp->mapptr;
-struct perf_event *event = cpump->event;
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;
-size_t rawsize;
int overflow;
-rawsize = paiext_copy(cpump);
-if (!rawsize) /* No incremented counters */
-return 0;
/* Setup perf sample */
memset(&regs, 0, sizeof(regs));
memset(&raw, 0, sizeof(raw));
@@ -461,6 +451,23 @@ static int paiext_push_sample(void)
return overflow;
}
+/* Check if there is data to be saved on schedule out of a task. */
+static int paiext_have_sample(void)
+{
+struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
+struct paiext_map *cpump = mp->mapptr;
+struct perf_event *event = cpump->event;
+size_t rawsize;
+int rc = 0;
+if (!event)
+return 0;
+rawsize = paiext_copy(cpump->save, cpump->area);
+if (rawsize) /* Incremented counters */
+rc = paiext_push_sample(rawsize, cpump, event);
+return rc;
+}
/* Called on schedule-in and schedule-out. No access to event structure,
* but for sampling only event NNPA_ALL is allowed.
*/
@@ -470,7 +477,7 @@ static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
* results on schedule_out and if page was dirty, clear values.
*/
if (!sched_in)
-paiext_push_sample();
+paiext_have_sample();
}
/* Attribute definitions for pai extension1 interface. As with other CPU
@@ -917,7 +917,6 @@ static int s390_fpregs_set(struct task_struct *target,
else
memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
-/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
@@ -280,7 +280,6 @@ static void do_sigbus(struct pt_regs *regs)
static void do_exception(struct pt_regs *regs, int access)
{
struct vm_area_struct *vma;
-struct task_struct *tsk;
unsigned long address;
struct mm_struct *mm;
enum fault_type type;
@@ -289,7 +288,6 @@ static void do_exception(struct pt_regs *regs, int access)
vm_fault_t fault;
bool is_write;
-tsk = current;
/*
* The instruction that caused the program check has
* been nullified. Don't signal single step via SIGTRAP.
@@ -297,7 +295,7 @@ static void do_exception(struct pt_regs *regs, int access)
clear_thread_flag(TIF_PER_TRAP);
if (kprobe_page_fault(regs, 14))
return;
-mm = tsk->mm;
+mm = current->mm;
address = get_fault_address(regs);
is_write = fault_is_write(regs);
type = get_fault_type(regs);
@@ -97,9 +97,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst,
return -EINVAL;
while (n > 0) {
-size = zpci_get_max_write_size((u64 __force) dst,
-(u64 __force) src, n,
-ZPCI_MAX_WRITE_SIZE);
+size = zpci_get_max_io_size((u64 __force) dst,
+(u64 __force) src, n,
+ZPCI_MAX_WRITE_SIZE);
if (size > 8) /* main path */
rc = __pcistb_mio_inuser(dst, src, size, &status);
else
@@ -242,9 +242,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst,
u8 status;
while (n > 0) {
-size = zpci_get_max_write_size((u64 __force) src,
-(u64 __force) dst, n,
-ZPCI_MAX_READ_SIZE);
+size = zpci_get_max_io_size((u64 __force) src,
+(u64 __force) dst, n,
+ZPCI_MAX_READ_SIZE);
rc = __pcilg_mio_inuser(dst, src, size, &status);
if (rc)
break;
@@ -133,6 +133,8 @@ struct ap_matrix_mdev {
* @apqn: the APQN of the AP queue device
* @saved_isc: the guest ISC registered with the GIB interface
* @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
+* @reset_qnode: allows the vfio_ap_queue struct to be added to a list of queues
+* that need to be reset
* @reset_status: the status from the last reset of the queue
* @reset_work: work to wait for queue reset to complete
*/
@@ -143,6 +145,7 @@ struct vfio_ap_queue {
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
struct hlist_node mdev_qnode;
+struct list_head reset_qnode;
struct ap_queue_status reset_status;
struct work_struct reset_work;
};
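The new reset_qnode member lets the driver collect queues that still need a reset on an ad-hoc list and process them in one sweep. A sketch of the pattern it enables (the list handling below is illustrative; function names are made up and not the driver's actual API):

#include <linux/list.h>

static void collect_for_reset(struct vfio_ap_queue *q,
			      struct list_head *qlist)
{
	/* remember q for a later reset */
	list_add_tail(&q->reset_qnode, qlist);
}

static void flush_reset_list(struct list_head *qlist)
{
	struct vfio_ap_queue *q, *tmp;

	list_for_each_entry_safe(q, tmp, qlist, reset_qnode) {
		/* wait for q->reset_work to finish, then drop the node */
		list_del(&q->reset_qnode);
	}
}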