Commit f0d43b3a authored by Linus Torvalds

Merge tag 's390-5.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Heiko Carstens:
 "Besides all the small improvements and cleanups the most notable part
  is the fast vector/SIMD implementation of the ChaCha20 stream cipher,
  which is an adaptation of Andy Polyakov's code for the kernel.

  Summary:

   - add fast vector/SIMD implementation of the ChaCha20 stream cipher,
     which mainly adapts Andy Polyakov's code for the kernel

   - add status attribute to AP queue device so users can easily figure
     out its status

   - fix race in page table release code, and add lots of documentation

   - remove uevent suppress from the cio device driver, since it turned
     out that it generated more problems than it solved

   - quite a lot of virtual vs physical address confusion fixes

   - various other small improvements and cleanups all over the place"

* tag 's390-5.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (39 commits)
  s390/dasd: use default_groups in kobj_type
  s390/sclp_sd: use default_groups in kobj_type
  s390/pci: simplify __pciwb_mio() inline asm
  s390: remove unused TASK_SIZE_OF
  s390/crash_dump: fix virtual vs physical address handling
  s390/crypto: fix compile error for ChaCha20 module
  s390/mm: check 2KB-fragment page on release
  s390/mm: better annotate 2KB pagetable fragments handling
  s390/mm: fix 2KB pgtable release race
  s390/sclp: release SCLP early buffer after kernel initialization
  s390/nmi: disable interrupts on extended save area update
  s390/zcrypt: CCA control CPRB sending
  s390/disassembler: update opcode table
  s390/uv: fix memblock virtual vs physical address confusion
  s390/smp: fix memblock_phys_free() vs memblock_free() confusion
  s390/sclp: fix memblock_phys_free() vs memblock_free() confusion
  s390/exit: remove dead reference to do_exit from copy_thread
  s390/ap: add missing virt_to_phys address conversion
  s390/pgalloc: use pointers instead of unsigned long values
  s390/pgalloc: add virt/phys address handling to base asce functions
  ...
parents 9b9e2113 0704a858
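Several commits in this merge fix memblock_phys_free() vs memblock_free() confusion. Since the memblock API rework, the two allocation/free families work on different address types, so the free call must match the allocator that was used. A minimal sketch of the pairing (the helper function is illustrative, not part of this merge):

	#include <linux/memblock.h>
	#include <linux/sizes.h>

	static void __init memblock_pairing_example(void)
	{
		void *virt = memblock_alloc(SZ_4K, SZ_4K);		/* returns a virtual pointer */
		phys_addr_t phys = memblock_phys_alloc(SZ_4K, SZ_4K);	/* returns a physical address */

		memblock_free(virt, SZ_4K);		/* pairs with memblock_alloc() */
		memblock_phys_free(phys, SZ_4K);	/* pairs with memblock_phys_alloc() */
	}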
@@ -770,6 +770,7 @@ CONFIG_CRYPTO_SHA3_256_S390=m
 CONFIG_CRYPTO_SHA3_512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_CHACHA_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRYPTO_DEV_VIRTIO=m
...
@@ -757,6 +757,7 @@ CONFIG_CRYPTO_SHA3_256_S390=m
 CONFIG_CRYPTO_SHA3_512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_CHACHA_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRYPTO_DEV_VIRTIO=m
...
@@ -11,9 +11,11 @@ obj-$(CONFIG_CRYPTO_SHA3_512_S390) += sha3_512_s390.o sha_common.o
 obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
 obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
 obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o
+obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o
 obj-$(CONFIG_S390_PRNG) += prng.o
 obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
 obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
 obj-$(CONFIG_ARCH_RANDOM) += arch_random.o
 crc32-vx_s390-y := crc32-vx.o crc32le-vx.o crc32be-vx.o
+chacha_s390-y := chacha-glue.o chacha-s390.o
// SPDX-License-Identifier: GPL-2.0
/*
 * s390 ChaCha stream cipher.
 *
 * Copyright IBM Corp. 2021
 */

#define KMSG_COMPONENT "chacha_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/internal/chacha.h>
#include <crypto/internal/skcipher.h>
#include <crypto/algapi.h>
#include <linux/cpufeature.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/fpu/api.h>
#include "chacha-s390.h"

static void chacha20_crypt_s390(u32 *state, u8 *dst, const u8 *src,
				unsigned int nbytes, const u32 *key,
				u32 *counter)
{
	struct kernel_fpu vxstate;

	kernel_fpu_begin(&vxstate, KERNEL_VXR);
	chacha20_vx(dst, src, nbytes, key, counter);
	kernel_fpu_end(&vxstate, KERNEL_VXR);

	*counter += round_up(nbytes, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
}

static int chacha20_s390(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 state[CHACHA_STATE_WORDS] __aligned(16);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int rc;

	rc = skcipher_walk_virt(&walk, req, false);
	chacha_init_generic(state, ctx->key, req->iv);

	while (walk.nbytes > 0) {
		nbytes = walk.nbytes;
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		if (nbytes <= CHACHA_BLOCK_SIZE) {
			chacha_crypt_generic(state, walk.dst.virt.addr,
					     walk.src.virt.addr, nbytes,
					     ctx->nrounds);
		} else {
			chacha20_crypt_s390(state, walk.dst.virt.addr,
					    walk.src.virt.addr, nbytes,
					    &state[4], &state[12]);
		}
		rc = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}
	return rc;
}

static struct skcipher_alg chacha_algs[] = {
	{
		.base.cra_name		= "chacha20",
		.base.cra_driver_name	= "chacha20-s390",
		.base.cra_priority	= 900,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,
		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= CHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= chacha20_setkey,
		.encrypt		= chacha20_s390,
		.decrypt		= chacha20_s390,
	}
};

static int __init chacha_mod_init(void)
{
	return crypto_register_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs));
}

static void __exit chacha_mod_fini(void)
{
	crypto_unregister_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs));
}

module_cpu_feature_match(VXRS, chacha_mod_init);
module_exit(chacha_mod_fini);

MODULE_DESCRIPTION("ChaCha20 stream cipher");
MODULE_LICENSE("GPL v2");

MODULE_ALIAS_CRYPTO("chacha20");
...
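For context, a hedged sketch of how a kernel caller would reach the new cipher; the function below is hypothetical, but the calls are the standard skcipher API. Requesting "chacha20" resolves to the highest-priority registered implementation, so chacha20-s390 (priority 900) is picked over the generic code when the module is loaded:

	#include <crypto/chacha.h>
	#include <crypto/skcipher.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	static int chacha20_encrypt_example(u8 *buf, unsigned int len,
					    const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_skcipher("chacha20", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, CHACHA_KEY_SIZE);
		if (err)
			goto out_free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		/* encrypt buf in place, waiting synchronously for completion */
		sg_init_one(&sg, buf, len);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_free_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}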
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * s390 ChaCha stream cipher.
 *
 * Copyright IBM Corp. 2021
 */

#ifndef _CHACHA_S390_H
#define _CHACHA_S390_H

void chacha20_vx(u8 *out, const u8 *inp, size_t len, const u32 *key,
		 const u32 *counter);

#endif /* _CHACHA_S390_H */
@@ -12,6 +12,8 @@
 #ifndef _ASM_S390_AP_H_
 #define _ASM_S390_AP_H_

+#include <linux/io.h>
+
 /**
  * The ap_qid_t identifier of an ap queue.
  * If the AP facilities test (APFT) facility is available,
@@ -238,7 +240,7 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
 		struct ap_qirq_ctrl qirqctrl;
 		struct ap_queue_status status;
 	} reg1;
-	void *reg2 = ind;
+	unsigned long reg2 = virt_to_phys(ind);

 	reg1.qirqctrl = qirqctrl;
...
@@ -47,8 +47,8 @@ static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
 {
 	unsigned long start_addr, end_addr;

-	start_addr = start_pfn << PAGE_SHIFT;
-	end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
+	start_addr = pfn_to_phys(start_pfn);
+	end_addr = pfn_to_phys(start_pfn + num_pfn - 1);

 	diag_stat_inc(DIAG_STAT_X010);
 	asm volatile(
...
@@ -98,9 +98,9 @@ struct mcesa {

 struct pt_regs;

-void nmi_alloc_boot_cpu(struct lowcore *lc);
-int nmi_alloc_per_cpu(struct lowcore *lc);
-void nmi_free_per_cpu(struct lowcore *lc);
+void nmi_alloc_mcesa_early(u64 *mcesad);
+int nmi_alloc_mcesa(u64 *mcesad);
+void nmi_free_mcesa(u64 *mcesad);
 void s390_handle_mcck(void);
 void __s390_handle_mcck(void);
...
@@ -97,23 +97,23 @@ static inline unsigned int calc_px(dma_addr_t ptr)
 	return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
 }

-static inline void set_pt_pfaa(unsigned long *entry, void *pfaa)
+static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
 {
 	*entry &= ZPCI_PTE_FLAG_MASK;
-	*entry |= ((unsigned long) pfaa & ZPCI_PTE_ADDR_MASK);
+	*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
 }

-static inline void set_rt_sto(unsigned long *entry, void *sto)
+static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
 {
 	*entry &= ZPCI_RTE_FLAG_MASK;
-	*entry |= ((unsigned long) sto & ZPCI_RTE_ADDR_MASK);
+	*entry |= (sto & ZPCI_RTE_ADDR_MASK);
 	*entry |= ZPCI_TABLE_TYPE_RTX;
 }

-static inline void set_st_pto(unsigned long *entry, void *pto)
+static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
 {
 	*entry &= ZPCI_STE_FLAG_MASK;
-	*entry |= ((unsigned long) pto & ZPCI_STE_ADDR_MASK);
+	*entry |= (pto & ZPCI_STE_ADDR_MASK);
 	*entry |= ZPCI_TABLE_TYPE_SX;
 }
@@ -169,16 +169,19 @@ static inline int pt_entry_isvalid(unsigned long entry)
 static inline unsigned long *get_rt_sto(unsigned long entry)
 {
-	return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
-		? (unsigned long *) (entry & ZPCI_RTE_ADDR_MASK)
-		: NULL;
+	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
+		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+	else
+		return NULL;
 }

 static inline unsigned long *get_st_pto(unsigned long entry)
 {
-	return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
-		? (unsigned long *) (entry & ZPCI_STE_ADDR_MASK)
-		: NULL;
+	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
+		return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
+	else
+		return NULL;
 }

 /* Prototypes */
@@ -186,7 +189,7 @@ void dma_free_seg_table(unsigned long);
 unsigned long *dma_alloc_cpu_table(void);
 void dma_cleanup_tables(unsigned long *);
 unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
-void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
+void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags);

 extern const struct dma_map_ops s390_pci_dma_ops;
...
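The net effect of this header change, in one hedged sketch: the DMA translation-table entries explicitly carry physical addresses now, so readers translate with phys_to_virt() and writers pass virt_to_phys() (the matching call-site changes appear in the PCI DMA hunks further down):

	unsigned long *sto;

	sto = get_rt_sto(entry);		/* entry holds a phys address; CPU
						   access needs phys_to_virt() */
	set_rt_sto(entry, virt_to_phys(sto));	/* writers convert back */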
@@ -88,11 +88,10 @@ extern void __bpon(void);
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
  */

-#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_31BIT) ? \
-					_REGION3_SIZE : TASK_SIZE_MAX)
+#define TASK_SIZE		(test_thread_flag(TIF_31BIT) ? \
+					_REGION3_SIZE : TASK_SIZE_MAX)
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
 					(_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1))
-#define TASK_SIZE		TASK_SIZE_OF(current)
 #define TASK_SIZE_MAX		(-PAGE_SIZE)

 #define STACK_TOP		(test_thread_flag(TIF_31BIT) ? \
...
@@ -18,7 +18,6 @@
 #define QDIO_MAX_BUFFERS_MASK		(QDIO_MAX_BUFFERS_PER_Q - 1)
 #define QDIO_BUFNR(num)			((num) & QDIO_MAX_BUFFERS_MASK)
 #define QDIO_MAX_ELEMENTS_PER_BUFFER	16
-#define QDIO_SBAL_SIZE			256

 #define QDIO_QETH_QFMT			0
 #define QDIO_ZFCP_QFMT			1
@@ -92,8 +91,8 @@ struct qdr {
  * @pfmt: implementation dependent parameter format
  * @rflags: QEBSM
  * @ac: adapter characteristics
- * @isliba: absolute address of first input SLIB
- * @osliba: absolute address of first output SLIB
+ * @isliba: logical address of first input SLIB
+ * @osliba: logical address of first output SLIB
  * @ebcnam: adapter identifier in EBCDIC
  * @parm: implementation dependent parameters
  */
@@ -313,7 +312,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * @qib_rflags: rflags to set
  * @no_input_qs: number of input queues
  * @no_output_qs: number of output queues
- * @input_handler: handler to be called for input queues
+ * @input_handler: handler to be called for input queues, and device-wide errors
  * @output_handler: handler to be called for output queues
  * @irq_poll: Data IRQ polling handler
  * @scan_threshold: # of in-use buffers that triggers scan on output queue
@@ -337,9 +336,6 @@ struct qdio_initialize {
 	struct qdio_buffer ***output_sbal_addr_array;
 };

-#define QDIO_FLAG_SYNC_INPUT		0x01
-#define QDIO_FLAG_SYNC_OUTPUT		0x02
-
 int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
 void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
 void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count);
@@ -349,13 +345,18 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
 extern int qdio_establish(struct ccw_device *cdev,
			   struct qdio_initialize *init_data);
 extern int qdio_activate(struct ccw_device *);
-extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
-		   unsigned int bufnr, unsigned int count, struct qaob *aob);
 extern int qdio_start_irq(struct ccw_device *cdev);
 extern int qdio_stop_irq(struct ccw_device *cdev);
-extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
-			      bool is_input, unsigned int *bufnr,
-			      unsigned int *error);
+extern int qdio_inspect_input_queue(struct ccw_device *cdev, unsigned int nr,
+				    unsigned int *bufnr, unsigned int *error);
+extern int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr,
+				     unsigned int *bufnr, unsigned int *error);
+extern int qdio_add_bufs_to_input_queue(struct ccw_device *cdev,
+					unsigned int q_nr, unsigned int bufnr,
+					unsigned int count);
+extern int qdio_add_bufs_to_output_queue(struct ccw_device *cdev,
+					 unsigned int q_nr, unsigned int bufnr,
+					 unsigned int count, struct qaob *aob);
 extern int qdio_shutdown(struct ccw_device *, int);
 extern int qdio_free(struct ccw_device *);
 extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
...
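For driver authors, a hedged before/after fragment of this qdio interface change (the call sites are illustrative; the variables are assumed to exist in a driver context):

	/* Old interface: direction selected via QDIO_FLAG_SYNC_* flags */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count, NULL);

	/* New interface: direction encoded in the function name */
	rc = qdio_add_bufs_to_input_queue(cdev, 0, bufnr, count);
	rc = qdio_add_bufs_to_output_queue(cdev, 1, bufnr, count, aob);

	/* Likewise, qdio_inspect_queue(cdev, nr, is_input, ...) splits into: */
	rc = qdio_inspect_input_queue(cdev, 0, &bufnr, &error);
	rc = qdio_inspect_output_queue(cdev, 1, &bufnr, &error);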
@@ -372,6 +372,16 @@
 	MRXBOPC	\hint, 0x36, v1, v3
 .endm

+/* VECTOR STORE */
+.macro	VST	vr1, disp, index="%r0", base
+	VX_NUM	v1, \vr1
+	GR_NUM	x2, \index
+	GR_NUM	b2, \base	/* Base register */
+	.word	0xE700 | ((v1&15) << 4) | (x2&15)
+	.word	(b2 << 12) | (\disp)
+	MRXBOPC	0, 0x0E, v1
+.endm
+
 /* VECTOR STORE MULTIPLE */
 .macro	VSTM	vfrom, vto, disp, base, hint=3
 	VX_NUM	v1, \vfrom
@@ -411,6 +421,81 @@
 	VUPLL	\vr1, \vr2, 2
 .endm

+/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
+.macro	VPDI	vr1, vr2, vr3, m4
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
+	MRXBOPC	\m4, 0x84, v1, v2, v3
+.endm
+
+/* VECTOR REPLICATE */
+.macro	VREP	vr1, vr3, imm2, m4
+	VX_NUM	v1, \vr1
+	VX_NUM	v3, \vr3
+	.word	0xE700 | ((v1&15) << 4) | (v3&15)
+	.word	\imm2
+	MRXBOPC	\m4, 0x4D, v1, v3
+.endm
+.macro	VREPB	vr1, vr3, imm2
+	VREP	\vr1, \vr3, \imm2, 0
+.endm
+.macro	VREPH	vr1, vr3, imm2
+	VREP	\vr1, \vr3, \imm2, 1
+.endm
+.macro	VREPF	vr1, vr3, imm2
+	VREP	\vr1, \vr3, \imm2, 2
+.endm
+.macro	VREPG	vr1, vr3, imm2
+	VREP	\vr1, \vr3, \imm2, 3
+.endm
+
+/* VECTOR MERGE HIGH */
+.macro	VMRH	vr1, vr2, vr3, m4
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
+	MRXBOPC	\m4, 0x61, v1, v2, v3
+.endm
+.macro	VMRHB	vr1, vr2, vr3
+	VMRH	\vr1, \vr2, \vr3, 0
+.endm
+.macro	VMRHH	vr1, vr2, vr3
+	VMRH	\vr1, \vr2, \vr3, 1
+.endm
+.macro	VMRHF	vr1, vr2, vr3
+	VMRH	\vr1, \vr2, \vr3, 2
+.endm
+.macro	VMRHG	vr1, vr2, vr3
+	VMRH	\vr1, \vr2, \vr3, 3
+.endm
+
+/* VECTOR MERGE LOW */
+.macro	VMRL	vr1, vr2, vr3, m4
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12)
+	MRXBOPC	\m4, 0x60, v1, v2, v3
+.endm
+.macro	VMRLB	vr1, vr2, vr3
+	VMRL	\vr1, \vr2, \vr3, 0
+.endm
+.macro	VMRLH	vr1, vr2, vr3
+	VMRL	\vr1, \vr2, \vr3, 1
+.endm
+.macro	VMRLF	vr1, vr2, vr3
+	VMRL	\vr1, \vr2, \vr3, 2
+.endm
+.macro	VMRLG	vr1, vr2, vr3
+	VMRL	\vr1, \vr2, \vr3, 3
+.endm
+
 /* Vector integer instructions */
@@ -557,5 +642,37 @@
 	VESRAV	\vr1, \vr2, \vr3, 3
 .endm

+/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
+.macro	VERLL	vr1, vr3, disp, base="%r0", m4
+	VX_NUM	v1, \vr1
+	VX_NUM	v3, \vr3
+	GR_NUM	b2, \base
+	.word	0xE700 | ((v1&15) << 4) | (v3&15)
+	.word	(b2 << 12) | (\disp)
+	MRXBOPC	\m4, 0x33, v1, v3
+.endm
+.macro	VERLLB	vr1, vr3, disp, base="%r0"
+	VERLL	\vr1, \vr3, \disp, \base, 0
+.endm
+.macro	VERLLH	vr1, vr3, disp, base="%r0"
+	VERLL	\vr1, \vr3, \disp, \base, 1
+.endm
+.macro	VERLLF	vr1, vr3, disp, base="%r0"
+	VERLL	\vr1, \vr3, \disp, \base, 2
+.endm
+.macro	VERLLG	vr1, vr3, disp, base="%r0"
+	VERLL	\vr1, \vr3, \disp, \base, 3
+.endm
+
+/* VECTOR SHIFT LEFT DOUBLE BY BYTE */
+.macro	VSLDB	vr1, vr2, vr3, imm4
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	.word	0xE700 | ((v1&15) << 4) | (v2&15)
+	.word	((v3&15) << 12) | (\imm4)
+	MRXBOPC	0, 0x77, v1, v2, v3
+.endm
+
 #endif	/* __ASSEMBLY__ */
 #endif	/* __ASM_S390_VX_INSN_H */
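These macros back the new chacha-s390.S assembly (the collapsed diff above). As an illustration only, one step of a ChaCha quarter-round (a += b; d ^= a; d = rotl32(d, 16)) could be written with the new word-rotate, assuming the VAF (add word) and VX (exclusive or) helpers already present in this header:

	VAF	%v0, %v0, %v1	# a += b
	VX	%v3, %v3, %v0	# d ^= a
	VERLLF	%v3, %v3, 16	# rotate each word left by 16 (immediate via disp)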
@@ -60,7 +60,7 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
 {
 	struct save_area *sa;

-	sa = (void *) memblock_phys_alloc(sizeof(*sa), 8);
+	sa = memblock_alloc(sizeof(*sa), 8);
 	if (!sa)
 		panic("Failed to allocate save area\n");
...
@@ -278,6 +278,7 @@ static const unsigned char formats[][6] = {
 	[INSTR_SIL_RDI]	     = { D_20, B_16, I16_32, 0, 0, 0 },
 	[INSTR_SIL_RDU]	     = { D_20, B_16, U16_32, 0, 0, 0 },
 	[INSTR_SIY_IRD]	     = { D20_20, B_16, I8_8, 0, 0, 0 },
+	[INSTR_SIY_RD]	     = { D20_20, B_16, 0, 0, 0, 0 },
 	[INSTR_SIY_URD]	     = { D20_20, B_16, U8_8, 0, 0, 0 },
 	[INSTR_SI_RD]	     = { D_20, B_16, 0, 0, 0, 0 },
 	[INSTR_SI_URD]	     = { D_20, B_16, U8_8, 0, 0, 0 },
...
@@ -86,7 +86,7 @@ static noinline void __machine_kdump(void *image)
 			continue;
 	}
 	/* Store status of the boot CPU */
-	mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
+	mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
 	if (MACHINE_HAS_VX)
 		save_vx_regs((__vector128 *) mcesa->vector_save_area);
 	if (MACHINE_HAS_GS) {
...
@@ -58,27 +58,27 @@ static inline unsigned long nmi_get_mcesa_size(void)
 /*
  * The initial machine check extended save area for the boot CPU.
- * It will be replaced by nmi_init() with an allocated structure.
- * The structure is required for machine check happening early in
- * the boot process.
+ * It will be replaced on the boot CPU reinit with an allocated
+ * structure. The structure is required for machine check happening
+ * early in the boot process.
  */
 static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);

-void __init nmi_alloc_boot_cpu(struct lowcore *lc)
+void __init nmi_alloc_mcesa_early(u64 *mcesad)
 {
 	if (!nmi_needs_mcesa())
 		return;
-	lc->mcesad = (unsigned long) &boot_mcesa;
+	*mcesad = __pa(&boot_mcesa);
 	if (MACHINE_HAS_GS)
-		lc->mcesad |= ilog2(MCESA_MAX_SIZE);
+		*mcesad |= ilog2(MCESA_MAX_SIZE);
 }

-static int __init nmi_init(void)
+static void __init nmi_alloc_cache(void)
 {
-	unsigned long origin, cr0, size;
+	unsigned long size;

 	if (!nmi_needs_mcesa())
-		return 0;
+		return;
 	size = nmi_get_mcesa_size();
 	if (size > MCESA_MIN_SIZE)
 		mcesa_origin_lc = ilog2(size);
@@ -86,40 +86,31 @@ static int __init nmi_init(void)
 	mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
 	if (!mcesa_cache)
 		panic("Couldn't create nmi save area cache");
-	origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
-	if (!origin)
-		panic("Couldn't allocate nmi save area");
-	/* The pointer is stored with mcesa_bits ORed in */
-	kmemleak_not_leak((void *) origin);
-	__ctl_store(cr0, 0, 0);
-	__ctl_clear_bit(0, 28); /* disable lowcore protection */
-	/* Replace boot_mcesa on the boot CPU */
-	S390_lowcore.mcesad = origin | mcesa_origin_lc;
-	__ctl_load(cr0, 0, 0);
-	return 0;
 }
-early_initcall(nmi_init);

-int nmi_alloc_per_cpu(struct lowcore *lc)
+int __ref nmi_alloc_mcesa(u64 *mcesad)
 {
 	unsigned long origin;

+	*mcesad = 0;
 	if (!nmi_needs_mcesa())
 		return 0;
+	if (!mcesa_cache)
+		nmi_alloc_cache();
 	origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
 	if (!origin)
 		return -ENOMEM;
 	/* The pointer is stored with mcesa_bits ORed in */
 	kmemleak_not_leak((void *) origin);
-	lc->mcesad = origin | mcesa_origin_lc;
+	*mcesad = __pa(origin) | mcesa_origin_lc;
 	return 0;
 }

-void nmi_free_per_cpu(struct lowcore *lc)
+void nmi_free_mcesa(u64 *mcesad)
 {
 	if (!nmi_needs_mcesa())
 		return;
-	kmem_cache_free(mcesa_cache, (void *)(lc->mcesad & MCESA_ORIGIN_MASK));
+	kmem_cache_free(mcesa_cache, __va(*mcesad & MCESA_ORIGIN_MASK));
 }

 static notrace void s390_handle_damage(void)
@@ -246,7 +237,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 			       : "Q" (S390_lowcore.fpt_creg_save_area));
 	}

-	mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
+	mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
 	if (!MACHINE_HAS_VX) {
 		/* Validate floating point registers */
 		asm volatile(
...
@@ -139,7 +139,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 			(unsigned long)__ret_from_fork;
 		frame->childregs.gprs[9] = new_stackp; /* function */
 		frame->childregs.gprs[10] = arg;
-		frame->childregs.gprs[11] = (unsigned long)do_exit;
 		frame->childregs.orig_gpr2 = -1;
 		frame->childregs.last_break = 1;
 		return 0;
...
@@ -445,7 +445,7 @@ static void __init setup_lowcore_dat_off(void)
 	lc->lpp = LPP_MAGIC;
 	lc->machine_flags = S390_lowcore.machine_flags;
 	lc->preempt_count = S390_lowcore.preempt_count;
-	nmi_alloc_boot_cpu(lc);
+	nmi_alloc_mcesa_early(&lc->mcesad);
 	lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
 	lc->exit_timer = S390_lowcore.exit_timer;
 	lc->user_timer = S390_lowcore.user_timer;
...
@@ -212,7 +212,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
 	lc->preempt_count = PREEMPT_DISABLED;
-	if (nmi_alloc_per_cpu(lc))
+	if (nmi_alloc_mcesa(&lc->mcesad))
 		goto out;
 	lowcore_ptr[cpu] = lc;
 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
@@ -239,7 +239,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
 	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
 	lowcore_ptr[cpu] = NULL;
-	nmi_free_per_cpu(lc);
+	nmi_free_mcesa(&lc->mcesad);
 	stack_free(async_stack);
 	stack_free(mcck_stack);
 	free_pages(nodat_stack, THREAD_SIZE_ORDER);
@@ -622,7 +622,7 @@ int smp_store_status(int cpu)
 		return -EIO;
 	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
 		return 0;
-	pa = __pa(lc->mcesad & MCESA_ORIGIN_MASK);
+	pa = lc->mcesad & MCESA_ORIGIN_MASK;
 	if (MACHINE_HAS_GS)
 		pa |= lc->mcesad & MCESA_LC_MASK;
 	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
@@ -658,26 +658,22 @@ int smp_store_status(int cpu)
  * deactivates the elfcorehdr= kernel parameter
  */
 static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
-				     bool is_boot_cpu, unsigned long page)
+				     bool is_boot_cpu, __vector128 *vxrs)
 {
-	__vector128 *vxrs = (__vector128 *) page;
-
 	if (is_boot_cpu)
 		vxrs = boot_cpu_vector_save_area;
 	else
-		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
+		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(vxrs));
 	save_area_add_vxrs(sa, vxrs);
 }

 static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
-				     bool is_boot_cpu, unsigned long page)
+				     bool is_boot_cpu, void *regs)
 {
-	void *regs = (void *) page;
-
 	if (is_boot_cpu)
 		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
 	else
-		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
+		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(regs));
 	save_area_add_regs(sa, regs);
 }
@@ -685,14 +681,14 @@ void __init smp_save_dump_cpus(void)
 {
 	int addr, boot_cpu_addr, max_cpu_addr;
 	struct save_area *sa;
-	unsigned long page;
 	bool is_boot_cpu;
+	void *page;

 	if (!(oldmem_data.start || is_ipl_type_dump()))
 		/* No previous system present, normal boot. */
 		return;
 	/* Allocate a page as dumping area for the store status sigps */
-	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
+	page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
 	if (!page)
 		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
 		      PAGE_SIZE, 1UL << 31);
@@ -723,7 +719,7 @@ void __init smp_save_dump_cpus(void)
 		/* Get the CPU registers */
 		smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
 	}
-	memblock_phys_free(page, PAGE_SIZE);
+	memblock_free(page, PAGE_SIZE);
 	diag_amode31_ops.diag308_reset();
 	pcpu_set_smt(0);
 }
@@ -880,7 +876,7 @@ void __init smp_detect_cpus(void)
 	/* Add CPUs present at boot */
 	__smp_rescan_cpus(info, true);
-	memblock_phys_free((unsigned long)info, sizeof(*info));
+	memblock_free(info, sizeof(*info));
 }

 /*
@@ -1271,14 +1267,15 @@ static int __init smp_reinit_ipl_cpu(void)
 {
 	unsigned long async_stack, nodat_stack, mcck_stack;
 	struct lowcore *lc, *lc_ipl;
-	unsigned long flags;
+	unsigned long flags, cr0;
+	u64 mcesad;

 	lc_ipl = lowcore_ptr[0];
 	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
 	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
 	async_stack = stack_alloc();
 	mcck_stack = stack_alloc();
-	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+	if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
 		panic("Couldn't allocate memory");

 	local_irq_save(flags);
@@ -1287,6 +1284,10 @@ static int __init smp_reinit_ipl_cpu(void)
 	S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
 	S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
 	S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
+	__ctl_store(cr0, 0, 0);
+	__ctl_clear_bit(0, 28); /* disable lowcore protection */
+	S390_lowcore.mcesad = mcesad;
+	__ctl_load(cr0, 0, 0);
 	lowcore_ptr[0] = lc;
 	local_mcck_enable();
 	local_irq_restore(flags);
...
@@ -30,7 +30,7 @@ int __bootdata_preserved(prot_virt_host);
 EXPORT_SYMBOL(prot_virt_host);
 EXPORT_SYMBOL(uv_info);

-static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
+static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
 {
 	struct uv_cb_init uvcb = {
 		.header.cmd = UVC_CMD_INIT_UV,
@@ -49,12 +49,12 @@ static int __init uv_init(unsigned long stor_base, unsigned long stor_len)

 void __init setup_uv(void)
 {
-	unsigned long uv_stor_base;
+	void *uv_stor_base;

 	if (!is_prot_virt_host())
 		return;

-	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
+	uv_stor_base = memblock_alloc_try_nid(
 		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
 		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 	if (!uv_stor_base) {
@@ -63,8 +63,8 @@ void __init setup_uv(void)
 		goto fail;
 	}

-	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
-		memblock_phys_free(uv_stor_base, uv_info.uv_base_stor_len);
+	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
+		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
 		goto fail;
 	}
...
@@ -90,7 +90,7 @@ static long cmm_alloc_pages(long nr, long *counter,
 			} else
 				free_page((unsigned long) npa);
 		}
-		diag10_range(addr >> PAGE_SHIFT, 1);
+		diag10_range(virt_to_pfn(addr), 1);
 		pa->pages[pa->index++] = addr;
 		(*counter)++;
 		spin_unlock(&cmm_lock);
...
@@ -115,7 +115,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
 		pr_cont("R1:%016lx ", *table);
 		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = __va(*table & _REGION_ENTRY_ORIGIN);
 		fallthrough;
 	case _ASCE_TYPE_REGION2:
 		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
@@ -124,7 +124,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
 		pr_cont("R2:%016lx ", *table);
 		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = __va(*table & _REGION_ENTRY_ORIGIN);
 		fallthrough;
 	case _ASCE_TYPE_REGION3:
 		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
@@ -133,7 +133,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
 		pr_cont("R3:%016lx ", *table);
 		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
 			goto out;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		table = __va(*table & _REGION_ENTRY_ORIGIN);
 		fallthrough;
 	case _ASCE_TYPE_SEGMENT:
 		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
@@ -142,7 +142,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
 		pr_cont("S:%016lx ", *table);
 		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
 			goto out;
-		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+		table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
 	}
 	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
 	if (bad_address(table))
...
@@ -215,6 +215,9 @@ void free_initmem(void)
 	__set_memory((unsigned long)_sinittext,
 		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
 		     SET_MEMORY_RW | SET_MEMORY_NX);
+	free_reserved_area(sclp_early_sccb,
+			   sclp_early_sccb + EXT_SCCB_READ_SCP,
+			   POISON_FREE_INITMEM, "unused early sccb");
 	free_initmem_default(POISON_FREE_INITMEM);
 }
...
...
@@ -771,7 +771,7 @@ int zpci_hot_reset_device(struct zpci_dev *zdev)
 	if (zdev->dma_table)
 		rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
-					(u64)zdev->dma_table);
+					virt_to_phys(zdev->dma_table));
 	else
 		rc = zpci_dma_init_device(zdev);
 	if (rc) {
...
@@ -74,7 +74,7 @@ static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
 		if (!sto)
 			return NULL;

-		set_rt_sto(entry, sto);
+		set_rt_sto(entry, virt_to_phys(sto));
 		validate_rt_entry(entry);
 		entry_clr_protected(entry);
 	}
@@ -91,7 +91,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry)
 		pto = dma_alloc_page_table();
 		if (!pto)
 			return NULL;
-		set_st_pto(entry, pto);
+		set_st_pto(entry, virt_to_phys(pto));
 		validate_st_entry(entry);
 		entry_clr_protected(entry);
 	}
@@ -117,7 +117,7 @@ unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
 	return &pto[px];
 }

-void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
+void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags)
 {
 	if (flags & ZPCI_PTE_INVALID) {
 		invalidate_pt_entry(entry);
@@ -132,11 +132,11 @@ void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags)
 	entry_clr_protected(entry);
 }

-static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
 			      dma_addr_t dma_addr, size_t size, int flags)
 {
 	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
+	phys_addr_t page_addr = (pa & PAGE_MASK);
 	unsigned long irq_flags;
 	unsigned long *entry;
 	int i, rc = 0;
@@ -217,7 +217,7 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
 	return ret;
 }

-static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+static int dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
 			    dma_addr_t dma_addr, size_t size, int flags)
 {
 	int rc;
@@ -400,7 +400,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	struct page *page;
-	unsigned long pa;
+	phys_addr_t pa;
 	dma_addr_t map;

 	size = PAGE_ALIGN(size);
@@ -411,18 +411,18 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
 	pa = page_to_phys(page);
 	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
 	if (dma_mapping_error(dev, map)) {
-		free_pages(pa, get_order(size));
+		__free_pages(page, get_order(size));
 		return NULL;
 	}

 	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
 	if (dma_handle)
 		*dma_handle = map;
-	return (void *) pa;
+	return phys_to_virt(pa);
 }

 static void s390_dma_free(struct device *dev, size_t size,
-			  void *pa, dma_addr_t dma_handle,
+			  void *vaddr, dma_addr_t dma_handle,
 			  unsigned long attrs)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
@@ -430,7 +430,7 @@ static void s390_dma_free(struct device *dev, size_t size,
 	size = PAGE_ALIGN(size);
 	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
 	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
-	free_pages((unsigned long) pa, get_order(size));
+	free_pages((unsigned long)vaddr, get_order(size));
 }

 /* Map a segment into a contiguous dma address area */
@@ -443,7 +443,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t dma_addr_base, dma_addr;
 	int flags = ZPCI_PTE_VALID;
 	struct scatterlist *s;
-	unsigned long pa = 0;
+	phys_addr_t pa = 0;
 	int ret;

 	dma_addr_base = dma_alloc_address(dev, nr_pages);
@@ -598,7 +598,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 	}

 	if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
-			       (u64)zdev->dma_table)) {
+			       virt_to_phys(zdev->dma_table))) {
 		rc = -EIO;
 		goto free_bitmap;
 	}
...
@@ -365,10 +365,7 @@ EXPORT_SYMBOL_GPL(zpci_write_block);

 static inline void __pciwb_mio(void)
 {
-	unsigned long unused = 0;
-
-	asm volatile (".insn	rre,0xb9d50000,%[op],%[op]\n"
-		      : [op] "+d" (unused));
+	asm volatile (".insn	rre,0xb9d50000,0,0\n");
 }

 void zpci_barrier(void)
...
@@ -45,9 +45,9 @@ static int zpci_set_airq(struct zpci_dev *zdev)
 	fib.fmt0.isc = PCI_ISC;
 	fib.fmt0.sum = 1;	/* enable summary notifications */
 	fib.fmt0.noi = airq_iv_end(zdev->aibv);
-	fib.fmt0.aibv = (unsigned long) zdev->aibv->vector;
+	fib.fmt0.aibv = virt_to_phys(zdev->aibv->vector);
 	fib.fmt0.aibvo = 0;	/* each zdev has its own interrupt vector */
-	fib.fmt0.aisb = (unsigned long) zpci_sbv->vector + (zdev->aisb/64)*8;
+	fib.fmt0.aisb = virt_to_phys(zpci_sbv->vector) + (zdev->aisb / 64) * 8;
 	fib.fmt0.aisbo = zdev->aisb & 63;

 	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
@@ -422,7 +422,7 @@ static int __init zpci_directed_irq_init(void)
 	iib.diib.isc = PCI_ISC;
 	iib.diib.nr_cpus = num_possible_cpus();
-	iib.diib.disb_addr = (u64) zpci_sbv->vector;
+	iib.diib.disb_addr = virt_to_phys(zpci_sbv->vector);
 	__zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT, 0, &iib);

 	zpci_ibv = kcalloc(num_possible_cpus(), sizeof(*zpci_ibv),
...
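A note on the virt_to_phys() conversions in this and the surrounding hunks, under the assumption (consistent with the "virtual vs physical address confusion" theme of this merge) that the s390 linear kernel mapping is currently identity:

	/* For a linearly-mapped kernel pointer p, this currently holds: */
	WARN_ON(virt_to_phys(p) != (phys_addr_t)(unsigned long)p);
	/* The explicit conversions are thus not behavioral changes today;
	 * they document intent at each call site and keep the code correct
	 * should the mapping ever change. */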
@@ -276,6 +276,7 @@ b285 lpctl S_RD
 b286	qsi	S_RD
 b287	lsctl	S_RD
 b28e	qctri	S_RD
+b28f	qpaci	S_RD
 b299	srnm	S_RD
 b29c	stfpc	S_RD
 b29d	lfpc	S_RD
@@ -1098,7 +1099,7 @@ eb61 stric RSY_RDRU
 eb62	mric	RSY_RDRU
 eb6a	asi	SIY_IRD
 eb6e	alsi	SIY_IRD
-eb71	lpswey	SIY_URD
+eb71	lpswey	SIY_RD
 eb7a	agsi	SIY_IRD
 eb7e	algsi	SIY_IRD
 eb80	icmh	RSY_RURD
...
@@ -213,6 +213,18 @@ config CRYPTO_AES_S390
 	  key sizes and XTS mode is hardware accelerated for 256 and
 	  512 bit keys.

+config CRYPTO_CHACHA_S390
+	tristate "ChaCha20 stream cipher"
+	depends on S390
+	select CRYPTO_ALGAPI
+	select CRYPTO_SKCIPHER
+	select CRYPTO_CHACHA20
+	help
+	  This is the s390 SIMD implementation of the ChaCha20 stream
+	  cipher (RFC 7539).
+
+	  It is available as of z13.
+
 config S390_PRNG
 	tristate "Pseudo random number generator device driver"
 	depends on S390
...
@@ -109,7 +109,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
 	zdev->dma_table = s390_domain->dma_table;
 	cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
-				(u64) zdev->dma_table);
+				virt_to_phys(zdev->dma_table));
 	if (cc) {
 		rc = -EIO;
 		goto out_restore;
@@ -205,11 +205,11 @@ static void s390_iommu_release_device(struct device *dev)
 }

 static int s390_iommu_update_trans(struct s390_domain *s390_domain,
-				   unsigned long pa, dma_addr_t dma_addr,
+				   phys_addr_t pa, dma_addr_t dma_addr,
 				   size_t size, int flags)
 {
 	struct s390_domain_device *domain_device;
-	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
+	phys_addr_t page_addr = pa & PAGE_MASK;
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags, nr_pages, i;
 	unsigned long *entry;
@@ -274,7 +274,7 @@ static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
 	if (!(prot & IOMMU_WRITE))
 		flags |= ZPCI_TABLE_PROTECTED;

-	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
+	rc = s390_iommu_update_trans(s390_domain, paddr, iova,
 				     size, flags);

 	return rc;
@@ -324,7 +324,7 @@ static size_t s390_iommu_unmap(struct iommu_domain *domain,
 	if (!paddr)
 		return 0;

-	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
+	rc = s390_iommu_update_trans(s390_domain, paddr, iova,
 				     size, flags);
 	if (rc)
 		return 0;
...
@@ -1824,10 +1824,11 @@ static struct attribute *paths_info_attrs[] = {
 	&path_fcs_attribute.attr,
 	NULL,
 };
+ATTRIBUTE_GROUPS(paths_info);

 static struct kobj_type path_attr_type = {
 	.release	= dasd_path_release,
-	.default_attrs	= paths_info_attrs,
+	.default_groups	= paths_info_groups,
 	.sysfs_ops	= &kobj_sysfs_ops,
 };
...
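The kobj_type conversion above (and the sclp_sd one below) relies on the ATTRIBUTE_GROUPS() helper from <linux/sysfs.h>, which roughly expands to the following, supplying the paths_info_groups identifier the diff uses:

	static const struct attribute_group paths_info_group = {
		.attrs = paths_info_attrs,	/* wraps the existing attr array */
	};
	static const struct attribute_group *paths_info_groups[] = {
		&paths_info_group,
		NULL,
	};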
@@ -139,7 +139,7 @@ int __init sclp_early_get_core_info(struct sclp_core_info *info)
 	}
 	sclp_fill_core_info(info, sccb);
 out:
-	memblock_phys_free((unsigned long)sccb, length);
+	memblock_free(sccb, length);
 	return rc;
 }
...
@@ -438,11 +438,12 @@ static struct attribute *sclp_sd_file_default_attrs[] = {
 	&reload_attr.attr,
 	NULL,
 };
+ATTRIBUTE_GROUPS(sclp_sd_file_default);

 static struct kobj_type sclp_sd_file_ktype = {
 	.sysfs_ops = &kobj_sysfs_ops,
 	.release = sclp_sd_file_release,
-	.default_attrs = sclp_sd_file_default_attrs,
+	.default_groups = sclp_sd_file_default_groups,
 };

 /**
...
@@ -72,7 +72,7 @@ static void vmcp_response_alloc(struct vmcp_session *session)
 	if (order > 2)
 		page = cma_alloc(vmcp_cma, nr_pages, 0, false);
 	if (page) {
-		session->response = (char *)page_to_phys(page);
+		session->response = (char *)page_to_virt(page);
 		session->cma_alloc = 1;
 		return;
 	}
@@ -89,7 +89,7 @@ static void vmcp_response_free(struct vmcp_session *session)
 	order = get_order(session->bufsize);
 	nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
 	if (session->cma_alloc) {
-		page = phys_to_page((unsigned long)session->response);
+		page = virt_to_page((unsigned long)session->response);
 		cma_release(vmcp_cma, page, nr_pages);
 		session->cma_alloc = 0;
 	} else {
...
@@ -91,11 +91,6 @@ static int chsc_subchannel_probe(struct subchannel *sch)
 			      sch->schid.ssid, sch->schid.sch_no, ret);
 		dev_set_drvdata(&sch->dev, NULL);
 		kfree(private);
-	} else {
-		if (dev_get_uevent_suppress(&sch->dev)) {
-			dev_set_uevent_suppress(&sch->dev, 0);
-			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
-		}
 	}
 	return ret;
 }
...
@@ -470,16 +470,6 @@ int css_register_subchannel(struct subchannel *sch)
 	if (sch->st == SUBCHANNEL_TYPE_IO)
 		sch->dev.type = &io_subchannel_type;

-	/*
-	 * We don't want to generate uevents for I/O subchannels that don't
-	 * have a working ccw device behind them since they will be
-	 * unregistered before they can be used anyway, so we delay the add
-	 * uevent until after device recognition was successful.
-	 * Note that we suppress the uevent for all subchannel types;
-	 * the subchannel driver can decide itself when it wants to inform
-	 * userspace of its existence.
-	 */
-	dev_set_uevent_suppress(&sch->dev, 1);
 	css_update_ssd_info(sch);

 	/* make it known to the system */
 	ret = css_sch_device_register(sch);
@@ -488,15 +478,6 @@ int css_register_subchannel(struct subchannel *sch)
 			      sch->schid.ssid, sch->schid.sch_no, ret);
 		return ret;
 	}
-	if (!sch->driver) {
-		/*
-		 * No driver matched. Generate the uevent now so that
-		 * a fitting driver module may be loaded based on the
-		 * modalias.
-		 */
-		dev_set_uevent_suppress(&sch->dev, 0);
-		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
-	}
 	return ret;
 }
...
@@ -838,14 +838,6 @@ static void io_subchannel_register(struct ccw_device *cdev)
 		adjust_init_count = 0;
 		goto out;
 	}
-	/*
-	 * Now we know this subchannel will stay, we can throw
-	 * our delayed uevent.
-	 */
-	if (dev_get_uevent_suppress(&sch->dev)) {
-		dev_set_uevent_suppress(&sch->dev, 0);
-		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
-	}
 	/* make it known to the system */
 	ret = device_add(&cdev->dev);
 	if (ret) {
@@ -1037,14 +1029,8 @@ static int io_subchannel_probe(struct subchannel *sch)
 			      sch->schid.ssid, sch->schid.sch_no, rc);
 		/*
 		 * The console subchannel already has an associated ccw_device.
-		 * Throw the delayed uevent for the subchannel, register
-		 * the ccw_device and exit.
+		 * Register it and exit.
 		 */
-		if (dev_get_uevent_suppress(&sch->dev)) {
-			/* should always be the case for the console */
-			dev_set_uevent_suppress(&sch->dev, 0);
-			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
-		}
 		cdev = sch_get_cdev(sch);
 		rc = device_add(&cdev->dev);
 		if (rc) {
...
@@ -243,11 +243,6 @@ static int eadm_subchannel_probe(struct subchannel *sch)
 	spin_lock_irq(&list_lock);
 	list_add(&private->head, &eadm_list);
 	spin_unlock_irq(&list_lock);
-
-	if (dev_get_uevent_suppress(&sch->dev)) {
-		dev_set_uevent_suppress(&sch->dev, 0);
-		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
-	}
 out:
 	return ret;
 }
...
@@ -236,12 +236,11 @@ struct qdio_irq {
 	int nr_input_qs;
 	int nr_output_qs;
-	struct ccw1 ccw;
-	struct ciw equeue;
-	struct ciw aqueue;
+	struct ccw1 *ccw;
 	struct qdio_ssqd_desc ssqd_desc;
 	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+	qdio_handler_t (*error_handler);
 	int perf_stat_enabled;
@@ -338,7 +337,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
 int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
 			struct subchannel_id *schid,
 			struct qdio_ssqd_desc *data);
-int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
+void qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
 void qdio_shutdown_irq(struct qdio_irq *irq);
 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
 void qdio_free_queues(struct qdio_irq *irq_ptr);
...
@@ -351,19 +351,18 @@ static void setup_qib(struct qdio_irq *irq_ptr,
 	       sizeof(irq_ptr->qib.parm));
 }
-int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
+void qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
 {
 	struct ccw_device *cdev = irq_ptr->cdev;
-	struct ciw *ciw;
 	irq_ptr->qdioac1 = 0;
-	memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
 	memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
 	memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
 	irq_ptr->debugfs_dev = NULL;
 	irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
 	irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
+	irq_ptr->error_handler = init_data->input_handler;
 	irq_ptr->int_parm = init_data->int_parm;
 	irq_ptr->nr_input_qs = init_data->no_input_qs;
@@ -386,23 +385,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
 	irq_ptr->orig_handler = cdev->handler;
 	cdev->handler = qdio_int_handler;
 	spin_unlock_irq(get_ccwdev_lock(cdev));
-	/* get qdio commands */
-	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
-	if (!ciw) {
-		DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
-		return -EINVAL;
-	}
-	irq_ptr->equeue = *ciw;
-	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
-	if (!ciw) {
-		DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
-		return -EINVAL;
-	}
-	irq_ptr->aqueue = *ciw;
-	return 0;
 }
 void qdio_shutdown_irq(struct qdio_irq *irq)
...
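Note: the CIW lookups deleted above were the only ways qdio_setup_irq() could fail, which is what allows its return type to change from int to void (and lets struct qdio_irq drop the equeue/aqueue members in the header hunk further up). A hedged sketch of the removed failure path; ccw_device_get_ciw() is the real cio accessor, the helper name is illustrative:

```c
/* Sketch of the retired logic: look up the command-information words
 * (CIWs) describing the device's establish-queues and activate-queues
 * channel commands, failing if either is missing. This refers to the
 * old struct qdio_irq layout that still had the equeue/aqueue members. */
static int fetch_queue_ciws(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
{
	struct ciw *ciw;

	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
	if (!ciw)
		return -EINVAL;	/* no ESTABLISH command advertised */
	irq_ptr->equeue = *ciw;

	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
	if (!ciw)
		return -EINVAL;	/* no ACTIVATE command advertised */
	irq_ptr->aqueue = *ciw;

	return 0;
}
```

Presumably the lookup now happens where the corresponding channel programs are actually built, in parts of the diff not shown here; nothing in the visible hunks contradicts that.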
@@ -244,11 +244,6 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
 	if (ret)
 		goto out_disable;
-	if (dev_get_uevent_suppress(&sch->dev)) {
-		dev_set_uevent_suppress(&sch->dev, 0);
-		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
-	}
 	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
 			   sch->schid.cssid, sch->schid.ssid,
 			   sch->schid.sch_no);
...
@@ -17,6 +17,9 @@
 #define VFIO_AP_ROOT_NAME "vfio_ap"
 #define VFIO_AP_DEV_NAME "matrix"
+#define AP_QUEUE_ASSIGNED "assigned"
+#define AP_QUEUE_UNASSIGNED "unassigned"
+#define AP_QUEUE_IN_USE "in use"
 
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
@@ -41,26 +44,95 @@ static struct ap_device_id ap_queue_ids[] = {
 MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
+static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
+{
+	struct ap_matrix_mdev *matrix_mdev;
+	unsigned long apid = AP_QID_CARD(q->apqn);
+	unsigned long apqi = AP_QID_QUEUE(q->apqn);
+
+	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+		if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
+		    test_bit_inv(apqi, matrix_mdev->matrix.aqm))
+			return matrix_mdev;
+	}
+
+	return NULL;
+}
+
+static ssize_t status_show(struct device *dev,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	ssize_t nchars = 0;
+	struct vfio_ap_queue *q;
+	struct ap_matrix_mdev *matrix_mdev;
+	struct ap_device *apdev = to_ap_dev(dev);
+
+	mutex_lock(&matrix_dev->lock);
+	q = dev_get_drvdata(&apdev->device);
+	matrix_mdev = vfio_ap_mdev_for_queue(q);
+
+	if (matrix_mdev) {
+		if (matrix_mdev->kvm)
+			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+					   AP_QUEUE_IN_USE);
+		else
+			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+					   AP_QUEUE_ASSIGNED);
+	} else {
+		nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
+				   AP_QUEUE_UNASSIGNED);
+	}
+
+	mutex_unlock(&matrix_dev->lock);
+
+	return nchars;
+}
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *vfio_queue_attrs[] = {
+	&dev_attr_status.attr,
+	NULL,
+};
+
+static const struct attribute_group vfio_queue_attr_group = {
+	.attrs = vfio_queue_attrs,
+};
+
 /**
  * vfio_ap_queue_dev_probe: Allocate a vfio_ap_queue structure and associate it
  * with the device as driver_data.
  *
  * @apdev: the AP device being probed
  *
- * Return: returns 0 if the probe succeeded; otherwise, returns -ENOMEM if
- * storage could not be allocated for a vfio_ap_queue object.
+ * Return: returns 0 if the probe succeeded; otherwise, returns an error if
+ * storage could not be allocated for a vfio_ap_queue object or the
+ * sysfs 'status' attribute could not be created for the queue device.
  */
 static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
 {
+	int ret;
 	struct vfio_ap_queue *q;
 
 	q = kzalloc(sizeof(*q), GFP_KERNEL);
 	if (!q)
 		return -ENOMEM;
+
+	mutex_lock(&matrix_dev->lock);
 	dev_set_drvdata(&apdev->device, q);
 	q->apqn = to_ap_queue(&apdev->device)->qid;
 	q->saved_isc = VFIO_AP_ISC_INVALID;
-	return 0;
+
+	ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
+	if (ret) {
+		dev_set_drvdata(&apdev->device, NULL);
+		kfree(q);
+	}
+	mutex_unlock(&matrix_dev->lock);
+
+	return ret;
 }
 /**
@@ -75,6 +147,7 @@ static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
 	struct vfio_ap_queue *q;
 
 	mutex_lock(&matrix_dev->lock);
+	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
 	q = dev_get_drvdata(&apdev->device);
 	vfio_ap_mdev_reset_queue(q, 1);
 	dev_set_drvdata(&apdev->device, NULL);
...
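The new attribute makes a queue's state visible without tracing driver internals. A hedged usage sketch in userspace C; the sysfs path shown is a made-up example (the real path depends on the card and queue indices of the APQN):

```c
/* Read the new 'status' attribute of an AP queue device bound to
 * vfio_ap. Card 0x03, queue 0x0005 below is illustrative only. */
#include <stdio.h>

int main(void)
{
	char status[32];
	FILE *f = fopen("/sys/devices/ap/card03/03.0005/status", "r");

	if (!f) {
		perror("open status attribute");
		return 1;
	}
	if (fgets(status, sizeof(status), f))
		printf("queue status: %s", status);
	fclose(f);
	return 0;
}
```

Per the status_show() logic, the value maps to the three AP_QUEUE_* strings: "unassigned" (no matrix mdev claims the APQN), "assigned" (claimed but no KVM guest attached), and "in use" (claimed and wired to a guest).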
@@ -878,14 +878,13 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
 	/*
 	 * If a valid target domain is set and this domain is NOT a usage
-	 * domain but a control only domain, use the default domain as target.
+	 * domain but a control only domain, autoselect target domain.
 	 */
 	tdom = *domain;
 	if (tdom < AP_DOMAINS &&
 	    !ap_test_config_usage_domain(tdom) &&
-	    ap_test_config_ctrl_domain(tdom) &&
-	    ap_domain_index >= 0)
-		tdom = ap_domain_index;
+	    ap_test_config_ctrl_domain(tdom))
+		tdom = AUTOSEL_DOM;
 	pref_zc = NULL;
 	pref_zq = NULL;
...
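Note: previously a CPRB aimed at a control-only domain was redirected to the machine's default domain (ap_domain_index); now it is sent with AUTOSEL_DOM, i.e. on whichever usage-domain queue is available. A condensed sketch of the new resolution step, extracted from the hunk above (the helper name is illustrative, the identifiers are the real ones from the diff):

```c
/* Resolve the target domain for a CPRB before queue selection: a
 * valid domain that is configured control-only (not a usage domain)
 * is mapped to AUTOSEL_DOM so any usage queue may carry the request.
 * Mirrors the logic in _zcrypt_send_cprb(). */
static unsigned short resolve_target_domain(unsigned short tdom)
{
	if (tdom < AP_DOMAINS &&
	    !ap_test_config_usage_domain(tdom) &&
	    ap_test_config_ctrl_domain(tdom))
		return AUTOSEL_DOM;

	return tdom;
}
```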
@@ -355,8 +355,8 @@ static int qeth_cq_init(struct qeth_card *card)
 	qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
 			   QDIO_MAX_BUFFERS_PER_Q);
 	card->qdio.c_q->next_buf_to_init = 127;
-	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 1, 0, 127,
-		     NULL);
+	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 1, 0, 127);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
 		goto out;
@@ -2926,8 +2926,7 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
 	}
 	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
-	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
-		     NULL);
+	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, 0, rx_bufs);
 	if (rc) {
 		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
 		return rc;
@@ -3415,8 +3414,9 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
 			return 0;
 		}
-		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
-			     queue->next_buf_to_init, count, NULL);
+		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0,
+						  queue->next_buf_to_init,
+						  count);
 		if (rc) {
 			QETH_CARD_TEXT(card, 2, "qinberr");
 		}
@@ -3588,7 +3588,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 	}
 	QETH_TXQ_STAT_INC(queue, doorbell);
-	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
+	rc = qdio_add_bufs_to_output_queue(CARD_DDEV(card), queue->queue_no,
 		     index, count, aob);
 	switch (rc) {
@@ -3739,8 +3739,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
 		}
 		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
 	}
-	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
-		     cq->next_buf_to_init, count, NULL);
+	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), queue,
+					  cq->next_buf_to_init, count);
 	if (rc) {
 		dev_warn(&card->gdev->dev,
 			 "QDIO reported an error, rc=%i\n", rc);
@@ -5850,8 +5850,8 @@ static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
 		/* Fetch completed RX buffers: */
 		if (!card->rx.b_count) {
 			card->rx.qdio_err = 0;
-			card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
-							      0, true,
+			card->rx.b_count =
+				qdio_inspect_input_queue(CARD_DDEV(card), 0,
 							 &card->rx.b_index,
 							 &card->rx.qdio_err);
 			if (card->rx.b_count <= 0) {
@@ -5900,7 +5900,7 @@ static void qeth_cq_poll(struct qeth_card *card)
 		unsigned int start, error;
 		int completed;
-		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
+		completed = qdio_inspect_input_queue(CARD_DDEV(card), 1, &start,
 					       &error);
 		if (completed <= 0)
 			return;
@@ -6038,7 +6038,7 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
 			return 0;
 		}
-		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
+		completed = qdio_inspect_output_queue(CARD_DDEV(card), queue_no,
 						      &start, &error);
 		if (completed <= 0) {
 			/* Ensure we see TX completion for pending work: */
...
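Note: the qeth changes above are mechanical conversions from the flag-driven do_QDIO() to direction-specific helpers. A hedged compatibility sketch showing the mapping, with signatures inferred purely from the call sites in this diff:

```c
/* Sketch only: old-style do_QDIO() expressed via the new split qdio
 * API. QDIO_FLAG_SYNC_INPUT/OUTPUT are the old call flags; the qaob
 * is only meaningful on the output path, which is why the input
 * helper dropped that parameter entirely. */
static int do_QDIO_compat(struct ccw_device *cdev, unsigned int callflags,
			  int q_nr, unsigned int bufnr, unsigned int count,
			  struct qaob *aob)
{
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return qdio_add_bufs_to_input_queue(cdev, q_nr, bufnr, count);

	return qdio_add_bufs_to_output_queue(cdev, q_nr, bufnr, count, aob);
}
```

Splitting the API also removes the risk of passing a stale aob on the input path, where callers always passed NULL anyway.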
@@ -79,7 +79,7 @@ static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
 	unsigned int start, error;
 	int completed;
-	completed = qdio_inspect_queue(cdev, 0, false, &start, &error);
+	completed = qdio_inspect_output_queue(cdev, 0, &start, &error);
 	if (completed > 0) {
 		if (error) {
 			zfcp_qdio_handler_error(qdio, "qdreqt1", error);
@@ -154,7 +154,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 	/*
 	 * put SBALs back to response queue
 	 */
-	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL))
+	if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count))
 		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
 }
@@ -169,7 +169,7 @@ static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
 		tasklet_schedule(&qdio->request_tasklet);
 	/* Check the Response Queue: */
-	completed = qdio_inspect_queue(cdev, 0, true, &start, &error);
+	completed = qdio_inspect_input_queue(cdev, 0, &start, &error);
 	if (completed < 0)
 		return;
 	if (completed > 0)
@@ -326,8 +326,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 	atomic_sub(sbal_number, &qdio->req_q_free);
-	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
-			 q_req->sbal_first, sbal_number, NULL);
+	retval = qdio_add_bufs_to_output_queue(qdio->adapter->ccw_device, 0,
+					       q_req->sbal_first, sbal_number,
+					       NULL);
 	if (unlikely(retval)) {
 		/* Failed to submit the IO, roll back our modifications. */
@@ -395,7 +396,10 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return;
-	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
+	/*
+	 * Clear QDIOUP flag, thus qdio_add_bufs_to_output_queue() is not called
+	 * during qdio_shutdown().
+	 */
 	spin_lock_irq(&qdio->req_q_lock);
 	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	spin_unlock_irq(&qdio->req_q_lock);
@@ -498,8 +502,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 		sbale->addr = 0;
 	}
-	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q,
-		    NULL))
+	if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
 		goto failed_qdio;
 	/* set index of first available SBALS / number of available SBALS */
...
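Note: zfcp gets the same treatment as qeth, and qdio_inspect_queue()'s boolean direction selector is split in the same way. A sketch of that second mapping, again with signatures inferred from the call sites above:

```c
/* Sketch only: the retired bool selector of qdio_inspect_queue(),
 * mapped onto the two explicit helpers introduced by this series. */
static int qdio_inspect_compat(struct ccw_device *cdev, unsigned int nr,
			       bool is_input, unsigned int *bufnr,
			       unsigned int *error)
{
	if (is_input)
		return qdio_inspect_input_queue(cdev, nr, bufnr, error);

	return qdio_inspect_output_queue(cdev, nr, bufnr, error);
}
```

Making the direction part of the function name instead of a bare true/false argument also makes the converted call sites self-documenting.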