/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))

extern debug_info_t *kvm_s390_dbf;
extern debug_info_t *kvm_s390_dbf_uv;

#define KVM_UV_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
	debug_sprintf_event(kvm_s390_dbf_uv, d_loglevel, \
			    "%d: " d_string "\n", (d_kvm)->userspace_pid, \
			    d_args); \
} while (0)

#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
	  d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	  d_args); \
} while (0)
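
/*
 * Illustrative use of the event macros above: a per-vcpu message at
 * debug level 3 might look like
 *
 *	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
 *		   prefix);
 *
 * VM_EVENT() logs to the per-guest debug feature only, KVM_EVENT() to
 * the global kvm-s390 debug feature, and KVM_UV_EVENT() mirrors the
 * per-guest message into the global ultravisor debug feature together
 * with the guest's userspace pid.
 */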

static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}
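
/*
 * Illustrative combination of the helpers above: stopping a vcpu sets
 * CPUSTAT_STOPPED, which is_vcpu_stopped() below then reports:
 *
 *	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
 *	WARN_ON(!kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED));
 */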

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
}

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
	return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (kvm->arch.gmap)
		return 0;
	return 1;
#else
	return 0;
#endif
}
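
/*
 * Note: user controlled VMs (CONFIG_KVM_S390_UCONTROL) manage the guest
 * address spaces from userspace, so no kernel gmap is set up for them;
 * a missing kvm->arch.gmap is what identifies such a VM.
 */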

#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}
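
/*
 * Worked example (illustrative): the SIE block stores the prefix in
 * units of 8K (the prefix area spans two consecutive 4K pages), hence
 * GUEST_PREFIX_SHIFT of 13; a sie_block->prefix value of 0x2 therefore
 * corresponds to guest prefix address 0x2 << 13 = 0x4000.
 */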

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
		   prefix);
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
}

static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
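
/*
 * Decode example (illustrative): for an S-format instruction with
 * ipb = 0x20080000, base2 = 0x2 and disp2 = 0x008, so the helper above
 * yields gprs[2] + 8; base register 0 means "no base register".
 */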

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2,
					      u8 *ar_b1, u8 *ar_b2)
{
	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

	if (ar_b1)
		*ar_b1 = base1;
	if (ar_b2)
		*ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
	if (r1)
		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	if (r2)
		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	/* The displacement is a 20-bit _SIGNED_ value */
	if (disp2 & 0x80000)
		disp2 += 0xfff00000;

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}
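
/*
 * Worked example (illustrative): the 20-bit RSY displacement is split
 * into a low part ((ipb & 0x0fff0000) >> 16) and a high part
 * ((ipb & 0xff00) << 4). For DL = 0xfff and DH = 0xff, disp2 becomes
 * 0xfffff; bit 0x80000 is set, so adding 0xfff00000 gives 0xffffffff,
 * which (long)(int) sign-extends to the displacement -1.
 */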

static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}
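
/*
 * The condition code occupies PSW bits 18-19, i.e. bit position 44
 * counting from the least significant bit of the 64-bit mask, hence the
 * shift above. A handler reporting success would typically do
 * (illustrative):
 *
 *	kvm_s390_set_psw_cc(vcpu, 0);
 */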

/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
	return __test_facility(nr, kvm->arch.model.fac_mask) &&
		__test_facility(nr, kvm->arch.model.fac_list);
}

static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return -EINVAL;
	ptr = (unsigned char *) fac_list + (nr >> 3);
	*ptr |= (0x80UL >> (nr & 7));
	return 0;
}
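
/*
 * Worked example (illustrative): facility bits are numbered MSB-0, so
 * set_kvm_facility(fac_list, 8) sets the 0x80 bit of byte 1, the most
 * significant bit of the second byte of the facility list.
 */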

static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
	WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
	return test_bit_inv(nr, kvm->arch.cpu_feat);
}

/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl != 0;
}

static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm)
{
	if (kvm->arch.user_cpu_state_ctrl)
		return;

	VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control");
	kvm->arch.user_cpu_state_ctrl = 1;
}

/* get the end gfn of the last (highest gfn) memslot */
static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
{
	struct rb_node *node;
	struct kvm_memory_slot *ms;

	if (WARN_ON(kvm_memslots_empty(slots)))
		return 0;

	node = rb_last(&slots->gfn_tree);
	ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]);
	return ms->base_gfn + ms->npages;
}

static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
{
	u32 gd = (u32)(u64)kvm->arch.gisa_int.origin;

	if (gd && sclp.has_gisaf)
		gd |= GISA_FORMAT1;
	return gd;
}

/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc);
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc);

static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
{
	return kvm->arch.pv.handle;
}

static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv.handle;
}

static inline bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->lock);
	return !!kvm_s390_pv_get_handle(kvm);
}

static inline bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
	lockdep_assert_held(&vcpu->mutex);
	return !!kvm_s390_pv_cpu_get_handle(vcpu);
}
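
/*
 * Illustrative caller pattern: both predicates above assert their lock
 * via lockdep, so callers take it first:
 *
 *	mutex_lock(&kvm->lock);
 *	if (kvm_s390_pv_is_protected(kvm))
 *		...;
 *	mutex_unlock(&kvm->lock);
 */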

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
				      struct kvm_s390_irq *irq);
static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
					   struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm = *pgm_info,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm.code = code,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
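
/*
 * Typical use (illustrative): an instruction handler that runs into a
 * privileged-operation exception injects it and stops processing:
 *
 *	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 *		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 */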
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}
static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
	kvm_s390_rewind_psw(vcpu, -ilen);
}
static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
	/* don't inject PER events if we re-execute the instruction */
	vcpu->arch.sie_block->icptstatus &= ~0x02;
	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}
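
/*
 * Note: after kvm_s390_retry_instr() the guest PSW points at the
 * intercepted instruction again, so the next SIE entry re-executes it;
 * kvm_s390_get_ilen() supplies the length (2, 4 or 6 bytes) of the last
 * intercepted instruction.
 */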

int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	WARN_ON(!mutex_is_locked(&kvm->lock));
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_unblock(vcpu);
}

static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
	u64 rc;

	preempt_disable();
	rc = get_tod_clock_fast() + kvm->arch.epoch;
	preempt_enable();
	return rc;
}

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	if (rc <= 0)
		return rc;
	return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
			struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
			   void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
			   __u8 __user *buf, int len);
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
void kvm_s390_gisa_disable(struct kvm *kvm);
void kvm_s390_gisa_enable(struct kvm *kvm);
int kvm_s390_gib_init(u8 nisc);
void kvm_s390_gib_destroy(void);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

	return &sca->ipte_control;
}
static inline int kvm_s390_use_sca_entries(void)
{
	/*
	 * Without SIGP interpretation, only SRS interpretation (if available)
	 * might use the entries. By not setting the entries and keeping them
	 * invalid, hardware will not access them but intercept.
	 */
	return sclp.has_sigpif;
}
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info);

/**
 * kvm_s390_vcpu_crypto_reset_all
 *
 * Reset the crypto attributes for each vcpu. This can be done while the vcpus
 * are running as each vcpu will be removed from SIE before resetting the crypto
 * attributes and restored to SIE afterward.
 *
 * Note: The kvm->lock must be held while calling this function
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);

/**
 * diag9c_forwarding_hz
 *
 * Set the maximum number of diag9c forwardings per second; the value 0
 * turns the forwarding off.
 */
extern unsigned int diag9c_forwarding_hz;

#endif