intercept.c
// SPDX-License-Identifier: GPL-2.0
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>
#include <asm/sysinfo.h>
#include <asm/uv.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"

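/*
 * Get the length, in bytes, of the instruction that was intercepted.
 * Callers such as kvm_s390_retry_instr() feed the result into
 * kvm_s390_rewind_psw() to step the guest PSW back over the
 * intercepted instruction.
 */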
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
	u8 ilen = 0;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* instruction only stored for these icptcodes */
		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
		/* Use the length of the EXECUTE instruction if necessary */
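		/*
		 * The low bit of icptstatus is set when the intercepted
		 * instruction was the target of an EXECUTE; the PSW then
		 * has to be moved past the EXECUTE itself (4 bytes for EX,
		 * 6 bytes for EXRL), not past the target instruction.
		 */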
		if (sie_block->icptstatus & 1) {
			ilen = (sie_block->icptstatus >> 4) & 0x6;
			if (!ilen)
				ilen = 4;
		}
		break;
	case ICPT_PROGI:
		/* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
		break;
	}
	return ilen;
}

static int handle_stop(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	uint8_t flags, stop_pending;

	vcpu->stat.exit_stop_request++;

	/* delay the stop if any non-stop irq is pending */
	if (kvm_s390_vcpu_has_irq(vcpu, 1))
		return 0;

	/* avoid races with the injection/SIGP STOP code */
	spin_lock(&li->lock);
	flags = li->irq.stop.flags;
	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
	spin_unlock(&li->lock);

	trace_kvm_s390_stop_request(stop_pending, flags);
	if (!stop_pending)
		return 0;

	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc)
			return rc;
	}

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
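
	/*
	 * Return -EOPNOTSUPP so that the stop intercept is passed on to
	 * userspace for final processing.
	 */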
	return -EOPNOTSUPP;
}

static int handle_validity(struct kvm_vcpu *vcpu)
{
	int viwhy = vcpu->arch.sie_block->ipb >> 16;

	vcpu->stat.exit_validity++;
	trace_kvm_s390_intercept_validity(vcpu, viwhy);
	KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
		  current->pid, vcpu->kvm);

	/* do not warn on invalid runtime instrumentation mode */
	WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
		  viwhy);
	return -EINVAL;
}

static int handle_instruction(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_instruction++;
	trace_kvm_s390_intercept_instruction(vcpu,
					     vcpu->arch.sie_block->ipa,
					     vcpu->arch.sie_block->ipb);
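
	/* dispatch on the first opcode byte (IPA bits 0-7) */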
	switch (vcpu->arch.sie_block->ipa >> 8) {
	case 0x01:
		return kvm_s390_handle_01(vcpu);
	case 0x82:
		return kvm_s390_handle_lpsw(vcpu);
	case 0x83:
		return kvm_s390_handle_diag(vcpu);
	case 0xaa:
		return kvm_s390_handle_aa(vcpu);
	case 0xae:
		return kvm_s390_handle_sigp(vcpu);
	case 0xb2:
		return kvm_s390_handle_b2(vcpu);
	case 0xb6:
		return kvm_s390_handle_stctl(vcpu);
	case 0xb7:
		return kvm_s390_handle_lctl(vcpu);
	case 0xb9:
		return kvm_s390_handle_b9(vcpu);
	case 0xe3:
		return kvm_s390_handle_e3(vcpu);
	case 0xe5:
		return kvm_s390_handle_e5(vcpu);
	case 0xeb:
		return kvm_s390_handle_eb(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = vcpu->arch.sie_block->iprcc,
		/* the PSW has already been rewound */
		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
	};
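
	/*
	 * Copy the additional interception data that the SIE block
	 * provides for this program interruption code into the matching
	 * pgm_info fields before reinjecting the exception.
	 */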
	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
		pgm_info.op_access_id   = vcpu->arch.sie_block->oai;
		break;
	case PGM_MONITOR:
		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	case PGM_PROTECTION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
		break;
	default:
		break;
	}

	/* forward a concurrently recognized PER event, if any */
	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
		pgm_info.per_code = vcpu->arch.sie_block->perc;
		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
	}
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_itdb *itdb;
	int rc;

	if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
		return 0;
	if (current->thread.per_flags & PER_FLAG_NO_TE)
		return 0;
	itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
	rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
	if (rc)
		return rc;
	memset(itdb, 0, sizeof(*itdb));

	return 0;
}

#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)

static int handle_prog(struct kvm_vcpu *vcpu)
{
	psw_t psw;
	int rc;

	vcpu->stat.exit_program_interruption++;

	/*
	 * Intercept 8 indicates a loop of specification exceptions
	 * for protected guests.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EOPNOTSUPP;

	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
		rc = kvm_s390_handle_per_event(vcpu);
		if (rc)
			return rc;
		/* the interrupt might have been filtered out completely */
		if (vcpu->arch.sie_block->iprcc == 0)
			return 0;
	}

	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
		if (rc)
			return rc;
		/* Avoid endless loops of specification exceptions */
		if (!is_valid_psw(&psw))
			return -EOPNOTSUPP;
	}
	rc = handle_itdb(vcpu);
	if (rc)
		return rc;

	return inject_prog_on_prog_intercept(vcpu);
}

/**
 * handle_external_interrupt - used for external interruption interceptions
 *
 * This interception occurs only if the CPUSTAT_EXT_INT bit was set, or if
 * the external new PSW has external interrupts enabled. In the first case,
 * we have to deliver the interrupt manually; in the second case, we drop
 * to userspace to handle the situation there.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	u16 eic = vcpu->arch.sie_block->eic;
	struct kvm_s390_irq irq;
	psw_t newpsw;
	int rc;

	vcpu->stat.exit_external_interrupt++;

	rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/*
	 * We cannot handle a clock comparator or CPU timer interrupt if
	 * the external new PSW keeps external interrupts enabled: the
	 * condition would be recognized again immediately, causing an
	 * interruption loop.
	 */
	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
	    (newpsw.mask & PSW_MASK_EXT))
		return -EOPNOTSUPP;

	switch (eic) {
	case EXT_IRQ_CLK_COMP:
		irq.type = KVM_S390_INT_CLOCK_COMP;
		break;
	case EXT_IRQ_CPU_TIMER:
		irq.type = KVM_S390_INT_CPU_TIMER;
		break;
	case EXT_IRQ_EXTERNAL_CALL:
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
		/* ignore if another external call is already pending */
		if (rc == -EBUSY)
			return 0;
		return rc;
	default:
		return -EOPNOTSUPP;
	}

	return kvm_s390_inject_vcpu(vcpu, &irq);
}

/**
 * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
	unsigned long srcaddr, dstaddr;
	int reg1, reg2, rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* Make sure that the source is paged-in */
	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
				     reg2, &srcaddr, GACC_FETCH);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
	if (rc != 0)
		return rc;

	/* Make sure that the destination is paged-in */
	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
				     reg1, &dstaddr, GACC_STORE);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
	if (rc != 0)
		return rc;

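	/* rewind the PSW so that SIE re-executes the now-serviceable MVPG */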
	kvm_s390_retry_instr(vcpu);

	return 0;
}

static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
	vcpu->stat.exit_pei++;

	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
		return handle_mvpg_pei(vcpu);
	if (vcpu->arch.sie_block->ipa >> 8 == 0xae)	/* SIGP */
		return kvm_s390_handle_sigp_pei(vcpu);

	return -EOPNOTSUPP;
}

/*
 * Handle the sthyi instruction that provides the guest with system
 * information, like current CPU resources available at each level of
 * the machine.
 */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
	int reg1, reg2, r = 0;
	u64 code, addr, cc = 0, rc = 0;
	struct sthyi_sctns *sctns = NULL;

	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];
	addr = vcpu->run->s.regs.gprs[reg2];

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

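	/*
	 * R1 (function code) and R2 (response buffer address) must be
	 * distinct even registers; the return code is stored in R2 + 1.
	 */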
	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (code & 0xffff) {
		cc = 3;
		rc = 4;
		goto out;
	}

	if (addr & ~PAGE_MASK)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL);
	if (!sctns)
		return -ENOMEM;

	cc = sthyi_fill(sctns, &rc);

out:
	if (!cc) {
		r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
		if (r) {
			free_page((unsigned long)sctns);
			return kvm_s390_inject_prog_cond(vcpu, r);
		}
	}

	free_page((unsigned long)sctns);
	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
	kvm_s390_set_psw_cc(vcpu, cc);
	return r;
}

static int handle_operexc(struct kvm_vcpu *vcpu)
{
	psw_t oldpsw, newpsw;
	int rc;

	vcpu->stat.exit_operation_exception++;
	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
				      vcpu->arch.sie_block->ipb);

	if (vcpu->arch.sie_block->ipa == 0xb256)	/* STHYI */
		return handle_sthyi(vcpu);

	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
		return -EOPNOTSUPP;
	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/*
	 * Avoid endless loops of operation exceptions, if the pgm new
	 * PSW will cause a new operation exception.
	 * The heuristic checks if the pgm new psw is within 6 bytes before
	 * the faulting psw address (with same DAT, AS settings) and the
	 * new psw is not a wait psw and the fault was not triggered by
	 * problem state.
	 */
	oldpsw = vcpu->arch.sie_block->gpsw;
	if (oldpsw.addr - newpsw.addr <= 6 &&
	    !(newpsw.mask & PSW_MASK_WAIT) &&
	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
		return -EOPNOTSUPP;
	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

static int handle_pv_spx(struct kvm_vcpu *vcpu)
{
	u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;

	kvm_s390_set_prefix(vcpu, pref);
	trace_kvm_s390_handle_prefix(vcpu, 1, pref);
	return 0;
}

static int handle_pv_sclp(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	/*
	 * 2 cases:
	 * a: an sccb answering interrupt was already pending or in flight.
	 *    As the sccb value is not known we can simply set some value to
	 *    trigger delivery of a saved SCCB. UV will then use its saved
	 *    copy of the SCCB value.
	 * b: an error SCCB interrupt needs to be injected so we also inject
	 *    a fake SCCB address. Firmware will use the proper one.
	 * This makes sure that both errors and real sccb returns will only
	 * be delivered after a notification intercept (instruction has
	 * finished) but not after others.
	 */
	fi->srv_signal.ext_params |= 0x43000;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int handle_pv_uvc(struct kvm_vcpu *vcpu)
{
	struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
	struct uv_cb_cts uvcb = {
		.header.cmd	= UVC_CMD_UNPIN_PAGE_SHARED,
		.header.len	= sizeof(uvcb),
		.guest_handle	= kvm_s390_pv_get_handle(vcpu->kvm),
		.gaddr		= guest_uvcb->paddr,
	};
	int rc;

	if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
		WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
			  guest_uvcb->header.cmd);
		return 0;
	}
	rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
	/*
	 * If the unpin did not succeed, the guest will exit again for the UVC
	 * and we will retry the unpin.
	 */
	if (rc == -EINVAL)
		return 0;
	return rc;
}

static int handle_pv_notification(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ipa == 0xb210)	/* SPX */
		return handle_pv_spx(vcpu);
	if (vcpu->arch.sie_block->ipa == 0xb220)	/* SERVC */
		return handle_pv_sclp(vcpu);
	if (vcpu->arch.sie_block->ipa == 0xb9a4)	/* UVC */
		return handle_pv_uvc(vcpu);

	return handle_instruction(vcpu);
}

int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	int rc, per_rc = 0;

	/* user controlled VMs handle all intercepts in userspace */
	if (kvm_is_ucontrol(vcpu->kvm))
		return -EOPNOTSUPP;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_EXTREQ:
		vcpu->stat.exit_external_request++;
		return 0;
	case ICPT_IOREQ:
		vcpu->stat.exit_io_request++;
		return 0;
	case ICPT_INST:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PROGI:
		return handle_prog(vcpu);
	case ICPT_EXTINT:
		return handle_external_interrupt(vcpu);
	case ICPT_WAIT:
		return kvm_s390_handle_wait(vcpu);
	case ICPT_VALIDITY:
		return handle_validity(vcpu);
	case ICPT_STOP:
		return handle_stop(vcpu);
	case ICPT_OPEREXC:
		rc = handle_operexc(vcpu);
		break;
	case ICPT_PARTEXEC:
		rc = handle_partial_execution(vcpu);
		break;
	case ICPT_KSS:
		rc = kvm_s390_skey_check_enable(vcpu);
		break;
	case ICPT_MCHKREQ:
	case ICPT_INT_ENABLE:
		/*
		 * PSW bit 13 or a CR (0, 6, 14) changed and we might
		 * now be able to deliver interrupts. The pre-run code
		 * will take care of this.
		 */
		rc = 0;
		break;
	case ICPT_PV_INSTR:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PV_NOTIFY:
		rc = handle_pv_notification(vcpu);
		break;
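	/*
	 * A protected guest's (two page) prefix area has to be mapped
	 * secure before the guest can run; import both pages before
	 * re-entering SIE.
	 */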
	case ICPT_PV_PREF:
		rc = 0;
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu));
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* process PER, also if the instruction is processed in user space */
	if (vcpu->arch.sie_block->icptstatus & 0x02 &&
	    (!rc || rc == -EOPNOTSUPP))
		per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
	return per_rc ? per_rc : rc;
}