// SPDX-License-Identifier: GPL-2.0
/*
* kvm nested virtualization support for s390x
*
* Copyright IBM Corp. 2016, 2018
*
* Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
*/

#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/io.h>
#include <linux/mman.h>
#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include <asm/facility.h>
#include "kvm-s390.h"
#include "gaccess.h"
enum vsie_page_flags {
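/* the vsie_page is in use by one VCPU and must not be grabbed by others */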
VSIE_PAGE_IN_USE = 0,
};

struct vsie_page {
struct kvm_s390_sie_block scb_s; /* 0x0000 */
/*
* the backup info for machine check. ensure it's at
* the same offset as that in struct sie_page! (see the
* compile-time check sketched below this struct)
*/
struct mcck_volatile_info mcck_info; /* 0x0200 */
/*
* The pinned original scb. Be aware that other VCPUs can modify
* it while we read from it. Values that are used for conditions or
* are reused conditionally should be accessed via READ_ONCE.
*/
struct kvm_s390_sie_block *scb_o; /* 0x0218 */
/* the shadow gmap in use by the vsie_page */
struct gmap *gmap; /* 0x0220 */
/* address of the last reported fault to guest2 */
unsigned long fault_addr; /* 0x0228 */
/* calculated guest addresses of satellite control blocks */
gpa_t sca_gpa; /* 0x0230 */
gpa_t itdba_gpa; /* 0x0238 */
gpa_t gvrd_gpa; /* 0x0240 */
gpa_t riccbd_gpa; /* 0x0248 */
gpa_t sdnx_gpa; /* 0x0250 */
/*
* guest address of the original SCB. Remains set for free vsie
* pages, so we can properly look them up in our addr_to_page
* radix tree.
*/
gpa_t scb_gpa; /* 0x0258 */
/*
* Flags: must be set/cleared atomically after the vsie page can be
* looked up by other CPUs.
*/
unsigned long flags; /* 0x0260 */
__u8 reserved[0x0700 - 0x0268]; /* 0x0268 */
struct kvm_s390_crypto_cb crycb; /* 0x0700 */
__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE]; /* 0x0800 */
};
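
/*
* A compile-time check of the layout requirement noted at mcck_info above
* (an editorial sketch, not part of the original file; assumes struct
* sie_page from <asm/kvm_host.h> is visible here via <linux/kvm_host.h>):
*/
static_assert(offsetof(struct vsie_page, mcck_info) == offsetof(struct sie_page, mcck_info));
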
/**
* gmap_shadow_valid() - check if a shadow guest address space matches the
* given properties and is still valid
* @sg: pointer to the shadow guest address space structure
* @asce: ASCE for which the shadow table is requested
* @edat_level: edat level to be used for the shadow translation
*
* Returns 1 if the gmap shadow is still valid and matches the given
* properties; the caller can continue using it. Returns 0 otherwise; the
* caller has to request a new shadow gmap in this case.
*/
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
if (sg->removed)
return 0;
return sg->orig_asce == asce && sg->edat_level == edat_level;
}
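
/*
* Typical use when acquiring a shadow gmap (an illustrative sketch; the
* actual caller in this file is acquire_gmap_shadow(), further below):
*
*	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
*		return 0;
*/
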
/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
__u16 reason_code)
{
scb->ipa = 0x1000;
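/* the validity reason code is presented to guest 2 in the upper halfword of ipb */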
scb->ipb = ((__u32) reason_code) << 16;
scb->icptcode = ICPT_VALIDITY;
return 1;
}

/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
prefix_unmapped(vsie_page);
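/*
* If the VCPU already entered SIE, request a STOP interrupt to force an
* exit, then busy-wait until it has actually left SIE.
*/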
if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
cpu_relax();
}

/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}

/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
int cpuflags;

cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
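/* drop the stale request bits, then copy over the currently pending ones */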
atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}

/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

/* we don't allow ESA/390 guests */
if (!(cpuflags & CPUSTAT_ZARCH))
return set_validity_icpt(scb_s, 0x0001U);

if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
return set_validity_icpt(scb_s, 0x0001U);
else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
return set_validity_icpt(scb_s, 0x0007U);

/* intervention requests will be set later */
newflags = CPUSTAT_ZARCH;
if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
newflags |= CPUSTAT_GED;
if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
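/* GED and GED2 are mutually exclusive; both set at once is invalid */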
if (cpuflags & CPUSTAT_GED)
return set_validity_icpt(scb_s, 0x0001U);
newflags |= CPUSTAT_GED2;
}
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
newflags |= cpuflags & CPUSTAT_P;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
newflags |= cpuflags & CPUSTAT_SM;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
newflags |= cpuflags & CPUSTAT_IBS;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
newflags |= cpuflags & CPUSTAT_KSS;

atomic_set(&scb_s->cpuflags, newflags);
return 0;
}

/* Copy to APCB FORMAT1 from APCB FORMAT0 */
static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
unsigned long crycb_gpa, struct kvm_s390_apcb1 *apcb_h)
{
struct kvm_s390_apcb0 tmp;
unsigned long apcb_gpa;
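
/* locate the FORMAT0 apcb within the original guest's crycb */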
apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
if (read_guest_real(vcpu, apcb_gpa, &tmp,
sizeof(struct kvm_s390_apcb0)))
return -EFAULT;
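
/*
* the shadow mask is the intersection of the guest 1 and guest 2 masks;
* FORMAT0 aqm and adm carry only 16 valid bits, the rest is masked off
*/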
apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;

return 0;
}

/**
* setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
* @vcpu: pointer to the virtual CPU
* @apcb_s: pointer to start of apcb in the shadow crycb
* @crycb_gpa: guest physical address to start of original guest crycb