2014-01-01 23:26:52 +08:00
|
|
|
/*
|
|
|
|
* guest access functions
|
|
|
|
*
|
|
|
|
* Copyright IBM Corp. 2014
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <asm/pgtable.h>
|
2016-03-08 19:16:35 +08:00
|
|
|
#include <asm/gmap.h>
|
2014-01-01 23:26:52 +08:00
|
|
|
#include "kvm-s390.h"
|
|
|
|
#include "gaccess.h"
|
2015-03-09 19:17:25 +08:00
|
|
|
#include <asm/switch_to.h>
|
2014-01-01 23:26:52 +08:00
|
|
|
|
|
|
|
/*
 * Address-space-control element (ASCE) as defined by the z/Architecture.
 * The bit layout is fixed by hardware; do not reorder fields.
 */
union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long	 : 2;
		unsigned long g  : 1; /* Subspace Group Control */
		unsigned long p  : 1; /* Private Space Control */
		unsigned long s  : 1; /* Storage-Alteration-Event Control */
		unsigned long x  : 1; /* Space-Switch-Event Control */
		unsigned long r  : 1; /* Real-Space Control */
		unsigned long	 : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};
|
|
|
|
|
|
|
|
/* Values of the ASCE designation-type (dt) field. */
enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};
|
|
|
|
|
|
|
|
/* Region-first-table entry (hardware-defined bit layout). */
union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};
|
|
|
|
|
|
|
|
/* Region-second-table entry (hardware-defined bit layout). */
union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto: 52;/* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};
|
|
|
|
|
|
|
|
/* Region-third-table entry, format-control 0 (points to a segment table). */
struct region3_table_entry_fc0 {
	unsigned long sto: 52;/* Segment-Table Origin */
	unsigned long	 : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};
|
|
|
|
|
|
|
|
/* Region-third-table entry, format-control 1 (maps a 2G region frame). */
struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long	 : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};
|
|
|
|
|
|
|
|
/*
 * Region-third-table entry. The fc bit selects between the fc0 (segment
 * table pointer) and fc1 (large frame) views; the anonymous struct exposes
 * the bits common to both formats.
 */
union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
|
|
|
|
|
|
|
|
/* Segment-table entry, format-control 0 (points to a page table). */
struct segment_entry_fc0 {
	unsigned long pto: 53;/* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 3;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};
|
|
|
|
|
|
|
|
/* Segment-table entry, format-control 1 (maps a 1M segment frame, EDAT1). */
struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long	 : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc: 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};
|
|
|
|
|
|
|
|
/*
 * Segment-table entry. The fc bit selects between the fc0 (page table
 * pointer) and fc1 (large frame) views; the anonymous struct exposes the
 * bits common to both formats.
 */
union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};
|
|
|
|
|
|
|
|
/* Values of the table-type (tt) bits found in region/segment table entries. */
enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};
|
|
|
|
|
|
|
|
/* Page-table entry (hardware-defined bit layout). */
union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z  : 1; /* Zero Bit */
		unsigned long i  : 1; /* Page-Invalid Bit */
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long co : 1; /* Change-Recording Override */
		unsigned long	 : 8;
	};
};
|
|
|
|
|
|
|
|
/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 *
 * The second anonymous struct exposes only the two most significant bits
 * of each index (rfx01, rsx01, ...), which are what gets compared against
 * the table-length/offset fields during the table walk.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};
|
|
|
|
|
|
|
|
/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};
|
|
|
|
|
2015-03-09 19:17:25 +08:00
|
|
|
/* Access-list-entry token (ALET), used for access-register translation. */
union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p        : 1;	/* primary-list bit */
		u32 alesn    : 8;	/* ALE sequence number */
		u32 alen     : 16;	/* access-list-entry number */
	};
};
|
|
|
|
|
|
|
|
/* Access-list designation (ALD). */
union ald {
	u32 val;
	struct {
		u32     : 1;
		u32 alo : 24;	/* access-list origin */
		u32 all : 7;	/* access-list length */
	};
};
|
|
|
|
|
|
|
|
/* Access-list entry (ALE); __packed because the layout is architected. */
struct ale {
	unsigned long i      : 1; /* ALEN-Invalid Bit */
	unsigned long        : 5;
	unsigned long fo     : 1; /* Fetch-Only Bit */
	unsigned long p      : 1; /* Private Bit */
	unsigned long alesn  : 8; /* Access-List-Entry Sequence Number */
	unsigned long aleax  : 16; /* Access-List-Entry Authorization Index */
	unsigned long        : 32;
	unsigned long        : 1;
	unsigned long asteo  : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long        : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
} __packed;
|
|
|
|
|
|
|
|
/* ASN-second-table entry (ASTE); only the leading fields are needed here. */
struct aste {
	unsigned long i      : 1; /* ASX-Invalid Bit */
	unsigned long ato    : 29; /* Authority-Table Origin */
	unsigned long        : 1;
	unsigned long b      : 1; /* Base-Space Bit */
	unsigned long ax     : 16; /* Authorization Index */
	unsigned long atl    : 12; /* Authority-Table Length */
	unsigned long        : 2;
	unsigned long ca     : 1; /* Controlled-ASN Bit */
	unsigned long ra     : 1; /* Reusable-ASN Bit */
	unsigned long asce   : 64; /* Address-Space-Control Element */
	unsigned long ald    : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
} __packed;
|
2014-01-10 21:33:28 +08:00
|
|
|
|
|
|
|
int ipte_lock_held(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
2015-04-23 00:08:39 +08:00
|
|
|
if (vcpu->arch.sie_block->eca & 1) {
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
read_lock(&vcpu->kvm->arch.sca_lock);
|
|
|
|
rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
|
|
|
|
read_unlock(&vcpu->kvm->arch.sca_lock);
|
|
|
|
return rc;
|
|
|
|
}
|
2014-10-01 20:48:42 +08:00
|
|
|
return vcpu->kvm->arch.ipte_lock_count != 0;
|
2014-01-10 21:33:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Take the IPTE lock when the SIE interpretation facility is not used.
 * Nesting within the same VM is allowed: only the first locker has to
 * win the guest-visible kill bit (k) in the ipte control word via cmpxchg.
 */
static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count++;
	if (vcpu->kvm->arch.ipte_lock_count > 1)
		goto out;	/* lock already held by this VM; just nest */
retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		/* single load via READ_ONCE; see the gcc SRA bug history */
		old = READ_ONCE(*ic);
		if (old.k) {
			/* held elsewhere: drop the lock, yield, try again */
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
|
|
|
|
|
|
|
|
/*
 * Release the IPTE lock taken by ipte_lock_simple(). Only the outermost
 * unlock clears the kill bit (k) and wakes up waiters on ipte_wq.
 */
static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
	vcpu->kvm->arch.ipte_lock_count--;
	if (vcpu->kvm->arch.ipte_lock_count)
		goto out;	/* still nested; keep the k bit set */
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		/*
		 * READ_ONCE guarantees a single load of *ic; older gcc
		 * duplicated the ACCESS_ONCE load here (SRA bug), which
		 * could hang VCPU threads in handle_ipte_interlock.
		 */
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	wake_up(&vcpu->kvm->arch.ipte_wq);
out:
	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
|
|
|
|
|
|
|
|
/*
 * Take the IPTE lock when the SIE interpretation facility is in use.
 * The lock state is fully guest/SIE visible: k marks the lock held and
 * kh counts the host holders. We must back off while the guest (kg) part
 * indicates the lock is taken by the SIE.
 */
static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		/* single load via READ_ONCE; see the gcc SRA bug history */
		old = READ_ONCE(*ic);
		if (old.kg) {
			/* guest side holds it: drop the lock and retry */
			read_unlock(&vcpu->kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
|
|
|
|
|
|
|
|
/*
 * Release the IPTE lock taken by ipte_lock_siif(): decrement the holder
 * count (kh) and clear the lock bit (k) when the last holder leaves.
 * Waiters are only woken once the lock is fully released.
 */
static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	read_lock(&vcpu->kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(vcpu->kvm);
	do {
		/*
		 * READ_ONCE guarantees a single load of *ic; older gcc
		 * duplicated the ACCESS_ONCE load here (SRA bug), which
		 * could hang VCPU threads in handle_ipte_interlock.
		 */
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&vcpu->kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&vcpu->kvm->arch.ipte_wq);
}
|
|
|
|
|
2014-02-04 21:48:07 +08:00
|
|
|
void ipte_lock(struct kvm_vcpu *vcpu)
|
2014-01-10 21:33:28 +08:00
|
|
|
{
|
|
|
|
if (vcpu->arch.sie_block->eca & 1)
|
|
|
|
ipte_lock_siif(vcpu);
|
|
|
|
else
|
|
|
|
ipte_lock_simple(vcpu);
|
|
|
|
}
|
|
|
|
|
2014-02-04 21:48:07 +08:00
|
|
|
void ipte_unlock(struct kvm_vcpu *vcpu)
|
2014-01-10 21:33:28 +08:00
|
|
|
{
|
|
|
|
if (vcpu->arch.sie_block->eca & 1)
|
|
|
|
ipte_unlock_siif(vcpu);
|
|
|
|
else
|
|
|
|
ipte_unlock_simple(vcpu);
|
|
|
|
}
|
|
|
|
|
2015-03-09 19:17:25 +08:00
|
|
|
static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
|
2015-11-16 22:42:11 +08:00
|
|
|
enum gacc_mode mode)
|
2015-03-09 19:17:25 +08:00
|
|
|
{
|
|
|
|
union alet alet;
|
|
|
|
struct ale ale;
|
|
|
|
struct aste aste;
|
|
|
|
unsigned long ald_addr, authority_table_addr;
|
|
|
|
union ald ald;
|
|
|
|
int eax, rc;
|
|
|
|
u8 authority_table;
|
|
|
|
|
|
|
|
if (ar >= NUM_ACRS)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
save_access_regs(vcpu->run->s.regs.acrs);
|
|
|
|
alet.val = vcpu->run->s.regs.acrs[ar];
|
|
|
|
|
|
|
|
if (ar == 0 || alet.val == 0) {
|
|
|
|
asce->val = vcpu->arch.sie_block->gcr[1];
|
|
|
|
return 0;
|
|
|
|
} else if (alet.val == 1) {
|
|
|
|
asce->val = vcpu->arch.sie_block->gcr[7];
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (alet.reserved)
|
|
|
|
return PGM_ALET_SPECIFICATION;
|
|
|
|
|
|
|
|
if (alet.p)
|
|
|
|
ald_addr = vcpu->arch.sie_block->gcr[5];
|
|
|
|
else
|
|
|
|
ald_addr = vcpu->arch.sie_block->gcr[2];
|
|
|
|
ald_addr &= 0x7fffffc0;
|
|
|
|
|
|
|
|
rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
if (alet.alen / 8 > ald.all)
|
|
|
|
return PGM_ALEN_TRANSLATION;
|
|
|
|
|
|
|
|
if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
|
|
|
|
return PGM_ADDRESSING;
|
|
|
|
|
|
|
|
rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
|
|
|
|
sizeof(struct ale));
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
if (ale.i == 1)
|
|
|
|
return PGM_ALEN_TRANSLATION;
|
|
|
|
if (ale.alesn != alet.alesn)
|
|
|
|
return PGM_ALE_SEQUENCE;
|
|
|
|
|
|
|
|
rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
if (aste.i)
|
|
|
|
return PGM_ASTE_VALIDITY;
|
|
|
|
if (aste.astesn != ale.astesn)
|
|
|
|
return PGM_ASTE_SEQUENCE;
|
|
|
|
|
|
|
|
if (ale.p == 1) {
|
|
|
|
eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
|
|
|
|
if (ale.aleax != eax) {
|
|
|
|
if (eax / 16 > aste.atl)
|
|
|
|
return PGM_EXTENDED_AUTHORITY;
|
|
|
|
|
|
|
|
authority_table_addr = aste.ato * 4 + eax / 4;
|
|
|
|
|
|
|
|
rc = read_guest_real(vcpu, authority_table_addr,
|
|
|
|
&authority_table,
|
|
|
|
sizeof(u8));
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
|
|
|
|
if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
|
|
|
|
return PGM_EXTENDED_AUTHORITY;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-16 22:42:11 +08:00
|
|
|
if (ale.fo == 1 && mode == GACC_STORE)
|
2015-03-09 19:17:25 +08:00
|
|
|
return PGM_PROTECTION;
|
|
|
|
|
|
|
|
asce->val = aste.asce;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Layout of the translation-exception identification (low core 0x90). */
struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi  : 2; /* Access Exception Fetch/Store Indication */
	unsigned long	   : 6;
	unsigned long b60  : 1;
	unsigned long b61  : 1;
	unsigned long as   : 2; /* ASCE Identifier */
};
|
|
|
|
|
|
|
|
/* Values for the fetch/store indication (fsi) field. */
enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2  /* Exception was due to fetch operation */
};
|
|
|
|
|
2016-06-01 01:56:46 +08:00
|
|
|
/* Source of a protection exception, used to fill in the exception bits. */
enum prot_type {
	PROT_TYPE_LA   = 0,	/* low-address protection */
	PROT_TYPE_KEYC = 1,	/* key-controlled protection */
	PROT_TYPE_ALC  = 2,	/* access-list-controlled protection */
	PROT_TYPE_DAT  = 3,	/* DAT protection */
};
|
|
|
|
|
|
|
|
/*
 * trans_exc - set up vcpu->arch.pgm for an access/translation exception
 * @vcpu: virtual cpu
 * @code: program interruption code (PGM_*)
 * @gva:  guest virtual address that triggered the exception
 * @ar:   access register involved (stored as exc_access_id where defined)
 * @mode: access mode, used for the fetch/store indication
 * @prot: protection source, only relevant for PGM_PROTECTION
 *
 * Fills the pgm info (trans_exc_code, exc_access_id) as architected for
 * the given code and returns @code unchanged so callers can propagate it.
 * The switch deliberately falls through: protection exceptions also need
 * the address/as fields, and those in turn share the exc_access_id setup.
 */
static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
		     ar_t ar, enum gacc_mode mode, enum prot_type prot)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_TYPE_ALC:
			tec->b60 = 1;
			/* FALL THROUGH */
		case PROT_TYPE_DAT:
			tec->b61 = 1;
			break;
		default: /* LA and KEYC set b61 to 0, other params undefined */
			return code;
		}
		/* FALL THROUGH */
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller.
		 */
		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		/* FALL THROUGH */
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		/*
		 * We can always store exc_access_id, as it is
		 * undefined for non-ar cases. It is undefined for
		 * most DAT protection exceptions.
		 */
		pgm->exc_access_id = ar;
		break;
	}
	return code;
}
|
|
|
|
|
2015-03-09 19:17:25 +08:00
|
|
|
static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
|
2016-06-01 01:44:10 +08:00
|
|
|
unsigned long ga, ar_t ar, enum gacc_mode mode)
|
2014-01-01 23:26:52 +08:00
|
|
|
{
|
2015-03-09 19:17:25 +08:00
|
|
|
int rc;
|
2015-11-16 22:48:59 +08:00
|
|
|
struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
|
2015-03-09 19:17:25 +08:00
|
|
|
|
2015-11-16 22:48:59 +08:00
|
|
|
if (!psw.t) {
|
2015-03-09 19:17:25 +08:00
|
|
|
asce->val = 0;
|
|
|
|
asce->r = 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-11-16 22:48:59 +08:00
|
|
|
if (mode == GACC_IFETCH)
|
|
|
|
psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;
|
|
|
|
|
|
|
|
switch (psw.as) {
|
2014-01-01 23:26:52 +08:00
|
|
|
case PSW_AS_PRIMARY:
|
2015-03-09 19:17:25 +08:00
|
|
|
asce->val = vcpu->arch.sie_block->gcr[1];
|
|
|
|
return 0;
|
2014-01-01 23:26:52 +08:00
|
|
|
case PSW_AS_SECONDARY:
|
2015-03-09 19:17:25 +08:00
|
|
|
asce->val = vcpu->arch.sie_block->gcr[7];
|
|
|
|
return 0;
|
2014-01-01 23:26:52 +08:00
|
|
|
case PSW_AS_HOME:
|
2015-03-09 19:17:25 +08:00
|
|
|
asce->val = vcpu->arch.sie_block->gcr[13];
|
|
|
|
return 0;
|
|
|
|
case PSW_AS_ACCREG:
|
2015-11-16 22:42:11 +08:00
|
|
|
rc = ar_translation(vcpu, asce, ar, mode);
|
2015-03-09 19:17:25 +08:00
|
|
|
if (rc > 0)
|
2016-06-01 02:21:03 +08:00
|
|
|
return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
|
2015-03-09 19:17:25 +08:00
|
|
|
return rc;
|
2014-01-01 23:26:52 +08:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Read one 8-byte DAT table entry at guest physical address @gpa. */
static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}
|
|
|
|
|
|
|
|
/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *          - a negative value if guest access failed due to e.g. broken
 *            guest mapping
 *          - a positive value if an access exception happened. In this case
 *            the returned value is the program interruption code as defined
 *            by the architecture
 *
 * NOTE(review): the return type is unsigned long although negative errors
 * and small positive PGM codes are returned; callers assign it to an int.
 * Consider changing the return type to int — confirm against all callers.
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	/* enhanced-DAT facilities must be enabled in CR0 and available */
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	if (asce.r)
		goto real_address;	/* real-space ASCE: no table walk */
	ptr = asce.origin * 4096;
	/*
	 * First switch: validate that the address fits the designation type
	 * and compute the offset of the top-level table entry.
	 */
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	/*
	 * Second switch: walk down the tables, entering at the level the
	 * ASCE designates and deliberately falling through to lower levels.
	 */
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * 4096 + vaddr.rsx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * 4096 + vaddr.rtx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			/* EDAT2 large (2G) frame: translation ends here */
			dat_protection |= rtte.fc1.p;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			/* EDAT1 large (1M) frame: translation ends here */
			dat_protection |= ste.fc1.p;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
	}
	}
	/* final level: the page table */
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	if (pte.co && !edat1)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection)
		return PGM_PROTECTION;
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * is_low_address - test whether @ga falls into a low-address-protected range
 *
 * Low-address protection covers effective addresses 0..511 and 4096..4607
 * (the first 512 bytes of each of the two prefix pages).
 */
static inline int is_low_address(unsigned long ga)
{
	return ga < 512 || (ga >= 4096 && ga < 4608);
}
|
|
|
|
|
2015-01-22 17:44:11 +08:00
|
|
|
static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
|
|
|
|
const union asce asce)
|
2014-01-01 23:26:52 +08:00
|
|
|
{
|
|
|
|
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
|
|
|
|
psw_t *psw = &vcpu->arch.sie_block->gpsw;
|
|
|
|
|
|
|
|
if (!ctlreg0.lap)
|
|
|
|
return 0;
|
|
|
|
if (psw_bits(*psw).t && asce.p)
|
|
|
|
return 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2016-06-01 02:13:35 +08:00
|
|
|
/*
 * guest_page_range - translate a range of guest logical pages to absolute
 *	addresses
 * @vcpu: virtual cpu
 * @ga: first guest logical address of the range
 * @ar: access register number used for the access
 * @pages: array that receives one absolute page address per guest page
 * @nr_pages: number of pages to translate
 * @asce: address space control element to translate under
 * @mode: access mode (fetch/store/ifetch)
 *
 * Returns 0 on success; otherwise the value of a trans_exc() injection or a
 * negative error code propagated from guest_translate().
 */
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
			    unsigned long *pages, unsigned long nr_pages,
			    const union asce asce, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	int lap_enabled, rc = 0;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (nr_pages) {
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		/* low-address protection only matters for stores */
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		ga &= PAGE_MASK;
		if (psw_bits(*psw).t) {
			/* DAT on: walk the guest page tables */
			rc = guest_translate(vcpu, ga, pages, asce, mode);
			/* negative rc: host-side error, no pgm injection */
			if (rc < 0)
				return rc;
		} else {
			/* DAT off: real address, only apply prefixing */
			*pages = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, *pages))
				rc = PGM_ADDRESSING;
		}
		/* positive rc is a pgm code: turn it into an exception */
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_DAT);
		ga += PAGE_SIZE;
		pages++;
		nr_pages--;
	}
	return 0;
}
|
|
|
|
|
2015-01-19 18:24:51 +08:00
|
|
|
/*
 * access_guest - copy data to/from a guest logical address range
 * @vcpu: virtual cpu
 * @ga: guest logical start address
 * @ar: access register number
 * @data: source/destination buffer in the host
 * @len: number of bytes to copy
 * @mode: GACC_STORE to write to the guest, otherwise read
 *
 * All pages of the range are translated up front so that a translation
 * failure in the middle of the range cannot leave a partial store behind.
 * Returns 0 on success, a pgm code or negative error code otherwise.
 */
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
		 unsigned long len, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
	unsigned long pages_array[2];	/* fast path: up to two pages */
	unsigned long *pages;
	int need_ipte_lock;
	union asce asce;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	/* number of pages the (possibly unaligned) range touches */
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	pages = pages_array;
	if (nr_pages > ARRAY_SIZE(pages_array))
		pages = vmalloc(nr_pages * sizeof(unsigned long));
	if (!pages)
		return -ENOMEM;
	/* hold off IPTE while walking guest tables (DAT on, no real space) */
	need_ipte_lock = psw_bits(*psw).t && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		/* copy at most up to the end of the current page */
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (mode == GACC_STORE)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
		else
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
		len -= _len;
		ga += _len;
		data += _len;
	}
	if (need_ipte_lock)
		ipte_unlock(vcpu);
	if (nr_pages > ARRAY_SIZE(pages_array))
		vfree(pages);
	return rc;
}
|
|
|
|
|
|
|
|
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
|
2015-11-16 22:42:11 +08:00
|
|
|
void *data, unsigned long len, enum gacc_mode mode)
|
2014-01-01 23:26:52 +08:00
|
|
|
{
|
|
|
|
unsigned long _len, gpa;
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
while (len && !rc) {
|
|
|
|
gpa = kvm_s390_real_to_abs(vcpu, gra);
|
|
|
|
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
|
2015-11-16 22:42:11 +08:00
|
|
|
if (mode)
|
2014-01-01 23:26:52 +08:00
|
|
|
rc = write_guest_abs(vcpu, gpa, data, _len);
|
|
|
|
else
|
|
|
|
rc = read_guest_abs(vcpu, gpa, data, _len);
|
|
|
|
len -= _len;
|
|
|
|
gra += _len;
|
|
|
|
data += _len;
|
|
|
|
}
|
|
|
|
return rc;
|
|
|
|
}
|
2014-03-04 06:34:42 +08:00
|
|
|
|
2014-02-04 21:43:25 +08:00
|
|
|
/**
|
|
|
|
* guest_translate_address - translate guest logical into guest absolute address
|
|
|
|
*
|
|
|
|
* Parameter semantics are the same as the ones from guest_translate.
|
|
|
|
* The memory contents at the guest address are not changed.
|
|
|
|
*
|
|
|
|
* Note: The IPTE lock is not taken during this function, so the caller
|
|
|
|
* has to take care of this.
|
|
|
|
*/
|
2015-01-19 18:24:51 +08:00
|
|
|
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
			    unsigned long *gpa, enum gacc_mode mode)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	/* stores into low-address-protected memory raise a protection exc. */
	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
		if (mode == GACC_STORE)
			return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
					 mode, PROT_TYPE_LA);
	}

	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
		rc = guest_translate(vcpu, gva, gpa, asce, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT);
	} else {
		/* DAT off or real-space ASCE: only apply prefixing */
		*gpa = kvm_s390_real_to_abs(vcpu, gva);
		/*
		 * Fix: the pgm code must be the second argument. The old
		 * call trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0)
		 * passed rc (always 0 here) as the code and PGM_ADDRESSING
		 * as the access register number.
		 */
		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
			return trans_exc(vcpu, PGM_ADDRESSING, gva, 0, mode, 0);
	}

	return rc;
}
|
|
|
|
|
2015-02-06 22:01:21 +08:00
|
|
|
/**
|
|
|
|
* check_gva_range - test a range of guest virtual addresses for accessibility
|
|
|
|
*/
|
|
|
|
/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
		    unsigned long length, enum gacc_mode mode)
{
	unsigned long gpa;
	unsigned long currlen;
	int rc = 0;

	ipte_lock(vcpu);
	/* probe one page at a time, stop at the first failing translation */
	for (; length > 0 && !rc; gva += currlen, length -= currlen) {
		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
	}
	ipte_unlock(vcpu);

	return rc;
}
|
|
|
|
|
2014-03-04 06:34:42 +08:00
|
|
|
/**
|
2015-03-03 19:26:14 +08:00
|
|
|
* kvm_s390_check_low_addr_prot_real - check for low-address protection
|
|
|
|
* @gra: Guest real address
|
2014-03-04 06:34:42 +08:00
|
|
|
*
|
|
|
|
* Checks whether an address is subject to low-address protection and set
|
|
|
|
* up vcpu->arch.pgm accordingly if necessary.
|
|
|
|
*
|
|
|
|
* Return: 0 if no protection exception, or PGM_PROTECTION if protected.
|
|
|
|
*/
|
2015-03-03 19:26:14 +08:00
|
|
|
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
|
2014-03-04 06:34:42 +08:00
|
|
|
{
|
2015-03-03 19:26:14 +08:00
|
|
|
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
|
2014-03-04 06:34:42 +08:00
|
|
|
|
2015-03-03 19:26:14 +08:00
|
|
|
if (!ctlreg0.lap || !is_low_address(gra))
|
2014-03-04 06:34:42 +08:00
|
|
|
return 0;
|
2016-06-01 02:00:33 +08:00
|
|
|
return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
|
2014-03-04 06:34:42 +08:00
|
|
|
}
|
2016-03-08 19:16:35 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* kvm_s390_shadow_tables - walk the guest page table and create shadow tables
|
|
|
|
* @sg: pointer to the shadow guest address space structure
|
|
|
|
* @saddr: faulting address in the shadow gmap
|
|
|
|
* @pgt: pointer to the page table address result
|
2016-04-18 19:24:52 +08:00
|
|
|
* @fake: pgt references contiguous guest memory block, not a pgtable
|
2016-03-08 19:16:35 +08:00
|
|
|
*/
|
|
|
|
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
|
2016-04-18 19:24:52 +08:00
|
|
|
unsigned long *pgt, int *dat_protection,
|
|
|
|
int *fake)
|
2016-03-08 19:16:35 +08:00
|
|
|
{
|
|
|
|
struct gmap *parent;
|
|
|
|
union asce asce;
|
|
|
|
union vaddress vaddr;
|
|
|
|
unsigned long ptr;
|
|
|
|
int rc;
|
|
|
|
|
2016-04-18 19:24:52 +08:00
|
|
|
*fake = 0;
|
2016-04-18 23:46:21 +08:00
|
|
|
*dat_protection = 0;
|
2016-03-08 19:16:35 +08:00
|
|
|
parent = sg->parent;
|
|
|
|
vaddr.addr = saddr;
|
|
|
|
asce.val = sg->orig_asce;
|
|
|
|
ptr = asce.origin * 4096;
|
2016-04-18 22:22:24 +08:00
|
|
|
if (asce.r) {
|
|
|
|
*fake = 1;
|
|
|
|
asce.dt = ASCE_TYPE_REGION1;
|
|
|
|
}
|
2016-03-08 19:16:35 +08:00
|
|
|
switch (asce.dt) {
|
|
|
|
case ASCE_TYPE_REGION1:
|
2016-04-18 22:22:24 +08:00
|
|
|
if (vaddr.rfx01 > asce.tl && !asce.r)
|
2016-03-08 19:16:35 +08:00
|
|
|
return PGM_REGION_FIRST_TRANS;
|
|
|
|
break;
|
|
|
|
case ASCE_TYPE_REGION2:
|
|
|
|
if (vaddr.rfx)
|
|
|
|
return PGM_ASCE_TYPE;
|
|
|
|
if (vaddr.rsx01 > asce.tl)
|
|
|
|
return PGM_REGION_SECOND_TRANS;
|
|
|
|
break;
|
|
|
|
case ASCE_TYPE_REGION3:
|
|
|
|
if (vaddr.rfx || vaddr.rsx)
|
|
|
|
return PGM_ASCE_TYPE;
|
|
|
|
if (vaddr.rtx01 > asce.tl)
|
|
|
|
return PGM_REGION_THIRD_TRANS;
|
|
|
|
break;
|
|
|
|
case ASCE_TYPE_SEGMENT:
|
|
|
|
if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
|
|
|
|
return PGM_ASCE_TYPE;
|
|
|
|
if (vaddr.sx01 > asce.tl)
|
|
|
|
return PGM_SEGMENT_TRANSLATION;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (asce.dt) {
|
|
|
|
case ASCE_TYPE_REGION1: {
|
|
|
|
union region1_table_entry rfte;
|
|
|
|
|
2016-04-18 22:22:24 +08:00
|
|
|
if (*fake) {
|
|
|
|
/* offset in 16EB guest memory block */
|
|
|
|
ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
|
|
|
|
rfte.val = ptr;
|
|
|
|
goto shadow_r2t;
|
|
|
|
}
|
2016-03-08 19:16:35 +08:00
|
|
|
rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
if (rfte.i)
|
|
|
|
return PGM_REGION_FIRST_TRANS;
|
|
|
|
if (rfte.tt != TABLE_TYPE_REGION1)
|
|
|
|
return PGM_TRANSLATION_SPEC;
|
|
|
|
if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
|
|
|
|
return PGM_REGION_SECOND_TRANS;
|
2016-04-18 23:46:21 +08:00
|
|
|
if (sg->edat_level >= 1)
|
|
|
|
*dat_protection |= rfte.p;
|
2016-04-18 22:22:24 +08:00
|
|
|
ptr = rfte.rto << 12UL;
|
|
|
|
shadow_r2t:
|
|
|
|
rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
|
2016-03-08 19:16:35 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
/* fallthrough */
|
|
|
|
}
|
|
|
|
case ASCE_TYPE_REGION2: {
|
|
|
|
union region2_table_entry rste;
|
|
|
|
|
2016-04-18 22:22:24 +08:00
|
|
|
if (*fake) {
|
|
|
|
/* offset in 8PB guest memory block */
|
|
|
|
ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
|
|
|
|
rste.val = ptr;
|
|
|
|
goto shadow_r3t;
|
|
|
|
}
|
2016-03-08 19:16:35 +08:00
|
|
|
rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
if (rste.i)
|
|
|
|
return PGM_REGION_SECOND_TRANS;
|
|
|
|
if (rste.tt != TABLE_TYPE_REGION2)
|
|
|
|
return PGM_TRANSLATION_SPEC;
|
|
|
|
if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
|
|
|
|
return PGM_REGION_THIRD_TRANS;
|
2016-04-18 23:46:21 +08:00
|
|
|
if (sg->edat_level >= 1)
|
|
|
|
*dat_protection |= rste.p;
|
2016-04-18 22:22:24 +08:00
|
|
|
ptr = rste.rto << 12UL;
|
|
|
|
shadow_r3t:
|
2016-04-18 23:46:21 +08:00
|
|
|
rste.p |= *dat_protection;
|
2016-04-18 22:22:24 +08:00
|
|
|
rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
|
2016-03-08 19:16:35 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
/* fallthrough */
|
|
|
|
}
|
|
|
|
case ASCE_TYPE_REGION3: {
|
|
|
|
union region3_table_entry rtte;
|
|
|
|
|
2016-04-18 22:22:24 +08:00
|
|
|
if (*fake) {
|
|
|
|
/* offset in 4TB guest memory block */
|
|
|
|
ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
|
|
|
|
rtte.val = ptr;
|
|
|
|
goto shadow_sgt;
|
|
|
|
}
|
2016-03-08 19:16:35 +08:00
|
|
|
rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
if (rtte.i)
|
|
|
|
return PGM_REGION_THIRD_TRANS;
|
|
|
|
if (rtte.tt != TABLE_TYPE_REGION3)
|
|
|
|
return PGM_TRANSLATION_SPEC;
|
2016-04-18 19:42:05 +08:00
|
|
|
if (rtte.cr && asce.p && sg->edat_level >= 2)
|
|
|
|
return PGM_TRANSLATION_SPEC;
|
|
|
|
if (rtte.fc && sg->edat_level >= 2) {
|
2016-04-18 23:46:21 +08:00
|
|
|
*dat_protection |= rtte.fc0.p;
|
2016-04-18 19:42:05 +08:00
|
|
|
*fake = 1;
|
|
|
|
ptr = rtte.fc1.rfaa << 31UL;
|
|
|
|
rtte.val = ptr;
|
|
|
|
goto shadow_sgt;
|
|
|
|
}
|
2016-03-08 19:16:35 +08:00
|
|
|
if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
|
|
|
|
return PGM_SEGMENT_TRANSLATION;
|
2016-04-18 23:46:21 +08:00
|
|
|
if (sg->edat_level >= 1)
|
|
|
|
*dat_protection |= rtte.fc0.p;
|
2016-04-18 19:42:05 +08:00
|
|
|
ptr = rtte.fc0.sto << 12UL;
|
|
|
|
shadow_sgt:
|
2016-04-18 23:46:21 +08:00
|
|
|
rtte.fc0.p |= *dat_protection;
|
2016-04-18 19:42:05 +08:00
|
|
|
rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
|
2016-03-08 19:16:35 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
/* fallthrough */
|
|
|
|
}
|
|
|
|
case ASCE_TYPE_SEGMENT: {
|
|
|
|
union segment_table_entry ste;
|
|
|
|
|
2016-04-18 19:42:05 +08:00
|
|
|
if (*fake) {
|
|
|
|
/* offset in 2G guest memory block */
|
|
|
|
ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
|
|
|
|
ste.val = ptr;
|
|
|
|
goto shadow_pgt;
|
|
|
|
}
|
2016-03-08 19:16:35 +08:00
|
|
|
rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
if (ste.i)
|
|
|
|
return PGM_SEGMENT_TRANSLATION;
|
|
|
|
if (ste.tt != TABLE_TYPE_SEGMENT)
|
|
|
|
return PGM_TRANSLATION_SPEC;
|
|
|
|
if (ste.cs && asce.p)
|
|
|
|
return PGM_TRANSLATION_SPEC;
|
2016-04-18 23:46:21 +08:00
|
|
|
*dat_protection |= ste.fc0.p;
|
2016-04-18 19:24:52 +08:00
|
|
|
if (ste.fc && sg->edat_level >= 1) {
|
|
|
|
*fake = 1;
|
|
|
|
ptr = ste.fc1.sfaa << 20UL;
|
|
|
|
ste.val = ptr;
|
|
|
|
goto shadow_pgt;
|
|
|
|
}
|
|
|
|
ptr = ste.fc0.pto << 11UL;
|
|
|
|
shadow_pgt:
|
2016-04-18 23:46:21 +08:00
|
|
|
ste.fc0.p |= *dat_protection;
|
2016-04-18 19:24:52 +08:00
|
|
|
rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
|
2016-03-08 19:16:35 +08:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* Return the parent address of the page table */
|
|
|
|
*pgt = ptr;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt;
	int dat_protection, fake;
	int rc;

	down_read(&sg->mm->mmap_sem);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu);

	/* fast path: the page table may already be shadowed */
	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		/* otherwise walk the guest tables and shadow each level */
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		/* offset in 1MB guest memory block */
		pte.val = pgt + ((unsigned long) vaddr.px << 12UL);
		goto shadow_page;
	}
	/* read the guest pte; any earlier error short-circuits these steps */
	if (!rc)
		rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	/* protection on any higher level protects the final page as well */
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	ipte_unlock(vcpu);
	up_read(&sg->mm->mmap_sem);
	return rc;
}
|