From mboxrd@z Thu Jan 1 00:00:00 1970 From: Janosch Frank Subject: [RFC/PATCH v3 13/16] s390/mm: Add huge pmd storage key handling Date: Fri, 9 Feb 2018 10:34:21 +0100 Message-ID: <1518168864-147803-14-git-send-email-frankja@linux.vnet.ibm.com> References: <1518168864-147803-1-git-send-email-frankja@linux.vnet.ibm.com> Cc: schwidefsky@de.ibm.com, borntraeger@de.ibm.com, david@redhat.com, dominik.dingel@gmail.com, linux-s390@vger.kernel.org To: kvm@vger.kernel.org Return-path: Received: from mx0b-001b2d01.pphosted.com ([148.163.158.5]:39224 "EHLO mx0a-001b2d01.pphosted.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1751007AbeBIJhp (ORCPT ); Fri, 9 Feb 2018 04:37:45 -0500 Received: from pps.filterd (m0098416.ppops.net [127.0.0.1]) by mx0b-001b2d01.pphosted.com (8.16.0.22/8.16.0.22) with SMTP id w199YJMg040819 for ; Fri, 9 Feb 2018 04:37:44 -0500 Received: from e06smtp10.uk.ibm.com (e06smtp10.uk.ibm.com [195.75.94.106]) by mx0b-001b2d01.pphosted.com with ESMTP id 2g16tq5vq2-1 (version=TLSv1.2 cipher=AES256-SHA bits=256 verify=NOT) for ; Fri, 09 Feb 2018 04:37:44 -0500 Received: from localhost by e06smtp10.uk.ibm.com with IBM ESMTP SMTP Gateway: Authorized Use Only! Violators will be prosecuted for from ; Fri, 9 Feb 2018 09:37:42 -0000 In-Reply-To: <1518168864-147803-1-git-send-email-frankja@linux.vnet.ibm.com> Sender: kvm-owner@vger.kernel.org List-ID: Storage keys for guests with huge page mappings have to be set directly in hardware. There are no PGSTEs for PMDs that we could use to retain the guest's logical view of the key.
Signed-off-by: Janosch Frank --- arch/s390/mm/pgtable.c | 104 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 98 insertions(+), 6 deletions(-) diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 497fefe..871fc65 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -811,12 +811,45 @@ EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty); int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned char key, bool nq) { - unsigned long keyul; + unsigned long keyul, address; spinlock_t *ptl; pgste_t old, new; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; pte_t *ptep; - ptep = get_locked_pte(mm, addr, &ptl); + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + if (!p4d) + return -EFAULT; + pud = pud_alloc(mm, p4d, addr); + if (!pud) + return -EFAULT; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return -EFAULT; + + ptl = pmd_lock(mm, pmd); + if (!pmd_present(*pmd)) { + spin_unlock(ptl); + return -EFAULT; + } + if (pmd_large(*pmd)) { + address = pmd_val(*pmd) & HPAGE_MASK; + address |= addr & ~HPAGE_MASK; + /* + * Huge pmds need quiescing operations, they are + * always mapped. 
+ */ + page_set_storage_key(address, key, 1); + spin_unlock(ptl); + return 0; + } + spin_unlock(ptl); + + ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (unlikely(!ptep)) return -EFAULT; @@ -827,7 +860,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48; pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; if (!(pte_val(*ptep) & _PAGE_INVALID)) { - unsigned long address, bits, skey; + unsigned long bits, skey; address = pte_val(*ptep) & PAGE_MASK; skey = (unsigned long) page_get_storage_key(address); @@ -890,14 +923,43 @@ EXPORT_SYMBOL(cond_set_guest_storage_key); int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr) { spinlock_t *ptl; + unsigned long address; pgste_t old, new; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; pte_t *ptep; int cc = 0; - ptep = get_locked_pte(mm, addr, &ptl); - if (unlikely(!ptep)) + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + if (!p4d) + return -EFAULT; + pud = pud_alloc(mm, p4d, addr); + if (!pud) + return -EFAULT; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) return -EFAULT; + ptl = pmd_lock(mm, pmd); + if (!pmd_present(*pmd)) { + spin_unlock(ptl); + return -EFAULT; + } + if (pmd_large(*pmd)) { + address = pmd_val(*pmd) & HPAGE_MASK; + address |= addr & ~HPAGE_MASK; + cc = page_reset_referenced(address); + spin_unlock(ptl); + return cc; + } + spin_unlock(ptl); + + ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl); + if (unlikely(!ptep)) + return -EFAULT; new = old = pgste_get_lock(ptep); /* Reset guest reference bit only */ pgste_val(new) &= ~PGSTE_GR_BIT; @@ -922,11 +984,41 @@ EXPORT_SYMBOL(reset_guest_reference_bit); int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned char *key) { + unsigned long address; spinlock_t *ptl; pgste_t pgste; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; pte_t *ptep; - ptep = get_locked_pte(mm, addr, &ptl);
+ pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + if (!p4d) + return -EFAULT; + pud = pud_alloc(mm, p4d, addr); + if (!pud) + return -EFAULT; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return -EFAULT; + + ptl = pmd_lock(mm, pmd); + if (!pmd_present(*pmd)) { + spin_unlock(ptl); + return -EFAULT; + } + if (pmd_large(*pmd)) { + address = pmd_val(*pmd) & HPAGE_MASK; + address |= addr & ~HPAGE_MASK; + *key = page_get_storage_key(address); + spin_unlock(ptl); + return 0; + } + spin_unlock(ptl); + + ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (unlikely(!ptep)) return -EFAULT; -- 2.7.4