public inbox for linux-next@vger.kernel.org
 help / color / mirror / Atom feed
* linux-next: manual merge of the kvms390 tree with the origin tree
@ 2026-04-08 15:31 Mark Brown
  0 siblings, 0 replies; 2+ messages in thread
From: Mark Brown @ 2026-04-08 15:31 UTC (permalink / raw)
  To: Christian Borntraeger, Janosch Frank
  Cc: Christian Borntraeger, Claudio Imbrenda,
	Linux Kernel Mailing List, Linux Next Mailing List

Hi all,

Today's linux-next merge of the kvms390 tree got a conflict in:

  tools/testing/selftests/kvm/Makefile.kvm

between commit:

  0c6294d98a6df ("KVM: s390: selftests: Add IRQ routing address offset tests")

from the origin tree and commit:

  857e92662c075 ("KVM: s390: selftests: enable some common memory-related tests")

from the kvms390 tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

I note that it's traditional to sort these lists to minimise
conflicts...

diff --cc tools/testing/selftests/kvm/Makefile.kvm
index 7c2713967c699,057f17d6b896d..0000000000000
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@@ -207,7 -203,7 +205,8 @@@ TEST_GEN_PROGS_s390 += s390/ucontrol_te
  TEST_GEN_PROGS_s390 += s390/user_operexec
  TEST_GEN_PROGS_s390 += s390/keyop
  TEST_GEN_PROGS_s390 += rseq_test
 +TEST_GEN_PROGS_s390 += s390/irq_routing
+ TEST_GEN_PROGS_s390 += mmu_stress_test
  
  TEST_GEN_PROGS_riscv = $(TEST_GEN_PROGS_COMMON)
  TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test

^ permalink raw reply	[flat|nested] 2+ messages in thread
* linux-next: manual merge of the kvms390 tree with the origin tree
@ 2026-04-08 15:31 Mark Brown
  0 siblings, 0 replies; 2+ messages in thread
From: Mark Brown @ 2026-04-08 15:31 UTC (permalink / raw)
  To: Christian Borntraeger, Janosch Frank
  Cc: Claudio Imbrenda, Linux Kernel Mailing List,
	Linux Next Mailing List

[-- Attachment #1: Type: text/plain, Size: 4775 bytes --]

Hi all,

Today's linux-next merge of the kvms390 tree got a conflict in:

  arch/s390/kvm/gmap.c

between commit:

  45921d0212d4a ("KVM: s390: Fix gmap_link()")

from the origin tree and commit:

  4204067f99820 ("KVM: s390: Add alignment checks for hugepages")

from the kvms390 tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

diff --cc arch/s390/kvm/gmap.c
index 57802cb8af8e4,e3c1b070a11dc..0000000000000
--- a/arch/s390/kvm/gmap.c
+++ b/arch/s390/kvm/gmap.c
@@@ -623,78 -619,48 +624,97 @@@ static inline bool gmap_2g_allowed(stru
  	return false;
  }
  
- static inline bool gmap_1m_allowed(struct gmap *gmap, gfn_t gfn)
+ /**
+  * gmap_1m_allowed() - Check whether a 1M hugepage is allowed.
+  * @gmap: The gmap of the guest.
+  * @f: Describes the fault that is being resolved.
+  * @slot: The memslot the faulting address belongs to.
+  *
+  * The function checks whether the GMAP_FLAG_ALLOW_HPAGE_1M flag is set for
+  * @gmap, whether the offset of the address in the 1M virtual frame is the
+  * same as the offset in the physical 1M frame, and finally whether the whole
+  * 1M page would fit in the given memslot.
+  *
+  * Return: true if a 1M hugepage is allowed to back the faulting address, false
+  *         otherwise.
+  */
+ static inline bool gmap_1m_allowed(struct gmap *gmap, struct guest_fault *f,
+ 				   struct kvm_memory_slot *slot)
  {
- 	return test_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &gmap->flags);
+ 	return test_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &gmap->flags) &&
+ 	       !((f->gfn ^ f->pfn) & ~_SEGMENT_FR_MASK) &&
+ 	       slot->base_gfn <= ALIGN_DOWN(f->gfn, _PAGES_PER_SEGMENT) &&
+ 	       slot->base_gfn + slot->npages >= ALIGN(f->gfn + 1, _PAGES_PER_SEGMENT);
  }
  
 +static int _gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, int level,
 +		      struct guest_fault *f)
 +{
 +	union crste oldval, newval;
 +	union pte newpte, oldpte;
 +	union pgste pgste;
 +	int rc = 0;
 +
 +	rc = dat_entry_walk(mc, f->gfn, gmap->asce, DAT_WALK_ALLOC_CONTINUE, level,
 +			    &f->crstep, &f->ptep);
 +	if (rc == -ENOMEM)
 +		return rc;
 +	if (KVM_BUG_ON(rc == -EINVAL, gmap->kvm))
 +		return rc;
 +	if (rc)
 +		return -EAGAIN;
 +	if (KVM_BUG_ON(get_level(f->crstep, f->ptep) > level, gmap->kvm))
 +		return -EINVAL;
 +
 +	if (f->ptep) {
 +		pgste = pgste_get_lock(f->ptep);
 +		oldpte = *f->ptep;
 +		newpte = _pte(f->pfn, f->writable, f->write_attempt | oldpte.s.d, !f->page);
 +		newpte.s.sd = oldpte.s.sd;
 +		oldpte.s.sd = 0;
 +		if (oldpte.val == _PTE_EMPTY.val || oldpte.h.pfra == f->pfn) {
 +			pgste = gmap_ptep_xchg(gmap, f->ptep, newpte, pgste, f->gfn);
 +			if (f->callback)
 +				f->callback(f);
 +		} else {
 +			rc = -EAGAIN;
 +		}
 +		pgste_set_unlock(f->ptep, pgste);
 +	} else {
 +		do {
 +			oldval = READ_ONCE(*f->crstep);
 +			newval = _crste_fc1(f->pfn, oldval.h.tt, f->writable,
 +					    f->write_attempt | oldval.s.fc1.d);
 +			newval.s.fc1.s = !f->page;
 +			newval.s.fc1.sd = oldval.s.fc1.sd;
 +			if (oldval.val != _CRSTE_EMPTY(oldval.h.tt).val &&
 +			    crste_origin_large(oldval) != crste_origin_large(newval))
 +				return -EAGAIN;
 +		} while (!gmap_crstep_xchg_atomic(gmap, f->crstep, oldval, newval, f->gfn));
 +		if (f->callback)
 +			f->callback(f);
 +	}
 +
 +	return rc;
 +}
 +
- int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *f)
+ int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *f,
+ 	      struct kvm_memory_slot *slot)
  {
  	unsigned int order;
 -	int rc, level;
 +	int level;
  
  	lockdep_assert_held(&gmap->kvm->mmu_lock);
  
  	level = TABLE_TYPE_PAGE_TABLE;
  	if (f->page) {
  		order = folio_order(page_folio(f->page));
- 		if (order >= get_order(_REGION3_SIZE) && gmap_2g_allowed(gmap, f->gfn))
+ 		if (order >= get_order(_REGION3_SIZE) && gmap_2g_allowed(gmap, f, slot))
  			level = TABLE_TYPE_REGION3;
- 		else if (order >= get_order(_SEGMENT_SIZE) && gmap_1m_allowed(gmap, f->gfn))
+ 		else if (order >= get_order(_SEGMENT_SIZE) && gmap_1m_allowed(gmap, f, slot))
  			level = TABLE_TYPE_SEGMENT;
  	}
 -	rc = dat_link(mc, gmap->asce, level, uses_skeys(gmap), f);
 -	KVM_BUG_ON(rc == -EINVAL, gmap->kvm);
 -	return rc;
 +	return _gmap_link(mc, gmap, level, f);
  }
  
  static int gmap_ucas_map_one(struct kvm_s390_mmu_cache *mc, struct gmap *gmap,

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2026-04-08 15:31 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-08 15:31 linux-next: manual merge of the kvms390 tree with the origin tree Mark Brown
  -- strict thread matches above, loose matches on Subject: below --
2026-04-08 15:31 Mark Brown

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox