summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-03-16 12:21:00 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-03-16 12:21:00 -0700
commit8a91ebb337fa68e01339a8c1c411a38d66eac80e (patch)
tree730e172117d8e38c109ca01ef42f657fee1b0401 /mm
parent2d1373e4246da3b58e1df058374ed6b101804e07 (diff)
parent182b9b3d8d1d36500f58e4f3dc82b144d6487bdf (diff)
Merge tag 'mm-hotfixes-stable-2026-03-16-12-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton: "6 hotfixes. 4 are cc:stable. 3 are for MM. All are singletons - please see the changelogs for details" * tag 'mm-hotfixes-stable-2026-03-16-12-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: MAINTAINERS: update email address for Ignat Korchagin mm/huge_memory: fix early failure try_to_migrate() when split huge pmd for shared THP mm/rmap: fix incorrect pte restoration for lazyfree folios mm/huge_memory: fix use of NULL folio in move_pages_huge_pmd() build_bug.h: correct function parameters names in kernel-doc crash_dump: don't log dm-crypt key bytes in read_key_from_user_keyring
Diffstat (limited to 'mm')
-rw-r--r--mm/huge_memory.c3
-rw-r--r--mm/rmap.c21
2 files changed, 19 insertions, 5 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 912c248a3f7e..b298cba853ab 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2797,7 +2797,8 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
} else {
src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
- _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
+ _dst_pmd = move_soft_dirty_pmd(src_pmdval);
+ _dst_pmd = clear_uffd_wp_pmd(_dst_pmd);
}
set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
diff --git a/mm/rmap.c b/mm/rmap.c
index 0f00570d1b9e..391337282e3f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1955,7 +1955,14 @@ static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
if (userfaultfd_wp(vma))
return 1;
- return folio_pte_batch(folio, pvmw->pte, pte, max_nr);
+ /*
+ * If unmap fails, we need to restore the ptes. To avoid accidentally
+ * upgrading write permissions for ptes that were not originally
+ * writable, and to avoid losing the soft-dirty bit, use the
+ * appropriate FPB flags.
+ */
+ return folio_pte_batch_flags(folio, vma, pvmw->pte, &pte, max_nr,
+ FPB_RESPECT_WRITE | FPB_RESPECT_SOFT_DIRTY);
}
/*
@@ -2443,11 +2450,17 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
__maybe_unused pmd_t pmdval;
if (flags & TTU_SPLIT_HUGE_PMD) {
+ /*
+ * split_huge_pmd_locked() might leave the
+ * folio mapped through PTEs. Retry the walk
+ * so we can detect this scenario and properly
+ * abort the walk.
+ */
split_huge_pmd_locked(vma, pvmw.address,
pvmw.pmd, true);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- break;
+ flags &= ~TTU_SPLIT_HUGE_PMD;
+ page_vma_mapped_walk_restart(&pvmw);
+ continue;
}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
pmdval = pmdp_get(pvmw.pmd);