From 6538b6221cc2feda415ca1946e66a5ef02dc6a0a Mon Sep 17 00:00:00 2001
From: Michael Roth
Date: Thu, 8 Jan 2026 15:46:18 -0600
Subject: KVM: guest_memfd: Remove partial hugepage handling from
 kvm_gmem_populate()

kvm_gmem_populate(), and the associated post-populate callbacks, have
some limited support for dealing with guests backed by hugepages: order
information is passed along to each post-populate callback, and the
pages passed to kvm_gmem_populate() are iterated through in
hugepage-sized chunks. However, guest_memfd doesn't yet support
hugepages, and in most cases additional changes in the
kvm_gmem_populate() path would also be needed to actually allow for
this functionality.

This makes the existing code unnecessarily complex, and makes changes
difficult to work through upstream due to theoretical impacts on
hugepage support that can't be considered properly without an actual
hugepage implementation to reference. So for now, remove what's there
so that changes for things like in-place conversion can be
implemented/reviewed more efficiently.

Suggested-by: Vishal Annapurve
Co-developed-by: Vishal Annapurve
Signed-off-by: Vishal Annapurve
Tested-by: Vishal Annapurve
Tested-by: Kai Huang
Signed-off-by: Michael Roth
Tested-by: Yan Zhao
Reviewed-by: Yan Zhao
Link: https://patch.msgid.link/20260108214622.1084057-3-michael.roth@amd.com
[sean: check for !IS_ERR() before checking folio_order()]
Signed-off-by: Sean Christopherson
---
 virt/kvm/guest_memfd.c | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index fdaea3422c30..24eb33c7948d 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -151,6 +151,15 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
 					    mapping_gfp_mask(inode->i_mapping), policy);
 	mpol_cond_put(policy);
 
+	/*
+	 * External interfaces like kvm_gmem_get_pfn() support dealing
+	 * with hugepages to a degree, but internally, guest_memfd currently
+	 * assumes that all folios are order-0 and handling would need
+	 * to be updated for anything otherwise (e.g. page-clearing
+	 * operations).
+	 */
+	WARN_ON_ONCE(!IS_ERR(folio) && folio_order(folio));
+
 	return folio;
 }
 
@@ -829,7 +838,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 	struct kvm_memory_slot *slot;
 	void __user *p;
 
-	int ret = 0, max_order;
+	int ret = 0;
 	long i;
 
 	lockdep_assert_held(&kvm->slots_lock);
 
@@ -848,7 +857,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 	filemap_invalidate_lock(file->f_mapping);
 
 	npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
-	for (i = 0; i < npages; i += (1 << max_order)) {
+	for (i = 0; i < npages; i++) {
 		struct folio *folio;
 		gfn_t gfn = start_gfn + i;
 		pgoff_t index = kvm_gmem_get_index(slot, gfn);
@@ -860,7 +869,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 			break;
 		}
 
-		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, &max_order);
+		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, NULL);
 		if (IS_ERR(folio)) {
 			ret = PTR_ERR(folio);
 			break;
@@ -874,20 +883,15 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 		}
 
 		folio_unlock(folio);
 
-		WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
-			(npages - i) < (1 << max_order));
-
 		ret = -EINVAL;
-		while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
-							KVM_MEMORY_ATTRIBUTE_PRIVATE,
-							KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
-			if (!max_order)
-				goto put_folio_and_exit;
-			max_order--;
-		}
+		if (!kvm_range_has_memory_attributes(kvm, gfn, gfn + 1,
+						     KVM_MEMORY_ATTRIBUTE_PRIVATE,
+						     KVM_MEMORY_ATTRIBUTE_PRIVATE))
+			goto put_folio_and_exit;
 
 		p = src ? src + i * PAGE_SIZE : NULL;
-		ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
+		ret = post_populate(kvm, gfn, pfn, p, opaque);
 		if (!ret)
 			kvm_gmem_mark_prepared(folio);

From 8622ef05709fbc4903f54cdf1ac8c3725e479dd8 Mon Sep 17 00:00:00 2001
From: Michael Roth
Date: Thu, 8 Jan 2026 15:46:19 -0600
Subject: KVM: guest_memfd: Remove preparation tracking

guest_memfd currently uses the folio uptodate flag to track:

  1) whether or not a page has been cleared before initial usage
  2) whether or not the architecture hooks have been issued to put the
     page in a private state as defined by the architecture

In practice, (2) is only actually being tracked for SEV-SNP VMs, and
there do not seem to be any plans/reasons that would suggest this will
change in the future, so this additional tracking/complexity is not
really providing any general benefit to guest_memfd users. On the
other hand, future plans around in-place conversion and hugepage
support will make the burden of tracking this information within
guest_memfd even more complex.

With in-place conversion and hugepage support, the plan is to use the
per-folio uptodate flag purely to track the initial clearing of folios,
whereas conversion operations could trigger multiple transitions
between 'prepared' and 'unprepared' and thus need separate tracking.
Since preparation generally happens during fault time, i.e. on the
"read-side" of any VM-wide locks that might protect state tracked by
guest_memfd, supporting concurrent handling of page faults would
likely require more complex locking schemes if the "preparedness"
state were tracked by guest_memfd, i.e. if it needs to be updated as
part of handling the fault.
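For illustration, this is roughly what the kvm_gmem_get_pfn() fault
path looks like before this patch (a simplified sketch of the code
removed below), with a single uptodate-derived bit gating both the
clearing and the arch hook:

	folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* is_prepared is folio_test_uptodate(), i.e. cleared AND prepared. */
	if (!is_prepared)
		r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);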
Instead of keeping this current/future complexity within guest_memfd
for what is essentially just SEV-SNP, drop the tracking for (2) and
trigger the arch-specific preparation hooks unconditionally on every
fault, so that the hooks themselves can check the preparation state
directly and decide whether or not a folio still needs additional
preparation. In the case of SEV-SNP, the preparation hooks already
check the preparation state to avoid double-preparation, so nothing
extra needs to be done there.

Reviewed-by: Vishal Annapurve
Tested-by: Vishal Annapurve
Reviewed-by: Pankaj Gupta
Tested-by: Kai Huang
Signed-off-by: Michael Roth
Link: https://patch.msgid.link/20260108214622.1084057-4-michael.roth@amd.com
[sean: massage changelog]
Signed-off-by: Sean Christopherson
---
 virt/kvm/guest_memfd.c | 44 ++++++++++++--------------------------------
 1 file changed, 12 insertions(+), 32 deletions(-)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 24eb33c7948d..e90879322fd0 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -76,11 +76,6 @@ static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
 	return 0;
 }
 
-static inline void kvm_gmem_mark_prepared(struct folio *folio)
-{
-	folio_mark_uptodate(folio);
-}
-
 /*
  * Process @folio, which contains @gfn, so that the guest can use it.
  * The folio must be locked and the gfn must be contained in @slot.
@@ -90,13 +85,7 @@ static inline void kvm_gmem_mark_prepared(struct folio *folio)
 static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
 				  gfn_t gfn, struct folio *folio)
 {
-	unsigned long nr_pages, i;
 	pgoff_t index;
-	int r;
-
-	nr_pages = folio_nr_pages(folio);
-	for (i = 0; i < nr_pages; i++)
-		clear_highpage(folio_page(folio, i));
 
 	/*
 	 * Preparing huge folios should always be safe, since it should
@@ -114,11 +103,8 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
 	WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, folio_nr_pages(folio)));
 	index = kvm_gmem_get_index(slot, gfn);
 	index = ALIGN_DOWN(index, folio_nr_pages(folio));
-	r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
-	if (!r)
-		kvm_gmem_mark_prepared(folio);
 
-	return r;
+	return __kvm_gmem_prepare_folio(kvm, slot, index, folio);
 }
 
 /*
@@ -429,7 +415,7 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
 
 	if (!folio_test_uptodate(folio)) {
 		clear_highpage(folio_page(folio, 0));
-		kvm_gmem_mark_prepared(folio);
+		folio_mark_uptodate(folio);
 	}
 
 	vmf->page = folio_file_page(folio, vmf->pgoff);
@@ -766,7 +752,7 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
 
 static struct folio *__kvm_gmem_get_pfn(struct file *file,
 					struct kvm_memory_slot *slot,
 					pgoff_t index, kvm_pfn_t *pfn,
-					bool *is_prepared, int *max_order)
+					int *max_order)
 {
 	struct file *slot_file = READ_ONCE(slot->gmem.file);
 	struct gmem_file *f = file->private_data;
@@ -796,7 +782,6 @@ static struct folio *__kvm_gmem_get_pfn(struct file *file,
 	if (max_order)
 		*max_order = 0;
 
-	*is_prepared = folio_test_uptodate(folio);
 	return folio;
 }
 
@@ -806,19 +791,22 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 {
 	pgoff_t index = kvm_gmem_get_index(slot, gfn);
 	struct folio *folio;
-	bool is_prepared = false;
 	int r = 0;
 
 	CLASS(gmem_get_file, file)(slot);
 	if (!file)
 		return -EFAULT;
 
-	folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &is_prepared, max_order);
+	folio = __kvm_gmem_get_pfn(file, slot, index, pfn, max_order);
 	if (IS_ERR(folio))
 		return PTR_ERR(folio);
 
-	if (!is_prepared)
-		r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
+	if (!folio_test_uptodate(folio)) {
+		clear_highpage(folio_page(folio, 0));
+		folio_mark_uptodate(folio);
+	}
+
+	r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
 
 	folio_unlock(folio);
 
@@ -861,7 +849,6 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 		struct folio *folio;
 		gfn_t gfn = start_gfn + i;
 		pgoff_t index = kvm_gmem_get_index(slot, gfn);
-		bool is_prepared = false;
 		kvm_pfn_t pfn;
 
 		if (signal_pending(current)) {
@@ -869,19 +856,12 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 			break;
 		}
 
-		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, &is_prepared, NULL);
+		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, NULL);
 		if (IS_ERR(folio)) {
 			ret = PTR_ERR(folio);
 			break;
 		}
 
-		if (is_prepared) {
-			folio_unlock(folio);
-			folio_put(folio);
-			ret = -EEXIST;
-			break;
-		}
-
 		folio_unlock(folio);
 
 		ret = -EINVAL;
@@ -893,7 +873,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 		p = src ? src + i * PAGE_SIZE : NULL;
 		ret = post_populate(kvm, gfn, pfn, p, opaque);
 		if (!ret)
-			kvm_gmem_mark_prepared(folio);
+			folio_mark_uptodate(folio);
 
 put_folio_and_exit:
 		folio_put(folio);

From 2a62345b30529e488beb6a1220577b3495933724 Mon Sep 17 00:00:00 2001
From: Michael Roth
Date: Thu, 8 Jan 2026 15:46:22 -0600
Subject: KVM: guest_memfd: GUP source pages prior to populating guest memory

Currently the post-populate callbacks handle copying source pages into
private GPA ranges backed by guest_memfd: kvm_gmem_populate() acquires
the filemap invalidate lock, then calls a post-populate callback which
may issue a get_user_pages() on the source pages prior to copying them
into the private GPA (e.g. TDX). This will not be compatible with
in-place conversion, where the userspace page fault path will attempt
to acquire the filemap invalidate lock while holding mm->mmap_lock,
leading to a potential ABBA deadlock.

Address this by hoisting the GUP above the filemap invalidate lock so
that any page faults on the source pages are taken early, prior to
acquiring the filemap invalidate lock.

It's not currently clear whether this issue is reachable with the
current implementation of guest_memfd, which doesn't support in-place
conversion. However, the change also provides a consistent mechanism
for handing stable source/target PFNs to the callbacks rather than
punting to vendor-specific code, and that commonality across
architectures may be worthwhile even without in-place conversion.

As part of this change, also begin enforcing that the 'src' argument
to kvm_gmem_populate() must be page-aligned: this greatly reduces the
complexity of the post-populate callbacks, and no current in-tree user
passes a non-page-aligned 'src' argument anyway.
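To make the potential ABBA ordering concrete, assuming an in-place
conversion design in which the guest_memfd fault path takes the filemap
invalidate lock while mm->mmap_lock is held (sketch, not a current
in-tree path):

    kvm_gmem_populate()                 userspace fault on 'src'
    -------------------                 ------------------------
    filemap_invalidate_lock()           mmap_lock
    post_populate()
      get_user_pages(src)
        mmap_lock                       filemap_invalidate_lock()

With the GUP hoisted above the invalidate lock, kvm_gmem_populate()
never faults on the source pages while holding the invalidate lock, so
the inverted ordering cannot occur.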
Suggested-by: Sean Christopherson
Co-developed-by: Sean Christopherson
Co-developed-by: Vishal Annapurve
Signed-off-by: Vishal Annapurve
Tested-by: Vishal Annapurve
Tested-by: Kai Huang
Signed-off-by: Michael Roth
Tested-by: Yan Zhao
Reviewed-by: Yan Zhao
Link: https://patch.msgid.link/20260108214622.1084057-7-michael.roth@amd.com
[sean: avoid local "p" variable]
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/svm/sev.c   | 33 +++++++++----------
 arch/x86/kvm/vmx/tdx.c   | 16 ++--------
 include/linux/kvm_host.h |  4 +--
 virt/kvm/guest_memfd.c   | 83 ++++++++++++++++++++++++++++++++----------------
 4 files changed, 78 insertions(+), 58 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index b4409bc652d1..0ab7c89262fb 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2268,7 +2268,7 @@ struct sev_gmem_populate_args {
 };
 
 static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
-				  void __user *src, void *opaque)
+				  struct page *src_page, void *opaque)
 {
 	struct sev_gmem_populate_args *sev_populate_args = opaque;
 	struct sev_data_snp_launch_update fw_args = {0};
@@ -2277,7 +2277,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 	int level;
 	int ret;
 
-	if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src))
+	if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src_page))
 		return -EINVAL;
 
 	ret = snp_lookup_rmpentry((u64)pfn, &assigned, &level);
@@ -2288,15 +2288,14 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 		goto out;
 	}
 
-	if (src) {
-		void *vaddr = kmap_local_pfn(pfn);
+	if (src_page) {
+		void *src_vaddr = kmap_local_page(src_page);
+		void *dst_vaddr = kmap_local_pfn(pfn);
 
-		if (copy_from_user(vaddr, src, PAGE_SIZE)) {
-			kunmap_local(vaddr);
-			ret = -EFAULT;
-			goto out;
-		}
-		kunmap_local(vaddr);
+		memcpy(dst_vaddr, src_vaddr, PAGE_SIZE);
+
+		kunmap_local(src_vaddr);
+		kunmap_local(dst_vaddr);
 	}
 
 	ret = rmp_make_private(pfn, gfn << PAGE_SHIFT, PG_LEVEL_4K,
@@ -2326,17 +2325,19 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
 	if (ret && !snp_page_reclaim(kvm, pfn) &&
 	    sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
 	    sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
-		void *vaddr = kmap_local_pfn(pfn);
+		void *src_vaddr = kmap_local_page(src_page);
+		void *dst_vaddr = kmap_local_pfn(pfn);
 
-		if (copy_to_user(src, vaddr, PAGE_SIZE))
-			pr_debug("Failed to write CPUID page back to userspace\n");
+		memcpy(src_vaddr, dst_vaddr, PAGE_SIZE);
 
-		kunmap_local(vaddr);
+		kunmap_local(src_vaddr);
+		kunmap_local(dst_vaddr);
 	}
 
 out:
-	pr_debug("%s: exiting with return code %d (fw_error %d)\n",
-		 __func__, ret, sev_populate_args->fw_error);
+	if (ret)
+		pr_debug("%s: error updating GFN %llx, return code %d (fw_error %d)\n",
+			 __func__, gfn, ret, sev_populate_args->fw_error);
 	return ret;
 }
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 4fb042ce8ed1..5df9d32d2058 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -3118,34 +3118,24 @@ struct tdx_gmem_post_populate_arg {
 };
 
 static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
-				  void __user *src, void *_arg)
+				  struct page *src_page, void *_arg)
 {
 	struct tdx_gmem_post_populate_arg *arg = _arg;
 	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
 	u64 err, entry, level_state;
 	gpa_t gpa = gfn_to_gpa(gfn);
-	struct page *src_page;
 	int ret, i;
 
 	if (KVM_BUG_ON(kvm_tdx->page_add_src, kvm))
 		return -EIO;
 
-	/*
-	 * Get the source page if it has been faulted in. Return failure if the
-	 * source page has been swapped out or unmapped in primary memory.
-	 */
-	ret = get_user_pages_fast((unsigned long)src, 1, 0, &src_page);
-	if (ret < 0)
-		return ret;
-	if (ret != 1)
-		return -ENOMEM;
+	if (!src_page)
+		return -EOPNOTSUPP;
 
 	kvm_tdx->page_add_src = src_page;
 	ret = kvm_tdp_mmu_map_private_pfn(arg->vcpu, gfn, pfn);
 	kvm_tdx->page_add_src = NULL;
 
-	put_page(src_page);
-
 	if (ret || !(arg->flags & KVM_TDX_MEASURE_MEMORY_REGION))
 		return ret;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1d0cee72e560..49c0cfe24fd8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2566,7 +2566,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
  * @gfn: starting GFN to be populated
  * @src: userspace-provided buffer containing data to copy into GFN range
  *       (passed to @post_populate, and incremented on each iteration
- *       if not NULL)
+ *       if not NULL). Must be page-aligned.
  * @npages: number of pages to copy from userspace-buffer
  * @post_populate: callback to issue for each gmem page that backs the GPA
  *                 range
@@ -2581,7 +2581,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
  * Returns the number of pages that were populated.
  */
 typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
-				    void __user *src, void *opaque);
+				    struct page *page, void *opaque);
 
 long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
 		       kvm_gmem_populate_cb post_populate, void *opaque);
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index e90879322fd0..923c51a3a525 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -820,12 +820,48 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_get_pfn);
 
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
+
+static long __kvm_gmem_populate(struct kvm *kvm, struct kvm_memory_slot *slot,
+				struct file *file, gfn_t gfn, struct page *src_page,
+				kvm_gmem_populate_cb post_populate, void *opaque)
+{
+	pgoff_t index = kvm_gmem_get_index(slot, gfn);
+	struct folio *folio;
+	kvm_pfn_t pfn;
+	int ret;
+
+	filemap_invalidate_lock(file->f_mapping);
+
+	folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, NULL);
+	if (IS_ERR(folio)) {
+		ret = PTR_ERR(folio);
+		goto out_unlock;
+	}
+
+	folio_unlock(folio);
+
+	if (!kvm_range_has_memory_attributes(kvm, gfn, gfn + 1,
+					     KVM_MEMORY_ATTRIBUTE_PRIVATE,
+					     KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
+		ret = -EINVAL;
+		goto out_put_folio;
+	}
+
+	ret = post_populate(kvm, gfn, pfn, src_page, opaque);
+	if (!ret)
+		folio_mark_uptodate(folio);
+
+out_put_folio:
+	folio_put(folio);
+out_unlock:
+	filemap_invalidate_unlock(file->f_mapping);
+	return ret;
+}
+
 long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
 		       kvm_gmem_populate_cb post_populate, void *opaque)
 {
 	struct kvm_memory_slot *slot;
-	void __user *p;
-
 	int ret = 0;
 	long i;
 
@@ -834,6 +870,9 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 	if (WARN_ON_ONCE(npages <= 0))
 		return -EINVAL;
 
+	if (WARN_ON_ONCE(!PAGE_ALIGNED(src)))
+		return -EINVAL;
+
 	slot = gfn_to_memslot(kvm, start_gfn);
 	if (!kvm_slot_has_gmem(slot))
 		return -EINVAL;
@@ -842,47 +881,37 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 	if (!file)
 		return -EFAULT;
 
-	filemap_invalidate_lock(file->f_mapping);
-
 	npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
 	for (i = 0; i < npages; i++) {
-		struct folio *folio;
-		gfn_t gfn = start_gfn + i;
-		pgoff_t index = kvm_gmem_get_index(slot, gfn);
-		kvm_pfn_t pfn;
+		struct page *src_page = NULL;
 
 		if (signal_pending(current)) {
 			ret = -EINTR;
 			break;
 		}
 
-		folio = __kvm_gmem_get_pfn(file, slot, index, &pfn, NULL);
-		if (IS_ERR(folio)) {
-			ret = PTR_ERR(folio);
-			break;
+		if (src) {
+			unsigned long uaddr = (unsigned long)src + i * PAGE_SIZE;
+
+			ret = get_user_pages_fast(uaddr, 1, 0, &src_page);
+			if (ret < 0)
+				break;
+			if (ret != 1) {
+				ret = -ENOMEM;
+				break;
+			}
 		}
 
-		folio_unlock(folio);
+		ret = __kvm_gmem_populate(kvm, slot, file, start_gfn + i, src_page,
+					  post_populate, opaque);
 
-		ret = -EINVAL;
-		if (!kvm_range_has_memory_attributes(kvm, gfn, gfn + 1,
-						     KVM_MEMORY_ATTRIBUTE_PRIVATE,
-						     KVM_MEMORY_ATTRIBUTE_PRIVATE))
-			goto put_folio_and_exit;
+		if (src_page)
+			put_page(src_page);
 
-		p = src ? src + i * PAGE_SIZE : NULL;
-		ret = post_populate(kvm, gfn, pfn, p, opaque);
-		if (!ret)
-			folio_mark_uptodate(folio);
-
-put_folio_and_exit:
-		folio_put(folio);
 		if (ret)
 			break;
 	}
 
-	filemap_invalidate_unlock(file->f_mapping);
-
 	return ret && !i ? ret : i;
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_populate);
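For reference, the net effect of this series on the callback contract:
kvm_gmem_populate() now pins the source page (if any) and resolves the
target pfn itself, so a post-populate callback reduces to a copy plus
arch-specific setup. A minimal, hypothetical callback against the new
signature might look as follows (the real in-tree users are the
SNP/TDX callbacks above):

	static int example_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
					      struct page *src_page, void *opaque)
	{
		/* src_page is NULL when the caller passed src == NULL. */
		if (src_page) {
			void *src = kmap_local_page(src_page);
			void *dst = kmap_local_pfn(pfn);

			memcpy(dst, src, PAGE_SIZE);

			/* Release local kmaps in reverse order of creation. */
			kunmap_local(dst);
			kunmap_local(src);
		}

		/* Arch-specific conversion/measurement of the target page goes here. */
		return 0;
	}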