summary | refs | log | tree | commit | diff
path: root/mm/pagewalk.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/pagewalk.c')
-rw-r--r--  mm/pagewalk.c  25
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index a94c401ab2cf..4e7bcd975c54 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -97,6 +97,7 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
+ pud_t pudval = pudp_get(pud);
pmd_t *pmd;
unsigned long next;
const struct mm_walk_ops *ops = walk->ops;
@@ -105,6 +106,24 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
int err = 0;
int depth = real_depth(3);
+ /*
+ * For PTE handling, pte_offset_map_lock() takes care of checking
+ * whether there actually is a page table. But it also has to be
+ * very careful about concurrent page table reclaim.
+ *
+ * Similarly, we have to be careful here - a PUD entry that points
+ * to a PMD table cannot go away, so we can just walk it. But if
+ * it's something else, we need to ensure we didn't race something,
+ * so need to retry.
+ *
+ * A pertinent example of this is a PUD refault after PUD split -
+ * we will need to split again or risk accessing invalid memory.
+ */
+ if (!pud_present(pudval) || pud_leaf(pudval)) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
+
pmd = pmd_offset(pud, addr);
do {
again:
@@ -218,12 +237,12 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
else if (pud_leaf(*pud) || !pud_present(*pud))
continue; /* Nothing to do. */
- if (pud_none(*pud))
- goto again;
-
err = walk_pmd_range(pud, addr, next, walk);
if (err)
break;
+
+ if (walk->action == ACTION_AGAIN)
+ goto again;
} while (pud++, addr = next, addr != end);
return err;