arch/x86/include/asm/kfence.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KFENCE support.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

#ifndef MODULE

#include <linux/bug.h>
#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/*
 * Force 4K pages for __kfence_pool: kfence_protect_page() below toggles the
 * Present bit of individual pool pages, which is only possible when the pool
 * is mapped with 4K PTEs rather than huge pages.
 */
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		unsigned int level;

		if (!lookup_address(addr, &level))
			return false;

		if (level != PG_LEVEL_4K)
			set_memory_4k(addr, 1);
	}

	return true;
}
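
/*
 * Note: the generic KFENCE init path (kfence_init_pool() in mm/kfence/core.c)
 * calls arch_kfence_init_pool() before carving __kfence_pool into object and
 * guard pages; if the hook returns false, KFENCE stays disabled.
 */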

/* Protect or unprotect the given page, flushing the TLB as needed. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);
	pteval_t val, new;

	if (WARN_ON(!pte || level != PG_LEVEL_4K))
		return false;

	val = pte_val(*pte);

	/*
	 * Protecting the page clears the Present bit; unprotecting sets it.
	 * If the PTE already has the desired Present state, there is nothing
	 * to do.
	 */
	if (protect != !!(val & _PAGE_PRESENT))
		return true;

	/*
	 * Otherwise, flip the Present bit, taking care not to write an
	 * L1TF-vulnerable PTE (a non-present PTE whose address bits still
	 * point at valid memory): flip_protnone_guard() inverts the PFN
	 * bits across the present/not-present transition for this reason.
	 */
	new = val ^ _PAGE_PRESENT;
	set_pte(pte, __pte(flip_protnone_guard(val, new, PTE_PFN_MASK)));

	/*
	 * If the page was protected (non-present) and we're making it
	 * present, there is no need to flush the TLB at all.
	 */
	if (!protect)
		return true;

	/*
	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
	 * with interrupts disabled. Therefore, the below is best-effort, and
	 * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
	 * lazy fault handling takes care of faults after the page is PRESENT.
	 */

	/*
	 * Flush this CPU's TLB, assuming whoever did the allocation/free is
	 * likely to continue running on this CPU.
	 */
	preempt_disable();
	flush_tlb_one_kernel(addr);
	preempt_enable();
	return true;
}
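
/*
 * How this hook is consumed: the generic KFENCE code in mm/kfence/core.c
 * wraps kfence_protect_page() roughly as in the sketch below (illustrative
 * only, not part of this header), toggling the guard pages around each
 * KFENCE allocation:
 *
 *	static inline bool __must_check kfence_protect(unsigned long addr)
 *	{
 *		return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
 *	}
 *
 *	static inline bool __must_check kfence_unprotect(unsigned long addr)
 *	{
 *		return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
 *	}
 */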

#endif /* !MODULE */

#endif /* _ASM_X86_KFENCE_H */