Diffstat (limited to 'arch/x86/hyperv/hv_crash.c')
-rw-r--r--	arch/x86/hyperv/hv_crash.c	|	100
1 file changed, 52 insertions(+), 48 deletions(-)
diff --git a/arch/x86/hyperv/hv_crash.c b/arch/x86/hyperv/hv_crash.c
index 92da1b4f2e73..5ffcc23255de 100644
--- a/arch/x86/hyperv/hv_crash.c
+++ b/arch/x86/hyperv/hv_crash.c
@@ -107,14 +107,12 @@ static void __noreturn hv_panic_timeout_reboot(void)
cpu_relax();
}
-/* This cannot be inlined as it needs stack */
-static noinline __noclone void hv_crash_restore_tss(void)
+static void hv_crash_restore_tss(void)
{
load_TR_desc();
}
-/* This cannot be inlined as it needs stack */
-static noinline void hv_crash_clear_kernpt(void)
+static void hv_crash_clear_kernpt(void)
{
pgd_t *pgd;
p4d_t *p4d;
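
[Editorial note: the noinline/__noclone annotations are dropped above because these helpers are now reached from hv_crash_handle(), an ordinary C function that already runs on a restored stack. For context on the TSS side, a hedged sketch of what clearing the busy bit before LTR typically looks like, modeled on force_reload_TR() in arch/x86/include/asm/desc.h; illustrative only, not code from this patch:]

#include <linux/string.h>
#include <asm/desc.h>

/*
 * Illustrative only: LTR #GPs if the TSS descriptor is marked busy, so
 * reset the saved GDT entry's type to "available TSS" before reloading.
 */
static void mark_tss_not_busy_sketch(void)
{
	struct desc_struct *gdt = get_current_gdt_rw();
	tss_desc tss;

	memcpy(&tss, &gdt[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = DESC_TSS;	/* 0xB (busy) -> 0x9 (available) */
	write_gdt_entry(gdt, GDT_ENTRY_TSS, &tss, DESC_TSS);
}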
@@ -125,6 +123,25 @@ static noinline void hv_crash_clear_kernpt(void)
native_p4d_clear(p4d);
}
+
+static void __noreturn hv_crash_handle(void)
+{
+ hv_crash_restore_tss();
+ hv_crash_clear_kernpt();
+
+ /* we are now fully in devirtualized normal kernel mode */
+ __crash_kexec(NULL);
+
+ hv_panic_timeout_reboot();
+}
+
+/*
+ * __naked functions do not permit function calls, not even to __always_inline
+ * functions that only contain asm() blocks themselves. So use a macro instead.
+ */
+#define hv_wrmsr(msr, val) \
+	asm volatile("wrmsr" :: "c"(msr), "a"((u32)(val)), "d"((u32)((u64)(val) >> 32)) : "memory")
+
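
[Editorial note: as a hedged illustration of what hv_wrmsr() expands to: WRMSR takes the MSR index in ECX and the 64-bit value split across EDX:EAX. In any context that is allowed to emit calls, native_wrmsrq() remains the normal interface; the macro exists only because __naked code must not contain CALL instructions. The helper name below is invented for illustration:]

static __always_inline void wrmsr_split_sketch(u32 msr, u64 val)
{
	u32 lo = (u32)val;		/* bits 31:0  -> EAX */
	u32 hi = (u32)(val >> 32);	/* bits 63:32 -> EDX */

	asm volatile("wrmsr" :: "c"(msr), "a"(lo), "d"(hi) : "memory");
}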
/*
* This is the C entry point from the asm glue code after the disable hypercall.
* We enter here in IA32-e long mode, ie, full 64bit mode running on kernel
@@ -133,51 +150,38 @@ static noinline void hv_crash_clear_kernpt(void)
* available. We restore kernel GDT, and rest of the context, and continue
* to kexec.
*/
-static asmlinkage void __noreturn hv_crash_c_entry(void)
+static void __naked hv_crash_c_entry(void)
{
- struct hv_crash_ctxt *ctxt = &hv_crash_ctxt;
-
/* first thing, restore kernel gdt */
- native_load_gdt(&ctxt->gdtr);
+ asm volatile("lgdt %0" : : "m" (hv_crash_ctxt.gdtr));
- asm volatile("movw %%ax, %%ss" : : "a"(ctxt->ss));
- asm volatile("movq %0, %%rsp" : : "m"(ctxt->rsp));
+ asm volatile("movw %0, %%ss\n\t"
+ "movq %1, %%rsp"
+ :: "m"(hv_crash_ctxt.ss), "m"(hv_crash_ctxt.rsp));
- asm volatile("movw %%ax, %%ds" : : "a"(ctxt->ds));
- asm volatile("movw %%ax, %%es" : : "a"(ctxt->es));
- asm volatile("movw %%ax, %%fs" : : "a"(ctxt->fs));
- asm volatile("movw %%ax, %%gs" : : "a"(ctxt->gs));
+ asm volatile("movw %0, %%ds" : : "m"(hv_crash_ctxt.ds));
+ asm volatile("movw %0, %%es" : : "m"(hv_crash_ctxt.es));
+ asm volatile("movw %0, %%fs" : : "m"(hv_crash_ctxt.fs));
+ asm volatile("movw %0, %%gs" : : "m"(hv_crash_ctxt.gs));
- native_wrmsrq(MSR_IA32_CR_PAT, ctxt->pat);
- asm volatile("movq %0, %%cr0" : : "r"(ctxt->cr0));
+ hv_wrmsr(MSR_IA32_CR_PAT, hv_crash_ctxt.pat);
+ asm volatile("movq %0, %%cr0" : : "r"(hv_crash_ctxt.cr0));
- asm volatile("movq %0, %%cr8" : : "r"(ctxt->cr8));
- asm volatile("movq %0, %%cr4" : : "r"(ctxt->cr4));
- asm volatile("movq %0, %%cr2" : : "r"(ctxt->cr4));
+ asm volatile("movq %0, %%cr8" : : "r"(hv_crash_ctxt.cr8));
+ asm volatile("movq %0, %%cr4" : : "r"(hv_crash_ctxt.cr4));
+ asm volatile("movq %0, %%cr2" : : "r"(hv_crash_ctxt.cr2));
- native_load_idt(&ctxt->idtr);
- native_wrmsrq(MSR_GS_BASE, ctxt->gsbase);
- native_wrmsrq(MSR_EFER, ctxt->efer);
+ asm volatile("lidt %0" : : "m" (hv_crash_ctxt.idtr));
+ hv_wrmsr(MSR_GS_BASE, hv_crash_ctxt.gsbase);
+ hv_wrmsr(MSR_EFER, hv_crash_ctxt.efer);
/* restore the original kernel CS now via far return */
- asm volatile("movzwq %0, %%rax\n\t"
- "pushq %%rax\n\t"
- "pushq $1f\n\t"
- "lretq\n\t"
- "1:nop\n\t" : : "m"(ctxt->cs) : "rax");
-
- /* We are in asmlinkage without stack frame, hence make C function
- * calls which will buy stack frames.
- */
- hv_crash_restore_tss();
- hv_crash_clear_kernpt();
-
- /* we are now fully in devirtualized normal kernel mode */
- __crash_kexec(NULL);
-
- hv_panic_timeout_reboot();
+ asm volatile("pushq %q0\n\t"
+ "pushq %q1\n\t"
+ "lretq"
+ :: "r"(hv_crash_ctxt.cs), "r"(hv_crash_handle));
}
-/* Tell gcc we are using lretq long jump in the above function intentionally */
+/* Tell objtool we are using lretq long jump in the above function intentionally */
STACK_FRAME_NON_STANDARD(hv_crash_c_entry);
static void hv_mark_tss_not_busy(void)
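
[Editorial note: the lretq sequence above is the standard far-return idiom for reloading CS in 64-bit mode: lretq pops RIP first, then CS, so the selector is pushed before the target address. A minimal standalone sketch, with kernel_cs_target() as an assumed continuation symbol rather than anything from this patch:]

#include <asm/segment.h>

extern void kernel_cs_target(void);	/* assumed continuation, illustration only */

static void __naked reload_cs_sketch(void)
{
	asm volatile("pushq %q0\n\t"	/* CS selector, popped second by lretq */
		     "pushq %q1\n\t"	/* RIP, popped first */
		     "lretq"
		     :: "r"((u64)__KERNEL_CS), "r"(kernel_cs_target));
}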
@@ -195,20 +199,20 @@ static void hv_hvcrash_ctxt_save(void)
{
struct hv_crash_ctxt *ctxt = &hv_crash_ctxt;
- asm volatile("movq %%rsp,%0" : "=m"(ctxt->rsp));
+ ctxt->rsp = current_stack_pointer;
ctxt->cr0 = native_read_cr0();
ctxt->cr4 = native_read_cr4();
- asm volatile("movq %%cr2, %0" : "=a"(ctxt->cr2));
- asm volatile("movq %%cr8, %0" : "=a"(ctxt->cr8));
+ asm volatile("movq %%cr2, %0" : "=r"(ctxt->cr2));
+ asm volatile("movq %%cr8, %0" : "=r"(ctxt->cr8));
- asm volatile("movl %%cs, %%eax" : "=a"(ctxt->cs));
- asm volatile("movl %%ss, %%eax" : "=a"(ctxt->ss));
- asm volatile("movl %%ds, %%eax" : "=a"(ctxt->ds));
- asm volatile("movl %%es, %%eax" : "=a"(ctxt->es));
- asm volatile("movl %%fs, %%eax" : "=a"(ctxt->fs));
- asm volatile("movl %%gs, %%eax" : "=a"(ctxt->gs));
+ asm volatile("movw %%cs, %0" : "=m"(ctxt->cs));
+ asm volatile("movw %%ss, %0" : "=m"(ctxt->ss));
+ asm volatile("movw %%ds, %0" : "=m"(ctxt->ds));
+ asm volatile("movw %%es, %0" : "=m"(ctxt->es));
+ asm volatile("movw %%fs, %0" : "=m"(ctxt->fs));
+ asm volatile("movw %%gs, %0" : "=m"(ctxt->gs));
native_store_gdt(&ctxt->gdtr);
store_idt(&ctxt->idtr);
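
[Editorial note: the save side now mirrors the restore side: current_stack_pointer is the kernel's standard accessor for RSP, and the selector reads use movw with "=m" operands, storing the 16-bit selectors directly to memory with no register clobber. A minimal hedged sketch of the same idiom, standalone:]

static inline u16 read_cs_sketch(void)
{
	u16 cs;

	/* mov from a segment register to a 16-bit memory operand is valid */
	asm volatile("movw %%cs, %0" : "=m"(cs));
	return cs;
}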