author		Matt Fleming <matt.fleming@intel.com>	2013-06-24 20:09:58 +0100
committer	Matt Fleming <matt.fleming@intel.com>	2013-06-25 16:27:14 +0100
commit		c07dbea2b90c97839edde5a32e80d3d9d47f8dc4 (patch)
tree		1552a2f51b68c15ac21f98ceea545a4474c244a4
parent		042d0e52f7debe5bdf303254e3b8e90d24e97635 (diff)
efi: Support booting 32-bit kernels from 64-bit EFI
The default scheme for booting Linux kernels should be to switch to
32-bit protected mode and jump to the start of the kernel image. The
kernel has always had the know-how to switch 64-bit capable CPUs into
64-bit mode if necessary. By using this scheme, we can transparently
boot either 32-bit or 64-bit kernels.

This change necessitated moving kernel_jump() to a .S file for both
i386 and x86-64. Writing inline assembly is fun for about 5 minutes,
but then becomes monstrously tedious.

Cc: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
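In caller's terms, the change amounts to this (a minimal sketch: the
kernel_jump() prototype matches the one this patch adds to efi/main.c,
while boot_linux() is a hypothetical caller, added here only for
illustration):

	/* Prototype added to efi/main.c by this patch. */
	extern void kernel_jump(EFI_PHYSICAL_ADDRESS kernel_start,
				struct boot_params *boot_params);

	/*
	 * Hypothetical call site: kernel_jump() never returns. On i386
	 * it jumps straight to the kernel's 32-bit entry point; on
	 * x86-64 it first drops the CPU back to 32-bit protected mode
	 * and lets the kernel switch itself into 64-bit mode if it
	 * needs to.
	 */
	static void boot_linux(EFI_PHYSICAL_ADDRESS kernel_start,
			       struct boot_params *bp)
	{
		kernel_jump(kernel_start, bp);
		/* not reached */
	}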
-rw-r--r--  efi/Makefile        |  8
-rw-r--r--  efi/i386/linux.S    | 20
-rw-r--r--  efi/main.c          | 66
-rw-r--r--  efi/x86_64/linux.S  | 45
4 files changed, 74 insertions(+), 65 deletions(-)
diff --git a/efi/Makefile b/efi/Makefile
index c89ca061..8e575ae2 100644
--- a/efi/Makefile
+++ b/efi/Makefile
@@ -38,7 +38,13 @@ LIB_OBJS = $(addprefix $(objdir)/com32/lib/,$(CORELIBOBJS))
CSRC = $(wildcard $(SRC)/*.c)
OBJS = $(subst $(SRC)/,,$(filter-out %wrapper.o, $(patsubst %.c,%.o,$(CSRC))))
-OBJS += $(objdir)/core/codepage.o
+OBJS += $(objdir)/core/codepage.o $(ARCH)/linux.o
+
+.PHONY: subdirs
+subdirs:
+ mkdir -p $(ARCH)
+
+$(OBJS): subdirs
# The targets to build in this directory
BTARGET = syslinux.efi
diff --git a/efi/i386/linux.S b/efi/i386/linux.S
new file mode 100644
index 00000000..557d3e20
--- /dev/null
+++ b/efi/i386/linux.S
@@ -0,0 +1,20 @@
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 2013 Intel Corporation; author: Matt Fleming
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston MA 02110-1301, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+ .globl kernel_jump
+ .type kernel_jump,@function
+ .text
+kernel_jump:
+ cli
+ movl 0x8(%esp), %esi	/* boot_params: the 32-bit boot protocol wants it in %esi */
+ movl 0x4(%esp), %ecx	/* kernel_start */
+ jmp *%ecx
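The stub above relies on the i386 cdecl calling convention: both
arguments arrive on the stack, and the 32-bit Linux boot protocol
expects a pointer to struct boot_params in %esi at kernel entry. The
stack layout on entry to kernel_jump, for reference:

	(%esp)		return address (unused; the stub never returns)
	0x4(%esp)	kernel_start -> %ecx, then jmp *%ecx
	0x8(%esp)	boot_params  -> %esi, where the kernel expects it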
diff --git a/efi/main.c b/efi/main.c
index 71333a4b..0bae399b 100644
--- a/efi/main.c
+++ b/efi/main.c
@@ -417,74 +417,12 @@ struct boot_params {
* allocate_pool()/free_pool()
* memory_map()
*/
+extern void kernel_jump(EFI_PHYSICAL_ADDRESS kernel_start,
+ struct boot_params *boot_params);
#if __SIZEOF_POINTER__ == 4
#define EFI_LOAD_SIG "EL32"
-static inline void kernel_jump(EFI_PHYSICAL_ADDRESS kernel_start,
- struct boot_params *boot_params)
-{
- asm volatile ("cli \n"
- "movl %0, %%esi \n"
- "movl %1, %%ecx \n"
- "jmp *%%ecx \n"
- :: "m" (boot_params), "m" (kernel_start));
-}
-
-static inline void handover_jump(EFI_HANDLE image, struct boot_params *bp,
- EFI_PHYSICAL_ADDRESS kernel_start)
-{
- /* handover protocol not implemented yet; the linux header needs to be updated */
-#if 0
- kernel_start += hdr->handover_offset;
-
- asm volatile ("cli \n"
- "pushl %0 \n"
- "pushl %1 \n"
- "pushl %2 \n"
- "movl %3, %%ecx \n"
- "jmp *%%ecx \n"
- :: "m" (bp), "m" (ST),
- "m" (image), "m" (kernel_start));
-#endif
-}
#elif __SIZEOF_POINTER__ == 8
#define EFI_LOAD_SIG "EL64"
-typedef void(*kernel_func)(void *, struct boot_params *);
-typedef void(*handover_func)(void *, EFI_SYSTEM_TABLE *, struct boot_params *);
-static inline void kernel_jump(EFI_PHYSICAL_ADDRESS kernel_start,
- struct boot_params *boot_params)
-{
- kernel_func kf;
-
- asm volatile ("cli");
-
- /* The 64-bit kernel entry is 512 bytes after the start. */
- kf = (kernel_func)kernel_start + 512;
-
- /*
- * The first parameter is a dummy because the kernel expects
- * boot_params in %[re]si.
- */
- kf(NULL, boot_params);
-}
-
-static inline void handover_jump(EFI_HANDLE image, struct boot_params *bp,
- EFI_PHYSICAL_ADDRESS kernel_start)
-{
-#if 0
- /* handover protocol not implemented yet the linux header needs to be updated */
-
- UINT32 offset = bp->hdr.handover_offset;
- handover_func hf;
-
- asm volatile ("cli");
-
- /* The 64-bit kernel entry is 512 bytes after the start. */
- kernel_start += 512;
-
- hf = (handover_func)(kernel_start + offset);
- hf(image, ST, bp);
-#endif
-}
#else
#error "unsupported architecture"
#endif
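One subtlety in the removed 64-bit path deserves a note: the expression
"kf = (kernel_func)kernel_start + 512;" adds 512 bytes only because GNU
C allows byte-granularity arithmetic on function pointers. A hedged
sketch of a more portable formulation, doing the addition before the
cast:

	typedef void (*kernel_func)(void *, struct boot_params *);
	kernel_func kf;

	/* The 64-bit entry point is 512 bytes past the start of the
	 * image, so do the arithmetic on the address, not the pointer. */
	kf = (kernel_func)(UINTN)(kernel_start + 512);

	/* The first argument is a dummy: in the SysV AMD64 ABI the
	 * second argument lands in %rsi, which is where the kernel
	 * expects boot_params. */
	kf(NULL, boot_params);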
diff --git a/efi/x86_64/linux.S b/efi/x86_64/linux.S
new file mode 100644
index 00000000..4b1b88be
--- /dev/null
+++ b/efi/x86_64/linux.S
@@ -0,0 +1,45 @@
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 2013 Intel Corporation; author: Matt Fleming
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston MA 02110-1301, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+#define CR0_PG_FLAG 0x80000000
+#define MSR_EFER 0xc0000080
+
+ .globl kernel_jump
+ .type kernel_jump,@function
+ .code64
+kernel_jump:
+ cli
+
+ /*
+ * Set up our segment selector (0x10) and return address (%rdi)
+ * on the stack in preparation for the far return below.
+ */
+ mov $0x1000000000, %rcx	/* 0x10 << 32: CS selector in the high dword */
+ addq %rcx, %rdi		/* low dword is still kernel_start */
+ pushq %rdi
+
+ .code32
+pm_code:
+
+ /* Disable IA-32e mode by clearing IA32_EFER.LME */
+ xorl %eax, %eax
+ xorl %edx, %edx
+ movl $MSR_EFER, %ecx
+ wrmsr
+
+ /* Turn off paging to disable long mode */
+ movl %cr0, %eax
+ andl $~CR0_PG_FLAG, %eax
+ movl %eax, %cr0
+
+ /* Far return */
+ lret
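The far return is the crux of the drop out of long mode, and it trades
on operand size: the single pushq above leaves one qword on the stack,
which the 32-bit lret consumes as two dwords (a sketch of the layout,
assuming kernel_start fits in 32 bits):

	pushq %rdi leaves:	(0x10 << 32) | kernel_start
	32-bit lret pops:	EIP = kernel_start	(low dword)
				CS  = 0x10		(high dword)

Note that boot_params needs no shuffling here: it arrives in %rsi, the
second SysV AMD64 argument register, and its low half %esi is exactly
where the kernel's 32-bit boot protocol looks for it.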