Diffstat (limited to 'linux')
-rw-r--r--   linux/fstab                             5
-rw-r--r--   linux/group                             1
-rw-r--r--   linux/inittab                          51
-rw-r--r--   linux/passwd                            1
-rw-r--r--   linux/patches/bugs.c.patch            156
-rw-r--r--   linux/patches/gen_initramfs.sh.patch   31
-rw-r--r--   linux/patches/init.c.patch            386
-rw-r--r--   linux/patches/initramfs.c.patch        45
-rw-r--r--   linux/patches/intel.c.patch            26
-rw-r--r--   linux/patches/rmpiggy.S.patch          34
-rwxr-xr-x   linux/rcS                               9
11 files changed, 745 insertions, 0 deletions
diff --git a/linux/fstab b/linux/fstab
new file mode 100644
index 0000000..e1652d3
--- /dev/null
+++ b/linux/fstab
@@ -0,0 +1,5 @@
+none /proc proc defaults 0 0
+#devpts /dev/pts devpts gid=5,mode=620 0 0
+#devtmpfs /dev devtmpfs mode=0755,nosuid 0 0
+sysfs /sys sysfs defaults 0 0
+tmpfs /run tmpfs defaults 0 0
diff --git a/linux/group b/linux/group
new file mode 100644
index 0000000..5ba713c
--- /dev/null
+++ b/linux/group
@@ -0,0 +1 @@
+root::0:root
diff --git a/linux/inittab b/linux/inittab
new file mode 100644
index 0000000..7e5cbc0
--- /dev/null
+++ b/linux/inittab
@@ -0,0 +1,51 @@
+# Note: BusyBox init works just fine without an inittab. If no inittab is
+# found, it has the following default behavior:
+# ::sysinit:/etc/init.d/rcS
+# ::askfirst:/bin/sh
+# ::ctrlaltdel:/sbin/reboot
+# ::shutdown:/sbin/swapoff -a
+# ::shutdown:/bin/umount -a -r
+# ::restart:/sbin/init
+# tty2::askfirst:/bin/sh
+# tty3::askfirst:/bin/sh
+# tty4::askfirst:/bin/sh
+#
+# Boot-time system configuration/initialization script.
+# This is run first except when booting in single-user mode.
+#
+#::sysinit:/etc/init.d/rcS
+::sysinit:/bin/mount -a
+
+# /bin/sh invocations on selected ttys
+#
+# Note below that we prefix the shell commands with a "-" to indicate to the
+# shell that it is supposed to be a login shell. Normally this is handled by
+# login, but since we are bypassing login in this case, BusyBox lets you do
+# this yourself...
+#
+# Start an "askfirst" shell on the console (whatever that may be)
+::askfirst:-/bin/sh
+# Start an "askfirst" shell on /dev/tty2-4
+#tty2::askfirst:-/bin/sh
+#tty3::askfirst:-/bin/sh
+#tty4::askfirst:-/bin/sh
+
+# /bin/getty invocations for selected ttys
+#tty4::respawn:/bin/getty 38400 tty5
+#tty5::respawn:/bin/getty 38400 tty6
+
+# Example of how to put a getty on a serial line (for a terminal)
+::respawn:/bin/getty -L ttyS0 9600 vt100
+#::respawn:/bin/getty -L ttyS1 9600 vt100
+#
+# Example how to put a getty on a modem line.
+#::respawn:/bin/getty 57600 ttyS2
+
+# Stuff to do when restarting the init process
+::restart:/bin/init
+
+# Stuff to do before rebooting
+#::ctrlaltdel:/bin/reboot
+::shutdown:/bin/umount -a -r
+#::shutdown:/bin/swapoff -a
+
diff --git a/linux/passwd b/linux/passwd
new file mode 100644
index 0000000..b66b71f
--- /dev/null
+++ b/linux/passwd
@@ -0,0 +1 @@
+root:Dtd09GUh1f5sY:0:0:root:/root:/bin/sh
diff --git a/linux/patches/bugs.c.patch b/linux/patches/bugs.c.patch
new file mode 100644
index 0000000..e585e23
--- /dev/null
+++ b/linux/patches/bugs.c.patch
@@ -0,0 +1,156 @@
+--- arch/x86/kernel/cpu/bugs.c.new 2022-04-21 10:22:57.337425325 -0400
++++ arch/x86/kernel/cpu/bugs.c 2022-04-21 10:34:47.550249356 -0400
+@@ -36,6 +36,7 @@
+
+ #include "cpu.h"
+
++#ifndef CONFIG_M486
+ static void __init spectre_v1_select_mitigation(void);
+ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
+@@ -45,6 +46,7 @@
+ static void __init taa_select_mitigation(void);
+ static void __init srbds_select_mitigation(void);
+ static void __init l1d_flush_select_mitigation(void);
++#endif // CONFIG_M486
+
+ /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+ u64 x86_spec_ctrl_base;
+@@ -112,6 +114,7 @@
+ if (boot_cpu_has(X86_FEATURE_STIBP))
+ x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
++#ifndef CONFIG_M486
+ /* Select the proper CPU mitigations before patching alternatives: */
+ spectre_v1_select_mitigation();
+ spectre_v2_select_mitigation();
+@@ -127,6 +130,7 @@
+ * mitigation until after TAA mitigation selection is done.
+ */
+ mds_print_mitigation();
++#endif // CONFIG_M486
+
+ arch_smt_update();
+
+@@ -248,6 +252,7 @@
+ [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+ };
+
++#ifndef CONFIG_M486
+ static void __init mds_select_mitigation(void)
+ {
+ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
+@@ -274,6 +279,7 @@
+
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+ }
++#endif // CONFIG_M486
+
+ static int __init mds_cmdline(char *str)
+ {
+@@ -317,6 +323,7 @@
+ [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
+ };
+
++#ifndef CONFIG_M486
+ static void __init taa_select_mitigation(void)
+ {
+ u64 ia32_cap;
+@@ -388,6 +395,7 @@
+ out:
+ pr_info("%s\n", taa_strings[taa_mitigation]);
+ }
++#endif // CONFIG_M486
+
+ static int __init tsx_async_abort_parse_cmdline(char *str)
+ {
+@@ -463,6 +471,7 @@
+ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+ }
+
++#ifndef CONFIG_M486
+ static void __init srbds_select_mitigation(void)
+ {
+ u64 ia32_cap;
+@@ -487,6 +496,7 @@
+ update_srbds_msr();
+ pr_info("%s\n", srbds_strings[srbds_mitigation]);
+ }
++#endif // CONFIG_M486
+
+ static int __init srbds_parse_cmdline(char *str)
+ {
+@@ -504,6 +514,7 @@
+ #undef pr_fmt
+ #define pr_fmt(fmt) "L1D Flush : " fmt
+
++#ifndef CONFIG_M486
+ enum l1d_flush_mitigations {
+ L1D_FLUSH_OFF = 0,
+ L1D_FLUSH_ON,
+@@ -528,10 +539,12 @@
+ return 0;
+ }
+ early_param("l1d_flush", l1d_flush_parse_cmdline);
++#endif // CONFIG_M486
+
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V1 : " fmt
+
++#ifndef CONFIG_M486
+ enum spectre_v1_mitigation {
+ SPECTRE_V1_MITIGATION_NONE,
+ SPECTRE_V1_MITIGATION_AUTO,
+@@ -618,6 +631,7 @@
+ return 0;
+ }
+ early_param("nospectre_v1", nospectre_v1_cmdline);
++#endif // CONFIG_M486
+
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V2 : " fmt
+@@ -729,6 +743,7 @@
+ { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
+ };
+
++#ifndef CONFIG_M486
+ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
+ {
+ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+@@ -1071,6 +1086,7 @@
+ /* Set up IBPB and STIBP depending on the general spectre V2 command */
+ spectre_v2_user_select_mitigation(cmd);
+ }
++#endif // CONFIG_M486
+
+ static void update_stibp_msr(void * __unused)
+ {
+@@ -1207,6 +1223,7 @@
+ { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
+ };
+
++#ifndef CONFIG_M486
+ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+ {
+ enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
+@@ -1316,6 +1333,7 @@
+ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+ pr_info("%s\n", ssb_strings[ssb_mode]);
+ }
++#endif // CONFIG_M486
+
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Speculation prctl: " fmt
+@@ -1573,6 +1591,7 @@
+ enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+
++#ifndef CONFIG_M486
+ /*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+@@ -1926,3 +1945,4 @@
+ return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
+ }
+ #endif
++#endif // CONFIG_M486
diff --git a/linux/patches/gen_initramfs.sh.patch b/linux/patches/gen_initramfs.sh.patch
new file mode 100644
index 0000000..472fe37
--- /dev/null
+++ b/linux/patches/gen_initramfs.sh.patch
@@ -0,0 +1,31 @@
+--- usr/gen_initramfs.sh 2022-04-08 07:59:05.000000000 -0400
++++ usr/gen_initramfs.sh.new 2022-04-21 08:51:04.635080820 -0400
+@@ -187,8 +187,8 @@
+ }
+
+ prog=$0
+-root_uid=0
+-root_gid=0
++root_uid="squash"
++root_gid="squash"
+ dep_list=
+ cpio_list=$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)
+ output="/dev/stdout"
+@@ -209,13 +209,13 @@
+ shift
+ ;;
+ "-u") # map $1 to uid=0 (root)
+- root_uid="$1"
+- [ "$root_uid" = "-1" ] && root_uid=$(id -u || echo 0)
++# root_uid="$1"
++# [ "$root_uid" = "-1" ] && root_uid=$(id -u || echo 0)
+ shift
+ ;;
+ "-g") # map $1 to gid=0 (root)
+- root_gid="$1"
+- [ "$root_gid" = "-1" ] && root_gid=$(id -g || echo 0)
++# root_gid="$1"
++# [ "$root_gid" = "-1" ] && root_gid=$(id -g || echo 0)
+ shift
+ ;;
+ "-h")
diff --git a/linux/patches/init.c.patch b/linux/patches/init.c.patch
new file mode 100644
index 0000000..4225489
--- /dev/null
+++ b/linux/patches/init.c.patch
@@ -0,0 +1,386 @@
+--- arch/x86/realmode/init.c.new 2022-04-21 08:59:29.875419413 -0400
++++ arch/x86/realmode/init.c 2022-04-21 09:00:11.359752804 -0400
+@@ -19,195 +19,195 @@
+
+ void load_trampoline_pgtable(void)
+ {
+-#ifdef CONFIG_X86_32
+- load_cr3(initial_page_table);
+-#else
+- /*
+- * This function is called before exiting to real-mode and that will
+- * fail with CR4.PCIDE still set.
+- */
+- if (boot_cpu_has(X86_FEATURE_PCID))
+- cr4_clear_bits(X86_CR4_PCIDE);
+-
+- write_cr3(real_mode_header->trampoline_pgd);
+-#endif
+-
+- /*
+- * The CR3 write above will not flush global TLB entries.
+- * Stale, global entries from previous page tables may still be
+- * present. Flush those stale entries.
+- *
+- * This ensures that memory accessed while running with
+- * trampoline_pgd is *actually* mapped into trampoline_pgd.
+- */
+- __flush_tlb_all();
++//#ifdef CONFIG_X86_32
++// load_cr3(initial_page_table);
++//#else
++// /*
++// * This function is called before exiting to real-mode and that will
++// * fail with CR4.PCIDE still set.
++// */
++// if (boot_cpu_has(X86_FEATURE_PCID))
++// cr4_clear_bits(X86_CR4_PCIDE);
++//
++// write_cr3(real_mode_header->trampoline_pgd);
++//#endif
++//
++// /*
++// * The CR3 write above will not flush global TLB entries.
++// * Stale, global entries from previous page tables may still be
++// * present. Flush those stale entries.
++// *
++// * This ensures that memory accessed while running with
++// * trampoline_pgd is *actually* mapped into trampoline_pgd.
++// */
++// __flush_tlb_all();
+ }
+
+ void __init reserve_real_mode(void)
+ {
+- phys_addr_t mem;
+- size_t size = real_mode_size_needed();
+-
+- if (!size)
+- return;
+-
+- WARN_ON(slab_is_available());
+-
+- /* Has to be under 1M so we can execute real-mode AP code. */
+- mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
+- if (!mem)
+- pr_info("No sub-1M memory is available for the trampoline\n");
+- else
+- set_real_mode_mem(mem);
+-
+- /*
+- * Unconditionally reserve the entire fisrt 1M, see comment in
+- * setup_arch().
+- */
+- memblock_reserve(0, SZ_1M);
+-}
+-
+-static void sme_sev_setup_real_mode(struct trampoline_header *th)
+-{
+-#ifdef CONFIG_AMD_MEM_ENCRYPT
+- if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+- th->flags |= TH_FLAGS_SME_ACTIVE;
+-
+- if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
+- /*
+- * Skip the call to verify_cpu() in secondary_startup_64 as it
+- * will cause #VC exceptions when the AP can't handle them yet.
+- */
+- th->start = (u64) secondary_startup_64_no_verify;
+-
+- if (sev_es_setup_ap_jump_table(real_mode_header))
+- panic("Failed to get/update SEV-ES AP Jump Table");
+- }
+-#endif
+-}
+-
+-static void __init setup_real_mode(void)
+-{
+- u16 real_mode_seg;
+- const u32 *rel;
+- u32 count;
+- unsigned char *base;
+- unsigned long phys_base;
+- struct trampoline_header *trampoline_header;
+- size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+-#ifdef CONFIG_X86_64
+- u64 *trampoline_pgd;
+- u64 efer;
+- int i;
+-#endif
+-
+- base = (unsigned char *)real_mode_header;
+-
+- /*
+- * If SME is active, the trampoline area will need to be in
+- * decrypted memory in order to bring up other processors
+- * successfully. This is not needed for SEV.
+- */
+- if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+- set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
+-
+- memcpy(base, real_mode_blob, size);
+-
+- phys_base = __pa(base);
+- real_mode_seg = phys_base >> 4;
+-
+- rel = (u32 *) real_mode_relocs;
+-
+- /* 16-bit segment relocations. */
+- count = *rel++;
+- while (count--) {
+- u16 *seg = (u16 *) (base + *rel++);
+- *seg = real_mode_seg;
+- }
+-
+- /* 32-bit linear relocations. */
+- count = *rel++;
+- while (count--) {
+- u32 *ptr = (u32 *) (base + *rel++);
+- *ptr += phys_base;
+- }
+-
+- /* Must be performed *after* relocation. */
+- trampoline_header = (struct trampoline_header *)
+- __va(real_mode_header->trampoline_header);
+-
+-#ifdef CONFIG_X86_32
+- trampoline_header->start = __pa_symbol(startup_32_smp);
+- trampoline_header->gdt_limit = __BOOT_DS + 7;
+- trampoline_header->gdt_base = __pa_symbol(boot_gdt);
+-#else
+- /*
+- * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
+- * so we need to mask it out.
+- */
+- rdmsrl(MSR_EFER, efer);
+- trampoline_header->efer = efer & ~EFER_LMA;
+-
+- trampoline_header->start = (u64) secondary_startup_64;
+- trampoline_cr4_features = &trampoline_header->cr4;
+- *trampoline_cr4_features = mmu_cr4_features;
+-
+- trampoline_header->flags = 0;
+-
+- trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+-
+- /* Map the real mode stub as virtual == physical */
+- trampoline_pgd[0] = trampoline_pgd_entry.pgd;
+-
+- /*
+- * Include the entirety of the kernel mapping into the trampoline
+- * PGD. This way, all mappings present in the normal kernel page
+- * tables are usable while running on trampoline_pgd.
+- */
+- for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+- trampoline_pgd[i] = init_top_pgt[i].pgd;
+-#endif
+-
+- sme_sev_setup_real_mode(trampoline_header);
+-}
+-
+-/*
+- * reserve_real_mode() gets called very early, to guarantee the
+- * availability of low memory. This is before the proper kernel page
+- * tables are set up, so we cannot set page permissions in that
+- * function. Also trampoline code will be executed by APs so we
+- * need to mark it executable at do_pre_smp_initcalls() at least,
+- * thus run it as a early_initcall().
+- */
+-static void __init set_real_mode_permissions(void)
+-{
+- unsigned char *base = (unsigned char *) real_mode_header;
+- size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+-
+- size_t ro_size =
+- PAGE_ALIGN(real_mode_header->ro_end) -
+- __pa(base);
+-
+- size_t text_size =
+- PAGE_ALIGN(real_mode_header->ro_end) -
+- real_mode_header->text_start;
+-
+- unsigned long text_start =
+- (unsigned long) __va(real_mode_header->text_start);
+-
+- set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
+- set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
+- set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+-}
+-
+-static int __init init_real_mode(void)
+-{
+- if (!real_mode_header)
+- panic("Real mode trampoline was not allocated");
+-
+- setup_real_mode();
+- set_real_mode_permissions();
+-
+- return 0;
+-}
+-early_initcall(init_real_mode);
++// phys_addr_t mem;
++// size_t size = real_mode_size_needed();
++//
++// if (!size)
++// return;
++//
++// WARN_ON(slab_is_available());
++//
++// /* Has to be under 1M so we can execute real-mode AP code. */
++// mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
++// if (!mem)
++// pr_info("No sub-1M memory is available for the trampoline\n");
++// else
++// set_real_mode_mem(mem);
++//
++// /*
++// * Unconditionally reserve the entire fisrt 1M, see comment in
++// * setup_arch().
++// */
++// memblock_reserve(0, SZ_1M);
+}
++
++//static void sme_sev_setup_real_mode(struct trampoline_header *th)
++//{
++//#ifdef CONFIG_AMD_MEM_ENCRYPT
++// if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
++// th->flags |= TH_FLAGS_SME_ACTIVE;
++//
++// if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
++// /*
++// * Skip the call to verify_cpu() in secondary_startup_64 as it
++// * will cause #VC exceptions when the AP can't handle them yet.
++// */
++// th->start = (u64) secondary_startup_64_no_verify;
++//
++// if (sev_es_setup_ap_jump_table(real_mode_header))
++// panic("Failed to get/update SEV-ES AP Jump Table");
++// }
++//#endif
++//}
++//
++//static void __init setup_real_mode(void)
++//{
++// u16 real_mode_seg;
++// const u32 *rel;
++// u32 count;
++// unsigned char *base;
++// unsigned long phys_base;
++// struct trampoline_header *trampoline_header;
++// size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
++//#ifdef CONFIG_X86_64
++// u64 *trampoline_pgd;
++// u64 efer;
++// int i;
++//#endif
++//
++// base = (unsigned char *)real_mode_header;
++//
++// /*
++// * If SME is active, the trampoline area will need to be in
++// * decrypted memory in order to bring up other processors
++// * successfully. This is not needed for SEV.
++// */
++// if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
++// set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
++//
++// memcpy(base, real_mode_blob, size);
++//
++// phys_base = __pa(base);
++// real_mode_seg = phys_base >> 4;
++//
++// rel = (u32 *) real_mode_relocs;
++//
++// /* 16-bit segment relocations. */
++// count = *rel++;
++// while (count--) {
++// u16 *seg = (u16 *) (base + *rel++);
++// *seg = real_mode_seg;
++// }
++//
++// /* 32-bit linear relocations. */
++// count = *rel++;
++// while (count--) {
++// u32 *ptr = (u32 *) (base + *rel++);
++// *ptr += phys_base;
++// }
++//
++// /* Must be performed *after* relocation. */
++// trampoline_header = (struct trampoline_header *)
++// __va(real_mode_header->trampoline_header);
++//
++//#ifdef CONFIG_X86_32
++// trampoline_header->start = __pa_symbol(startup_32_smp);
++// trampoline_header->gdt_limit = __BOOT_DS + 7;
++// trampoline_header->gdt_base = __pa_symbol(boot_gdt);
++//#else
++// /*
++// * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
++// * so we need to mask it out.
++// */
++// rdmsrl(MSR_EFER, efer);
++// trampoline_header->efer = efer & ~EFER_LMA;
++//
++// trampoline_header->start = (u64) secondary_startup_64;
++// trampoline_cr4_features = &trampoline_header->cr4;
++// *trampoline_cr4_features = mmu_cr4_features;
++//
++// trampoline_header->flags = 0;
++//
++// trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
++//
++// /* Map the real mode stub as virtual == physical */
++// trampoline_pgd[0] = trampoline_pgd_entry.pgd;
++//
++// /*
++// * Include the entirety of the kernel mapping into the trampoline
++// * PGD. This way, all mappings present in the normal kernel page
++// * tables are usable while running on trampoline_pgd.
++// */
++// for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
++// trampoline_pgd[i] = init_top_pgt[i].pgd;
++//#endif
++//
++// sme_sev_setup_real_mode(trampoline_header);
++//}
++//
++///*
++// * reserve_real_mode() gets called very early, to guarantee the
++// * availability of low memory. This is before the proper kernel page
++// * tables are set up, so we cannot set page permissions in that
++// * function. Also trampoline code will be executed by APs so we
++// * need to mark it executable at do_pre_smp_initcalls() at least,
++// * thus run it as a early_initcall().
++// */
++//static void __init set_real_mode_permissions(void)
++//{
++// unsigned char *base = (unsigned char *) real_mode_header;
++// size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
++//
++// size_t ro_size =
++// PAGE_ALIGN(real_mode_header->ro_end) -
++// __pa(base);
++//
++// size_t text_size =
++// PAGE_ALIGN(real_mode_header->ro_end) -
++// real_mode_header->text_start;
++//
++// unsigned long text_start =
++// (unsigned long) __va(real_mode_header->text_start);
++//
++// set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
++// set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
++// set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
++//}
++//
++//static int __init init_real_mode(void)
++//{
++// if (!real_mode_header)
++// panic("Real mode trampoline was not allocated");
++//
++// setup_real_mode();
++// set_real_mode_permissions();
++//
++// return 0;
++//}
++//early_initcall(init_real_mode);
diff --git a/linux/patches/initramfs.c.patch b/linux/patches/initramfs.c.patch
new file mode 100644
index 0000000..dc8a304
--- /dev/null
+++ b/linux/patches/initramfs.c.patch
@@ -0,0 +1,45 @@
+--- init/initramfs.c.new 2022-04-21 10:43:58.644900319 -0400
++++ init/initramfs.c 2022-04-21 10:46:57.309758246 -0400
+@@ -461,7 +461,7 @@
+
+ #include <linux/decompress/generic.h>
+
+-static char * __init unpack_to_rootfs(char *buf, unsigned long len)
++static char * __init do_unpack_to_rootfs(char *buf, unsigned long len, char *output)
+ {
+ long written;
+ decompress_fn decompress;
+@@ -497,7 +497,7 @@
+ decompress = decompress_method(buf, len, &compress_name);
+ pr_debug("Detected %s compressed data\n", compress_name);
+ if (decompress) {
+- int res = decompress(buf, len, NULL, flush_buffer, NULL,
++ int res = decompress(buf, len, NULL, flush_buffer, output,
+ &my_inptr, error);
+ if (res)
+ error("decompressor failed");
+@@ -523,6 +523,11 @@
+ return message;
+ }
+
++static char * __init unpack_to_rootfs(char *buf, unsigned long len)
++{
++ return do_unpack_to_rootfs(buf, len, NULL);
++}
++
+ static int __initdata do_retain_initrd;
+
+ static int __init retain_initrd_param(char *str)
+@@ -683,7 +688,11 @@
+ else
+ printk(KERN_INFO "Unpacking initramfs...\n");
+
+- err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start);
++ //err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start);
++ void *output = vmalloc(0x80000);
++ err = do_unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start, output);
++ vfree(output);
++
+ if (err) {
+ #ifdef CONFIG_BLK_DEV_RAM
+ populate_initrd_image(err);
diff --git a/linux/patches/intel.c.patch b/linux/patches/intel.c.patch
new file mode 100644
index 0000000..19f73c4
--- /dev/null
+++ b/linux/patches/intel.c.patch
@@ -0,0 +1,26 @@
+--- arch/x86/kernel/cpu/intel.c.new 2022-04-21 10:30:16.303395343 -0400
++++ arch/x86/kernel/cpu/intel.c 2022-04-21 10:31:18.648938081 -0400
+@@ -752,6 +752,8 @@
+ }
+ #endif
+
++#ifndef CONFIG_M486
++
+ #define TLB_INST_4K 0x01
+ #define TLB_INST_4M 0x02
+ #define TLB_INST_2M_4M 0x03
+@@ -926,6 +928,14 @@
+ }
+ }
+
++#else
++
++static void intel_detect_tlb(struct cpuinfo_x86 *c)
++{
++}
++
++#endif // CONFIG_M486
++
+ static const struct cpu_dev intel_cpu_dev = {
+ .c_vendor = "Intel",
+ .c_ident = { "GenuineIntel" },
diff --git a/linux/patches/rmpiggy.S.patch b/linux/patches/rmpiggy.S.patch
new file mode 100644
index 0000000..0a9b1b2
--- /dev/null
+++ b/linux/patches/rmpiggy.S.patch
@@ -0,0 +1,34 @@
+--- arch/x86/realmode/rmpiggy.S.new 2022-04-21 09:00:39.636016815 -0400
++++ arch/x86/realmode/rmpiggy.S 2022-04-21 09:00:51.392134904 -0400
+@@ -3,17 +3,17 @@
+ * Wrapper script for the realmode binary as a transport object
+ * before copying to low memory.
+ */
+-#include <linux/linkage.h>
+-#include <asm/page_types.h>
+-
+- .section ".init.data","aw"
+-
+- .balign PAGE_SIZE
+-
+-SYM_DATA_START(real_mode_blob)
+- .incbin "arch/x86/realmode/rm/realmode.bin"
+-SYM_DATA_END_LABEL(real_mode_blob, SYM_L_GLOBAL, real_mode_blob_end)
+-
+-SYM_DATA_START(real_mode_relocs)
+- .incbin "arch/x86/realmode/rm/realmode.relocs"
+-SYM_DATA_END(real_mode_relocs)
++//#include <linux/linkage.h>
++//#include <asm/page_types.h>
++//
++// .section ".init.data","aw"
++//
++// .balign PAGE_SIZE
++//
++//SYM_DATA_START(real_mode_blob)
++// .incbin "arch/x86/realmode/rm/realmode.bin"
++//SYM_DATA_END_LABEL(real_mode_blob, SYM_L_GLOBAL, real_mode_blob_end)
++//
++//SYM_DATA_START(real_mode_relocs)
++// .incbin "arch/x86/realmode/rm/realmode.relocs"
++//SYM_DATA_END(real_mode_relocs)
diff --git a/linux/rcS b/linux/rcS
new file mode 100755
index 0000000..474bb0f
--- /dev/null
+++ b/linux/rcS
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+echo "Mounting filesystems..."
+mount -a
+mkdir /dev/pts
+mount devpts /dev/pts -t devpts -o gid=5,mode=620
+
+echo "Ready."
+
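For reference, a minimal sketch of how the patch series above might be applied to a kernel tree before building. The kernel source location and the -p0 strip level are assumptions based on the paths in the patch headers (they are relative to the kernel root with no a/ b/ prefix); this is not a procedure documented by the repository itself.

# Hypothetical usage sketch -- not part of this repository.
# KSRC is an assumed path to an already-unpacked kernel source tree.
KSRC=$HOME/src/linux

for p in linux/patches/*.patch; do
    # Headers reference files relative to the kernel root, so keep the
    # full path with -p0 and let patch run from inside the tree (-d).
    patch -d "$KSRC" -p0 < "$p"
done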