arm64 fixes:

- Fix HugeTLB leak due to CoW and PTE_RDONLY mismatch
- Avoid accessing unmapped FDT fields when checking validity
- Correctly account for vDSO AUX entry in ARCH_DLINFO
- Fix kallsyms with absolute expressions in linker script
- Kill unnecessary symbol-based relocs in vmlinux

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABCgAGBQJXpFZ5AAoJELescNyEwWM0PI4IALsTuHRzClOSMDLiqMUj8t+5
WNAcqybxAjCOVxAHckhweju++TeJBxcRH1nvBoNwiHIdHTv4fq1TZ3PeEq9kWMg5
JbKjYjvd9dW8k6LXMya8iXCYtG3kzbNejkNpOTVebC86yvas1IiEjNb/ztPdhJeM
HBSOkhfk8RcskfNxhuscZzGXbbdH9/R+XSTNRHN/RwCZH8PlInmduD9BbMvDhZyP
NLFonD2IgQ4as1kYG/HdIcw0BamHiURjd043+gyoqMvm7JjPksRzlQnr91SMkX17
LykXjHYPi2Me3aTrZ1NtkUNd5FHLHZ6/b9Wg6nA19d5KWkd3ER9uSJqGxkkbnt0=
=dtGK
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:

- fix HugeTLB leak due to CoW and PTE_RDONLY mismatch
- avoid accessing unmapped FDT fields when checking validity
- correctly account for vDSO AUX entry in ARCH_DLINFO
- fix kallsyms with absolute expressions in linker script
- kill unnecessary symbol-based relocs in vmlinux

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Fix copy-on-write referencing in HugeTLB
  arm64: mm: avoid fdt_check_header() before the FDT is fully mapped
  arm64: Define AT_VECTOR_SIZE_ARCH for ARCH_DLINFO
  arm64: relocatable: suppress R_AARCH64_ABS64 relocations in vmlinux
  arm64: vmlinux.lds: make __rela_offset and __dynsym_offset ABSOLUTE
commit 194d6ad32e
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS		:=-9
 
 ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux	+= -pie
+LDFLAGS_vmlinux	+= -pie -Bsymbolic
 endif
 
 KBUILD_DEFCONFIG := defconfig
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -140,6 +140,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
 
 #define SET_PERSONALITY(ex)	clear_thread_flag(TIF_32BIT);
 
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO						\
 do {								\
 	NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -224,6 +224,23 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	set_pte(ptep, pte);
 }
 
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	pteval_t lhs, rhs;
+
+	lhs = pte_val(pte_a);
+	rhs = pte_val(pte_b);
+
+	if (pte_present(pte_a))
+		lhs &= ~PTE_RDONLY;
+
+	if (pte_present(pte_b))
+		rhs &= ~PTE_RDONLY;
+
+	return (lhs == rhs);
+}
+
 /*
  * Huge pte definitions.
  */
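Why pte_same() must mask PTE_RDONLY: the HugeTLB copy-on-write path re-reads the PTE after re-taking the page table lock and only installs the freshly allocated page if the entry still compares equal to the value it saw earlier. Below is a minimal sketch of that pattern (simplified, assumed names; not the actual mm/hugetlb.c code), showing where an unmasked PTE_RDONLY bit, silently set by arm64's set_pte_at(), would make the comparison fail and leak the new page.

#include <linux/mm.h>

/*
 * Illustrative sketch only -- a simplified stand-in for the generic CoW
 * commit step, not code from mm/hugetlb.c.
 */
static void cow_commit_sketch(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t orig_pte, pte_t new_pte)
{
	pte_t cur = *ptep;	/* re-read the entry under the page table lock */

	/*
	 * A raw bit-for-bit comparison with orig_pte would fail whenever
	 * set_pte_at() has set PTE_RDONLY in the stored entry, even though
	 * the mapping is logically unchanged, and new_pte's page would be
	 * leaked.  The arm64 pte_same() above masks PTE_RDONLY on present
	 * entries so this check keeps working.
	 */
	if (pte_same(cur, orig_pte))
		set_pte_at(mm, addr, ptep, new_pte);
}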
--- a/arch/arm64/include/uapi/asm/auxvec.h
+++ b/arch/arm64/include/uapi/asm/auxvec.h
@@ -19,4 +19,6 @@
 /* vDSO location */
 #define AT_SYSINFO_EHDR	33
 
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
 #endif
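For context, the new constant feeds the generic aux vector sizing. Paraphrasing the core definitions from this era (include/linux/mm_types.h; the exact wording and comments may differ), mm_struct's saved_auxv[] is sized from AT_VECTOR_SIZE_ARCH, so an architecture whose ARCH_DLINFO emits an extra NEW_AUX_ENT without declaring it here leaves the array one id/value pair short:

/* Paraphrased core-kernel sizing logic -- approximate, for illustration. */
#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0	/* arm64 now reports 1, for AT_SYSINFO_EHDR */
#endif

/* Two words (id + value) per entry, plus one spare pair. */
#define AT_VECTOR_SIZE	(2 * (AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

/* Inside struct mm_struct, used for /proc/<pid>/auxv:
 *	unsigned long saved_auxv[AT_VECTOR_SIZE];
 */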
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -781,40 +781,25 @@ __primary_switch:
 	 * Iterate over each entry in the relocation table, and apply the
 	 * relocations in place.
 	 */
-	ldr	w8, =__dynsym_offset		// offset to symbol table
 	ldr	w9, =__rela_offset		// offset to reloc table
 	ldr	w10, =__rela_size		// size of reloc table
 
 	mov_q	x11, KIMAGE_VADDR		// default virtual offset
 	add	x11, x11, x23			// actual virtual offset
-	add	x8, x8, x11			// __va(.dynsym)
 	add	x9, x9, x11			// __va(.rela)
 	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)
 
 0:	cmp	x9, x10
-	b.hs	2f
+	b.hs	1f
 	ldp	x11, x12, [x9], #24
 	ldr	x13, [x9, #-8]
 	cmp	w12, #R_AARCH64_RELATIVE
-	b.ne	1f
+	b.ne	0b
 	add	x13, x13, x23			// relocate
 	str	x13, [x11, x23]
 	b	0b
 
-1:	cmp	w12, #R_AARCH64_ABS64
-	b.ne	0b
-	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
-	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
-	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
-	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
-	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
-	add	x14, x15, x23			// relocate
-	csel	x15, x14, x15, ne
-	add	x15, x13, x15
-	str	x15, [x11, x23]
-	b	0b
-
-2:
+1:
 #endif
 	ldr	x8, =__primary_switched
 	br	x8
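What remains is a pure R_AARCH64_RELATIVE fixup pass. A rough C model of the assembly above (illustrative only; the struct and function names are assumptions, not kernel code): each 24-byte Elf64_Rela entry is loaded, entries of any other type are skipped, and the addend plus the runtime (KASLR) offset is stored at the relocated target address.

#include <stdint.h>

/* Illustrative C model of the head.S loop above; names are assumptions. */
struct elf64_rela_sketch {
	uint64_t r_offset;	/* link-time virtual address to patch */
	uint64_t r_info;	/* relocation type in the low 32 bits */
	int64_t  r_addend;	/* link-time value of the target */
};

#define R_AARCH64_RELATIVE	1027

static void apply_relative_relocs(struct elf64_rela_sketch *rela,
				  uint64_t rela_size, uint64_t kaslr_offset)
{
	struct elf64_rela_sketch *end = rela + rela_size / sizeof(*rela);

	for (; rela < end; rela++) {
		/* cmp w12, #R_AARCH64_RELATIVE / b.ne 0b */
		if ((uint32_t)rela->r_info != R_AARCH64_RELATIVE)
			continue;
		/* add x13, x13, x23; str x13, [x11, x23] */
		*(uint64_t *)(uintptr_t)(rela->r_offset + kaslr_offset) =
			(uint64_t)rela->r_addend + kaslr_offset;
	}
}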
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -103,6 +103,7 @@ SECTIONS
 		*(.discard)
 		*(.discard.*)
 		*(.interp .dynamic)
+		*(.dynsym .dynstr .hash)
 	}
 
 	. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -174,19 +175,9 @@ SECTIONS
 	.rela : ALIGN(8) {
 		*(.rela .rela*)
 	}
-	.dynsym : ALIGN(8) {
-		*(.dynsym)
-	}
-	.dynstr : {
-		*(.dynstr)
-	}
-	.hash : {
-		*(.hash)
-	}
 
-	__rela_offset	= ADDR(.rela) - KIMAGE_VADDR;
+	__rela_offset	= ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
 	__rela_size	= SIZEOF(.rela);
-	__dynsym_offset	= ADDR(.dynsym) - KIMAGE_VADDR;
 
 	. = ALIGN(SEGMENT_ALIGN);
 	__init_end = .;
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -686,9 +686,9 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
 	/*
 	 * Check whether the physical FDT address is set and meets the minimum
 	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
-	 * at least 8 bytes so that we can always access the size field of the
-	 * FDT header after mapping the first chunk, double check here if that
-	 * is indeed the case.
+	 * at least 8 bytes so that we can always access the magic and size
+	 * fields of the FDT header after mapping the first chunk, double check
+	 * here if that is indeed the case.
 	 */
 	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
 	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
@@ -716,7 +716,7 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
 	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
 			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
 
-	if (fdt_check_header(dt_virt) != 0)
+	if (fdt_magic(dt_virt) != FDT_MAGIC)
 		return NULL;
 
 	*size = fdt_totalsize(dt_virt);
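The reason only the magic test is safe at this point: with MIN_FDT_ALIGN of 8, the worst-case first mapping guarantees no more than the initial 8 bytes of the blob, which is exactly the magic and totalsize fields, whereas fdt_check_header() also reads version and offset fields deeper into the header that may still be unmapped. A user-space-style sketch of the same two-step probe using the real libfdt accessors fdt_magic(), fdt_totalsize() and FDT_MAGIC (the helper name and the size cap below are made up for illustration; the kernel caps the mapping with its own MAX_FDT_SIZE limit):

#include <libfdt.h>

#define FDT_MAP_LIMIT	(2 * 1024 * 1024)	/* stand-in for a mapping size cap */

/* Hypothetical helper: validate only what the first 8 mapped bytes allow. */
static const void *probe_fdt_first_chunk(const void *first_chunk, int *size)
{
	/* Bytes 0-3 of the header: the magic value. */
	if (fdt_magic(first_chunk) != FDT_MAGIC)
		return NULL;

	/* Bytes 4-7 of the header: the total size of the blob. */
	*size = fdt_totalsize(first_chunk);
	if (*size > FDT_MAP_LIMIT)
		return NULL;

	/*
	 * fdt_check_header() and anything else touching version or
	 * structure offsets must wait until all *size bytes are mapped.
	 */
	return first_chunk;
}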