If a Cortex-A715 cpu sees a page mapping permissions change from executable to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers, on the next instruction abort caused by permission fault. Only user-space does executable to non-executable permission transition via mprotect() system call which calls ptep_modify_prot_start() and ptep_modify_prot_commit() helpers, while changing the page mapping. The platform code can override these helpers via __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION. Work around the problem via doing a break-before-make TLB invalidation, for all executable user space mappings, that go through mprotect() system call. This overrides ptep_modify_prot_start() and ptep_modify_prot_commit(), via defining __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION on the platform thus giving an opportunity to intercept user space exec mappings, and do the necessary TLB invalidation. Similar interceptions are also implemented for HugeTLB. Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will@kernel.org> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Mark Rutland <mark.rutland@arm.com> Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com> Link: https://lore.kernel.org/r/20230102061651.34745-1-anshuman.khandual@arm.com Signed-off-by: Will Deacon <will@kernel.org>
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/hugetlb.h
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/include/asm/hugetlb.h
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/page.h>

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
/*
 * Let arm64 decide per hstate whether a huge page size supports
 * migration, overriding the generic default.
 */
#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
extern bool arch_hugetlb_migration_supported(struct hstate *h);
#endif

static inline void arch_clear_hugepage_flags(struct page *page)
|
|
{
|
|
clear_bit(PG_dcache_clean, &page->flags);
|
|
}
|
|
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
|
|
|
|
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
|
|
#define arch_make_huge_pte arch_make_huge_pte
|
|
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
|
|
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
|
pte_t *ptep, pte_t pte);
|
|
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
|
|
extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
|
unsigned long addr, pte_t *ptep,
|
|
pte_t pte, int dirty);
|
|
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
|
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
|
unsigned long addr, pte_t *ptep);
|
|
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
|
|
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
|
|
unsigned long addr, pte_t *ptep);
|
|
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
|
|
extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
|
unsigned long addr, pte_t *ptep);
|
|
#define __HAVE_ARCH_HUGE_PTE_CLEAR
|
|
extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
|
|
pte_t *ptep, unsigned long sz);
|
|
#define __HAVE_ARCH_HUGE_PTEP_GET
|
|
extern pte_t huge_ptep_get(pte_t *ptep);
|
|
|
|
void __init arm64_hugetlb_cma_reserve(void);
|
|
|
|
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
|
|
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
|
|
unsigned long addr, pte_t *ptep);
|
|
|
|
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
|
|
extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
|
|
unsigned long addr, pte_t *ptep,
|
|
pte_t old_pte, pte_t new_pte);
|
|
|
|
#include <asm-generic/hugetlb.h>
|
|
|
|
#endif /* __ASM_HUGETLB_H */
|