/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_KFENCE_H
#define _ASM_S390_KFENCE_H

#include <linux/mm.h>
#include <linux/kfence.h>
#include <asm/set_memory.h>
#include <asm/page.h>

void __kernel_map_pages(struct page *page, int numpages, int enable);

static __always_inline bool arch_kfence_init_pool(void)
{
	return true;
}

#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)

/*
 * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(),
 * but earlier where page table allocations still happen with memblock.
 * Reason is that arch_kfence_init_pool() gets called when the system
 * is still in a limbo state - disabling and enabling bottom halves is
 * not yet allowed, but that is what our page_table_alloc() would do.
 */
static __always_inline void kfence_split_mapping(void)
{
#ifdef CONFIG_KFENCE
	unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;

	set_memory_4k((unsigned long)__kfence_pool, pool_pages);
#endif
}

static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	__kernel_map_pages(virt_to_page((void *)addr), 1, !protect);

	return true;
}

#endif /* _ASM_S390_KFENCE_H */