// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited

#define _GNU_SOURCE

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>
#include <sys/mman.h>

#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"

static size_t page_sz;

static int check_usermem_access_fault(int mem_type, int mode, int mapping)
{
	int fd, i, err;
	char val = 'A';
	size_t len, read_len;
	void *ptr, *ptr_next;

	err = KSFT_FAIL;
	/* Two pages, so the second page can later be re-tagged on its own */
	len = 2 * page_sz;
	mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
	fd = create_temp_file();
	if (fd == -1)
		return KSFT_FAIL;
	/* Fill the temporary file with 'len' bytes of the test pattern */
	for (i = 0; i < len; i++)
		write(fd, &val, sizeof(val));
	lseek(fd, 0, SEEK_SET);
	ptr = mte_allocate_memory(len, mem_type, mapping, true);
	if (check_allocated_memory(ptr, len, mem_type, true) != KSFT_PASS) {
		close(fd);
		return KSFT_FAIL;
	}
	mte_initialize_current_context(mode, (uintptr_t)ptr, len);
	/* Copy from the file into the buffer with a valid tag */
	read_len = read(fd, ptr, len);
	mte_wait_after_trig();
	if (cur_mte_cxt.fault_valid || read_len < len)
		goto usermem_acc_err;
	/* Verify the same pattern is read back */
	for (i = 0; i < len; i++)
		if (*(char *)(ptr + i) != val)
			break;
	if (i < len)
		goto usermem_acc_err;

	/* Re-tag the second page of the buffer with a different tag value */
	ptr_next = (void *)((unsigned long)ptr + page_sz);
	ptr_next = mte_insert_new_tag(ptr_next);
	mte_set_tag_address_range(ptr_next, page_sz);

	lseek(fd, 0, SEEK_SET);
	/* Copy from the file into the buffer, now carrying an invalid tag */
	read_len = read(fd, ptr, len);
	mte_wait_after_trig();
	/*
	 * A kernel access to user memory with an invalid tag should make the
	 * read() fail (short read, no fault delivered to userspace) in sync
	 * mode, but may still succeed in async mode, as per the MTE userspace
	 * support implemented in the arm64 kernel.
	 */
	if (mode == MTE_SYNC_ERR &&
	    !cur_mte_cxt.fault_valid && read_len < len) {
		err = KSFT_PASS;
	} else if (mode == MTE_ASYNC_ERR &&
		   !cur_mte_cxt.fault_valid && read_len == len) {
		err = KSFT_PASS;
	}
usermem_acc_err:
	mte_free_memory((void *)ptr, len, mem_type, true);
	close(fd);
	return err;
}

int main(int argc, char *argv[])
{
	int err;

	page_sz = getpagesize();
	if (!page_sz) {
		ksft_print_msg("ERR: Unable to get page size\n");
		return KSFT_FAIL;
	}
	err = mte_default_setup();
	if (err)
		return err;

	/* Register signal handlers */
	mte_register_signal(SIGSEGV, mte_default_handler);

	/* Set test plan */
	ksft_set_plan(4);

	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
		      "Check memory access from kernel in sync mode, private mapping and mmap memory\n");
	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
		      "Check memory access from kernel in sync mode, shared mapping and mmap memory\n");

	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
		      "Check memory access from kernel in async mode, private mapping and mmap memory\n");
	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
		      "Check memory access from kernel in async mode, shared mapping and mmap memory\n");

	mte_restore_setup();
	ksft_print_cnts();
	return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}