/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/ktime.h>

/* If a kernel subsystem allows eBPF programs to call this function, its
 * verifier_ops->get_func_proto() callback should return
 * bpf_map_lookup_elem_proto, so that the verifier can properly check the
 * arguments.
 *
 * Different map implementations rely on rcu in the map methods
 * lookup/update/delete, therefore eBPF programs must run under the rcu read
 * lock if they are allowed to access maps, so check rcu_read_lock_held() in
 * all three functions.
 */
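/* Illustrative sketch, not part of this file: a subsystem that lets its
 * programs call the map helpers defined below would hand out their protos
 * from its verifier_ops->get_func_proto() callback, roughly as sketched
 * here. The enum bpf_func_id values and the extern protos come from
 * <linux/bpf.h>; the function name sample_func_proto is hypothetical, and
 * the callback signature is the one used by this kernel version.
 */
#if 0	/* example only, not compiled */
static const struct bpf_func_proto *sample_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	default:
		return NULL;	/* unknown helper: verifier rejects the call */
	}
}
#endif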
static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* the verifier checked that R1 contains a valid pointer to a bpf_map
	 * and that R2 points to the program stack with map->key_size bytes
	 * initialized
	 */
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;
	void *value;

	WARN_ON_ONCE(!rcu_read_lock_held());

	value = map->ops->map_lookup_elem(map, key);

	/* lookup() returns either a pointer to the element value or NULL,
	 * which is the meaning of the PTR_TO_MAP_VALUE_OR_NULL return type
	 */
	return (unsigned long) value;
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func = bpf_map_lookup_elem,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* same verifier guarantees as for lookup: R1 is a valid map, and
	 * R2/R3 point to initialized key/value areas on the program stack
	 */
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;
	void *value = (void *) (unsigned long) r3;

	WARN_ON_ONCE(!rcu_read_lock_held());

	return map->ops->map_update_elem(map, key, value, r4);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func = bpf_map_update_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_PTR_TO_MAP_VALUE,
	.arg4_type = ARG_ANYTHING,
};

static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
	void *key = (void *) (unsigned long) r2;

	WARN_ON_ONCE(!rcu_read_lock_held());

	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func = bpf_map_delete_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};
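/* Illustrative sketch, not part of this file: from the eBPF program side,
 * the three map helpers above are reached through their BPF_FUNC_* ids. The
 * verifier forces a NULL check on the pointer returned by lookup before it
 * may be dereferenced, matching RET_PTR_TO_MAP_VALUE_OR_NULL above. The
 * function-pointer declarations below follow the samples/bpf style and
 * deliberately reuse the helper names; "my_map" and prog() are hypothetical,
 * assuming a map with u32 keys and u64 values.
 */
#if 0	/* example only: restricted C, compiled with LLVM to BPF */
static void *(*bpf_map_lookup_elem)(void *map, void *key) =
	(void *) BPF_FUNC_map_lookup_elem;
static int (*bpf_map_update_elem)(void *map, void *key, void *value,
				  unsigned long long flags) =
	(void *) BPF_FUNC_map_update_elem;

int prog(void *ctx)
{
	u32 key = 0;
	u64 init = 1, *val;

	val = bpf_map_lookup_elem(&my_map, &key);
	if (val)
		(*val)++;	/* NULL check satisfied, safe to dereference */
	else
		bpf_map_update_elem(&my_map, &key, &init, BPF_ANY);
	return 0;
}
#endif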
static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return prandom_u32();
}

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func = bpf_get_prandom_u32,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return raw_smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func = bpf_get_smp_processor_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func = bpf_ktime_get_ns,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};
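/* Illustrative sketch, not part of this file: the helpers above take no
 * arguments, so a program simply calls them through their BPF_FUNC_* ids.
 * Because bpf_ktime_get_ns_proto is gpl_only, the program must be loaded
 * with a GPL-compatible license or the verifier rejects the call. The
 * declarations follow the samples/bpf style; prog() and the SEC() macro
 * usage are hypothetical.
 */
#if 0	/* example only: restricted C, compiled with LLVM to BPF */
static u64 (*bpf_ktime_get_ns)(void) = (void *) BPF_FUNC_ktime_get_ns;
static u32 (*bpf_get_smp_processor_id)(void) =
	(void *) BPF_FUNC_get_smp_processor_id;

char _license[] SEC("license") = "GPL";	/* needed for gpl_only helpers */

int prog(void *ctx)
{
	u64 now = bpf_ktime_get_ns();		/* NMI-safe monotonic time */
	u32 cpu = bpf_get_smp_processor_id();

	/* e.g. record (cpu, now) in a map for per-cpu latency accounting */
	return 0;
}
#endif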