// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>

#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
	struct sk_buff *skb;

	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_put(skb, size);

	return skb;
}

static unsigned int
nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_req_map_op);
	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

	return size;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
}

static unsigned int
nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_reply_map_op);
	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

	return size;
}

static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
			 struct cmsg_reply_map_simple *reply)
{
	static const int res_table[] = {
		[CMSG_RC_SUCCESS]	= 0,
		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
	};
	u32 rc;

	rc = be32_to_cpu(reply->rc);
	if (rc >= ARRAY_SIZE(res_table)) {
		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
		return -EIO;
	}

	return res_table[rc];
}

long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
	struct cmsg_reply_map_alloc_tbl *reply;
	struct cmsg_req_map_alloc_tbl *req;
	struct sk_buff *skb;
	u32 tid;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->key_size = cpu_to_be32(map->key_size);
	req->value_size = cpu_to_be32(map->value_size);
	req->max_entries = cpu_to_be32(map->max_entries);
	req->map_type = cpu_to_be32(map->map_type);
	req->map_flags = 0;

	skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
				  sizeof(*reply));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	tid = be32_to_cpu(reply->tid);
	dev_consume_skb_any(skb);

	return tid;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
	struct cmsg_reply_map_free_tbl *reply;
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb) {
		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
		return;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);

	skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
				  sizeof(*reply));
	if (IS_ERR(skb)) {
		cmsg_warn(bpf, "leaking map - I/O error\n");
		return;
	}

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

	dev_consume_skb_any(skb);
}
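
/* Accessors for the n-th key/value slot of a map operation message.
 * Entries are packed back to back in the data area, each entry being a
 * key slot of bpf->cmsg_key_sz bytes immediately followed by a value
 * slot of bpf->cmsg_val_sz bytes:
 *
 *   data: | key 0 | value 0 | key 1 | value 1 | ...
 */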
static void *
nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
		     unsigned int n)
{
	return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
		     unsigned int n)
{
	return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
		       unsigned int n)
{
	return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
		       unsigned int n)
{
	return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	struct bpf_map *map = &offmap->map;
	struct cmsg_reply_map_op *reply;
	struct cmsg_req_map_op *req;
	struct sk_buff *skb;
	int err;

	/* FW messages have no space for more than 32 bits of flags */
	if (flags >> 32)
		return -EOPNOTSUPP;

	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);
	req->count = cpu_to_be32(1);
	req->flags = cpu_to_be32(flags);

	/* Copy inputs */
	if (key)
		memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
	if (value)
		memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
		       map->value_size);

	skb = nfp_ccm_communicate(&bpf->ccm, skb, op,
				  nfp_bpf_cmsg_map_reply_size(bpf, 1));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	/* Copy outputs */
	if (out_key)
		memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
		       map->key_size);
	if (out_value)
		memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
		       map->value_size);

	dev_consume_skb_any(skb);

	return 0;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags)
{
	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
				     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
				     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value)
{
	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
				     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
				     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
				     key, NULL, 0, next_key, NULL);
}
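
/* Lower bound on the control message MTU: the default vNIC MTU, or a
 * single-entry map request/reply, whichever is largest.
 */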
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
{
	return max3((unsigned int)NFP_NET_DEFAULT_MTU,
		    nfp_bpf_cmsg_map_req_size(bpf, 1),
		    nfp_bpf_cmsg_map_reply_size(bpf, 1));
}

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;

	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
		dev_kfree_skb_any(skb);
		return;
	}

	/* BPF event notifications are consumed here; everything else is a
	 * reply to a request and is handed to the CCM layer for matching.
	 */
	if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
		if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
			dev_consume_skb_any(skb);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	nfp_ccm_rx(&bpf->ccm, skb);
}

void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
	const struct nfp_ccm_hdr *hdr = data;
	struct nfp_app_bpf *bpf = app->priv;

	if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
		return;
	}

	if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
		nfp_bpf_event_output(bpf, data, len);
	else
		cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
			  hdr->type);
}