// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */

#include <linux/bpf.h>
#include <linux/bpf_mprog.h>

/* Resolve a relative BPF link given as id (BPF_F_ID set) or fd into the
 * tuple. On success the tuple holds a link reference which the caller
 * must drop via bpf_mprog_tuple_put(). If neither an id flag nor a
 * non-zero id_or_fd is given, -EINVAL is returned (link stays at its
 * ERR_PTR(-EINVAL) default). A non-zero expected prog type is enforced
 * against the link's program.
 */
static int bpf_mprog_link(struct bpf_tuple *tuple,
			  u32 id_or_fd, u32 flags,
			  enum bpf_prog_type type)
{
	struct bpf_link *link = ERR_PTR(-EINVAL);
	bool id = flags & BPF_F_ID;

	if (id)
		link = bpf_link_by_id(id_or_fd);
	else if (id_or_fd)
		link = bpf_link_get_from_fd(id_or_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);
	if (type && link->prog->type != type) {
		/* Type mismatch: drop the reference taken above. */
		bpf_link_put(link);
		return -EINVAL;
	}

	tuple->link = link;
	tuple->prog = link->prog;
	return 0;
}

/* Resolve a relative BPF program given as id (BPF_F_ID set) or fd into
 * the tuple. Mirrors bpf_mprog_link(): on success the tuple holds a
 * prog reference dropped via bpf_mprog_tuple_put(); tuple->link is
 * NULLed to mark this as a prog-only tuple.
 */
static int bpf_mprog_prog(struct bpf_tuple *tuple,
			  u32 id_or_fd, u32 flags,
			  enum bpf_prog_type type)
{
	struct bpf_prog *prog = ERR_PTR(-EINVAL);
	bool id = flags & BPF_F_ID;

	if (id)
		prog = bpf_prog_by_id(id_or_fd);
	else if (id_or_fd)
		prog = bpf_prog_get(id_or_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);
	if (type && prog->type != type) {
		/* Type mismatch: drop the reference taken above. */
		bpf_prog_put(prog);
		return -EINVAL;
	}

	tuple->link = NULL;
	tuple->prog = prog;
	return 0;
}

/* Populate the tuple for the relative object (the anchor of a
 * before/after request): a link when BPF_F_LINK is set, otherwise a
 * program. The tuple is zeroed first so an "empty" request yields a
 * NULL link/prog pair.
 */
static int bpf_mprog_tuple_relative(struct bpf_tuple *tuple,
				    u32 id_or_fd, u32 flags,
				    enum bpf_prog_type type)
{
	bool link = flags & BPF_F_LINK;
	bool id = flags & BPF_F_ID;

	memset(tuple, 0, sizeof(*tuple));
	if (link)
		return bpf_mprog_link(tuple, id_or_fd, flags, type);
	/* If no relevant flag is set and no id_or_fd was passed, then
	 * tuple link/prog is just NULLed. This is the case when before/
	 * after selects first/last position without passing fd.
	 */
	if (!id && !id_or_fd)
		return 0;
	return bpf_mprog_prog(tuple, id_or_fd, flags, type);
}

/* Release whatever reference a tuple holds: the link reference if it is
 * link-backed, otherwise the prog reference (if any). Safe on a zeroed
 * tuple.
 */
static void bpf_mprog_tuple_put(struct bpf_tuple *tuple)
{
	if (tuple->link)
		bpf_link_put(tuple->link);
	else if (tuple->prog)
		bpf_prog_put(tuple->prog);
}

/* The bpf_mprog_{replace,delete}() operate on exact idx position with the
 * one exception that for deletion we support delete from front/back. In
 * case of front idx is -1, in case of back idx is bpf_mprog_total(entry).
 * Adjustment to first and last entry is trivial. The bpf_mprog_insert()
 * we have to deal with the following cases:
 *
 * idx + before:
 *
 * Insert P4 before P3: idx for old array is 1, idx for new array is 2,
 * hence we adjust target idx for the new array, so that memmove copies
 * P1 and P2 to the new entry, and we insert P4 into idx 2. Inserting
 * before P1 would have old idx -1 and new idx 0.
 *
 * +--+--+--+     +--+--+--+--+     +--+--+--+--+
 * |P1|P2|P3| ==> |P1|P2|  |P3| ==> |P1|P2|P4|P3|
 * +--+--+--+     +--+--+--+--+     +--+--+--+--+
 *
 * idx + after:
 *
 * Insert P4 after P2: idx for old array is 2, idx for new array is 2.
 * Again, memmove copies P1 and P2 to the new entry, and we insert P4
 * into idx 2. Inserting after P3 would have both old/new idx at 4 aka
 * bpf_mprog_total(entry).
 *
 * +--+--+--+     +--+--+--+--+     +--+--+--+--+
 * |P1|P2|P3| ==> |P1|P2|  |P3| ==> |P1|P2|P4|P3|
 * +--+--+--+     +--+--+--+--+     +--+--+--+--+
 */
static int bpf_mprog_replace(struct bpf_mprog_entry *entry,
			     struct bpf_mprog_entry **entry_new,
			     struct bpf_tuple *ntuple, int idx)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	struct bpf_prog *oprog;

	/* In-place replacement at exact idx: overwrite the slot with the
	 * new tuple and drop the old prog's reference. If the new tuple
	 * is link-backed, the old slot must not have been link-managed
	 * (the link owns its prog reference) — asserted below.
	 */
	bpf_mprog_read(entry, idx, &fp, &cp);
	oprog = READ_ONCE(fp->prog);
	bpf_mprog_write(fp, cp, ntuple);
	if (!ntuple->link) {
		WARN_ON_ONCE(cp->link);
		bpf_prog_put(oprog);
	}
	/* Replacement happens in place, so the active entry is reused. */
	*entry_new = entry;
	return 0;
}

static int bpf_mprog_insert(struct bpf_mprog_entry *entry,
			    struct bpf_mprog_entry **entry_new,
			    struct bpf_tuple *ntuple, int idx, u32 flags)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_entry *peer;
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;

	/* Build the updated array in the inactive peer entry, then hand
	 * it back through entry_new for the caller to activate.
	 */
	peer = bpf_mprog_peer(entry);
	bpf_mprog_entry_copy(peer, entry);
	if (idx == total)
		goto insert;
	else if (flags & BPF_F_BEFORE)
		/* See idx diagram above: before-insertion targets the
		 * slot right after the old idx in the new array.
		 */
		idx += 1;
	bpf_mprog_entry_grow(peer, idx);
insert:
	bpf_mprog_read(peer, idx, &fp, &cp);
	bpf_mprog_write(fp, cp, ntuple);
	bpf_mprog_inc(peer);
	*entry_new = peer;
	return 0;
}

static int bpf_mprog_delete(struct bpf_mprog_entry *entry,
			    struct bpf_mprog_entry **entry_new,
			    struct bpf_tuple *dtuple, int idx)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_entry *peer;

	/* As with insert, the new array is built in the peer entry. */
	peer = bpf_mprog_peer(entry);
	bpf_mprog_entry_copy(peer, entry);
	/* Map the front (-1) / back (total) sentinels to the real
	 * first/last slot, per the comment block above.
	 */
	if (idx == -1)
		idx = 0;
	else if (idx == total)
		idx = total - 1;
	bpf_mprog_entry_shrink(peer, idx);
	bpf_mprog_dec(peer);
	bpf_mprog_mark_for_release(peer, dtuple);
	*entry_new = peer;
	return 0;
}

/* In bpf_mprog_pos_*() we evaluate the target position for the BPF
 * program/link that needs to be replaced, inserted or deleted for
 * each "rule" independently. If all rules agree on that position
 * or existing element, then enact replacement, addition or deletion.
 * If this is not the case, then the request cannot be satisfied and
 * we bail out with an error.
 */
static int bpf_mprog_pos_exact(struct bpf_mprog_entry *entry,
			       struct bpf_tuple *tuple)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	int i;

	for (i = 0; i < bpf_mprog_total(entry); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		if (tuple->prog == READ_ONCE(fp->prog))
			/* Prog found but owned by a different link:
			 * cannot be touched through this handle.
			 */
			return tuple->link == cp->link ? i : -EBUSY;
	}
	return -ENOENT;
}

static int bpf_mprog_pos_before(struct bpf_mprog_entry *entry,
				struct bpf_tuple *tuple)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	int i;

	for (i = 0; i < bpf_mprog_total(entry); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		if (tuple->prog == READ_ONCE(fp->prog) &&
		    (!tuple->link || tuple->link == cp->link))
			/* Position before the anchor; i - 1 may be the
			 * front sentinel -1.
			 */
			return i - 1;
	}
	/* No anchor given means "first position" (-1). */
	return tuple->prog ? -ENOENT : -1;
}

static int bpf_mprog_pos_after(struct bpf_mprog_entry *entry,
			       struct bpf_tuple *tuple)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	int i;

	for (i = 0; i < bpf_mprog_total(entry); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		if (tuple->prog == READ_ONCE(fp->prog) &&
		    (!tuple->link || tuple->link == cp->link))
			/* Position after the anchor; i + 1 may equal
			 * bpf_mprog_total() (the back sentinel).
			 */
			return i + 1;
	}
	/* No anchor given means "last position" (total). */
	return tuple->prog ? -ENOENT : bpf_mprog_total(entry);
}

/* Attach prog_new (optionally via link) to the entry, honoring the
 * BPF_F_{REPLACE,BEFORE,AFTER,ID,LINK} positioning flags and an optional
 * expected revision. Each requested rule computes a target idx
 * independently; they must all agree (see bpf_mprog_pos_*() comment),
 * otherwise -ERANGE. Other errors: -ESTALE on revision mismatch,
 * -EEXIST if prog_new is already attached, -EINVAL for inconsistent
 * flag/fd combinations. On success *entry_new carries the entry the
 * caller should activate.
 */
int bpf_mprog_attach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog_new, struct bpf_link *link,
		     struct bpf_prog *prog_old,
		     u32 flags, u32 id_or_fd, u64 revision)
{
	struct bpf_tuple rtuple, ntuple = {
		.prog = prog_new,
		.link = link,
	}, otuple = {
		.prog = prog_old,
		.link = link,
	};
	int ret, idx = -ERANGE, tidx;

	if (revision && revision != bpf_mprog_revision(entry))
		return -ESTALE;
	if (bpf_mprog_exists(entry, prog_new))
		return -EEXIST;
	/* Resolve the relative anchor object; BPF_F_REPLACE is masked
	 * out since it refers to otuple, not the anchor.
	 */
	ret = bpf_mprog_tuple_relative(&rtuple, id_or_fd,
				       flags & ~BPF_F_REPLACE,
				       prog_new->type);
	if (ret)
		return ret;
	if (flags & BPF_F_REPLACE) {
		tidx = bpf_mprog_pos_exact(entry, &otuple);
		if (tidx < 0) {
			ret = tidx;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_BEFORE) {
		tidx = bpf_mprog_pos_before(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			/* Real error propagates; a mere disagreement
			 * with a previously computed idx is -ERANGE.
			 */
			ret = tidx < -1 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_AFTER) {
		tidx = bpf_mprog_pos_after(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < 0 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (idx < -1) {
		/* No positioning rule fired. Only valid for a plain
		 * attach without anchor or flags: append at the end.
		 */
		if (rtuple.prog || flags) {
			ret = -EINVAL;
			goto out;
		}
		idx = bpf_mprog_total(entry);
		flags = BPF_F_AFTER;
	}
	if (idx >= bpf_mprog_max()) {
		ret = -ERANGE;
		goto out;
	}
	if (flags & BPF_F_REPLACE)
		ret = bpf_mprog_replace(entry, entry_new, &ntuple, idx);
	else
		ret = bpf_mprog_insert(entry, entry_new, &ntuple, idx, flags);
out:
	bpf_mprog_tuple_put(&rtuple);
	return ret;
}

/* Fill in (or cross-check) the deletion tuple from the slot at idx,
 * after mapping the front/back sentinels to real slots.
 */
static int bpf_mprog_fetch(struct bpf_mprog_entry *entry,
			   struct bpf_tuple *tuple, int idx)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_cp *cp;
	struct bpf_mprog_fp *fp;
	struct bpf_prog *prog;
	struct bpf_link *link;

	if (idx == -1)
		idx = 0;
	else if (idx == total)
		idx = total - 1;
	bpf_mprog_read(entry, idx, &fp, &cp);
	prog = READ_ONCE(fp->prog);
	link = cp->link;
	/* The deletion request can either be without filled tuple in which
	 * case it gets populated here based on idx, or with filled tuple
	 * where the only thing we end up doing is the WARN_ON_ONCE() assert.
	 * If we hit a BPF link at the given index, it must not be removed
	 * from opts path.
	 */
	if (link && !tuple->link)
		return -EBUSY;
	WARN_ON_ONCE(tuple->prog && tuple->prog != prog);
	WARN_ON_ONCE(tuple->link && tuple->link != link);
	tuple->prog = prog;
	tuple->link = link;
	return 0;
}

/* Detach prog (optionally via link) from the entry. Mirrors
 * bpf_mprog_attach()'s idx agreement scheme for BPF_F_{BEFORE,AFTER};
 * BPF_F_REPLACE is invalid here. Errors: -EINVAL on bad flags,
 * -ESTALE on revision mismatch, -EBUSY when the slot is link-managed
 * but no link was given, -ENOENT when the prog is not attached. On
 * success *entry_new carries the shrunken entry to activate.
 */
int bpf_mprog_detach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog, struct bpf_link *link,
		     u32 flags, u32 id_or_fd, u64 revision)
{
	struct bpf_tuple rtuple, dtuple = {
		.prog = prog,
		.link = link,
	};
	int ret, idx = -ERANGE, tidx;

	if (flags & BPF_F_REPLACE)
		return -EINVAL;
	if (revision && revision != bpf_mprog_revision(entry))
		return -ESTALE;
	ret = bpf_mprog_tuple_relative(&rtuple, id_or_fd, flags,
				       prog ? prog->type :
				       BPF_PROG_TYPE_UNSPEC);
	if (ret)
		return ret;
	if (dtuple.prog) {
		tidx = bpf_mprog_pos_exact(entry, &dtuple);
		if (tidx < 0) {
			ret = tidx;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_BEFORE) {
		tidx = bpf_mprog_pos_before(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < -1 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_AFTER) {
		tidx = bpf_mprog_pos_after(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < 0 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (idx < -1) {
		/* No rule fired: plain detach without anchor/flags
		 * removes from the back.
		 */
		if (rtuple.prog || flags) {
			ret = -EINVAL;
			goto out;
		}
		idx = bpf_mprog_total(entry);
		flags = BPF_F_AFTER;
	}
	if (idx >= bpf_mprog_max()) {
		ret = -ERANGE;
		goto out;
	}
	ret = bpf_mprog_fetch(entry, &dtuple, idx);
	if (ret)
		goto out;
	ret = bpf_mprog_delete(entry, entry_new, &dtuple, idx);
out:
	bpf_mprog_tuple_put(&rtuple);
	return ret;
}

/* BPF_PROG_QUERY handler: copy revision, count, and per-slot prog/link
 * ids (attach flags are always 0 here) to user space. Returns -ENOSPC
 * when the user buffer holds fewer than count entries (a truncated set
 * is still copied), -EFAULT on copy failures, -EINVAL on unsupported
 * query/attach flags in the request.
 */
int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
		    struct bpf_mprog_entry *entry)
{
	u32 __user *uprog_flags, *ulink_flags;
	u32 __user *uprog_id, *ulink_id;
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	struct bpf_prog *prog;
	const u32 flags = 0;
	int i, ret = 0;
	u32 id, count;
	u64 revision;

	if (attr->query.query_flags || attr->query.attach_flags)
		return -EINVAL;
	revision = bpf_mprog_revision(entry);
	count = bpf_mprog_total(entry);
	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.count, &count, sizeof(count)))
		return -EFAULT;
	uprog_id = u64_to_user_ptr(attr->query.prog_ids);
	uprog_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
	ulink_id = u64_to_user_ptr(attr->query.link_ids);
	ulink_flags = u64_to_user_ptr(attr->query.link_attach_flags);
	/* Count-only query or nothing attached: done after the header
	 * fields above.
	 */
	if (attr->query.count == 0 || !uprog_id || !count)
		return 0;
	if (attr->query.count < count) {
		count = attr->query.count;
		ret = -ENOSPC;
	}
	for (i = 0; i < bpf_mprog_max(); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		prog = READ_ONCE(fp->prog);
		/* First empty slot terminates the populated prefix. */
		if (!prog)
			break;
		id = prog->aux->id;
		if (copy_to_user(uprog_id + i, &id, sizeof(id)))
			return -EFAULT;
		if (uprog_flags &&
		    copy_to_user(uprog_flags + i, &flags, sizeof(flags)))
			return -EFAULT;
		/* Slots attached via opts rather than link report 0. */
		id = cp->link ? cp->link->id : 0;
		if (ulink_id &&
		    copy_to_user(ulink_id + i, &id, sizeof(id)))
			return -EFAULT;
		if (ulink_flags &&
		    copy_to_user(ulink_flags + i, &flags, sizeof(flags)))
			return -EFAULT;
		if (i + 1 == count)
			break;
	}
	return ret;
}