// NOLINTNEXTLINE(bugprone-reserved-identifier,cert-dcl37-c,cert-dcl51-cpp)
#define _GNU_SOURCE
#include "libpldm/instance-id.h"
#include "libpldm/pldm.h"
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#define BIT(i) (1UL << (i))

#define PLDM_TID_MAX 256
#define PLDM_INST_ID_MAX 32

/* We need to track our allocations explicitly: the kernel merges and splits
 * OFD lock ranges, so per-IID ownership can't be recovered from the locks
 * alone.
 */
struct pldm_tid_state {
        pldm_instance_id_t prev;
        uint32_t allocations;
};

struct pldm_instance_db {
        struct pldm_tid_state state[PLDM_TID_MAX];
        int lock_db_fd;
};

static inline int iid_next(pldm_instance_id_t cur)
{
        return (cur + 1) % PLDM_INST_ID_MAX;
}

int pldm_instance_db_init(struct pldm_instance_db **ctx, const char *dbpath)
{
        struct pldm_instance_db *l_ctx;

        /* Make sure the provided pointer was initialised to NULL. In the
         * future if we stabilise the ABI and expose the struct definition the
         * caller can potentially pass a valid pointer to a struct they've
         * allocated
         */
        if (!ctx || *ctx) {
                return -EINVAL;
        }

        l_ctx = calloc(1, sizeof(struct pldm_instance_db));
        if (!l_ctx) {
                return -ENOMEM;
        }

        /* Initialise the previous ID values so the next allocation for each
         * TID starts at zero */
        for (int i = 0; i < PLDM_TID_MAX; i++) {
                l_ctx->state[i].prev = PLDM_INST_ID_MAX - 1;
        }

        /* The lock database may be read-only, either due to permissions or a
         * read-only mountpoint
         */
        l_ctx->lock_db_fd = open(dbpath, O_RDONLY | O_CLOEXEC);
        if (l_ctx->lock_db_fd < 0) {
                free(l_ctx);
                return -errno;
        }
        *ctx = l_ctx;

        return 0;
}

int pldm_instance_db_init_default(struct pldm_instance_db **ctx)
{
        return pldm_instance_db_init(
                ctx, "/usr/share/libpldm/instance-db/default");
}

int pldm_instance_db_destroy(struct pldm_instance_db *ctx)
{
        if (!ctx) {
                return 0;
        }
        close(ctx->lock_db_fd);
        free(ctx);
        return 0;
}
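
/*
 * How allocation works (summarising the steps below): each (TID, IID) pair
 * corresponds to one byte of the lock database, at offset
 * (tid * PLDM_INST_ID_MAX + iid). Reserving an IID means taking a shared OFD
 * lock on that byte. To decide whether anyone else also holds a reservation,
 * we ask via F_OFD_GETLK whether that shared lock could be promoted to an
 * exclusive lock: F_UNLCK means we are the only holder and the IID is ours,
 * while F_RDLCK means another file descriptor has reserved it and we move on
 * to the next IID. The in-memory allocations bitmap records which IIDs we own,
 * since merged/split lock ranges cannot tell us that after the fact.
 */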
int pldm_instance_id_alloc(struct pldm_instance_db *ctx, pldm_tid_t tid,
                           pldm_instance_id_t *iid)
{
        /* Shared lock: advertises a reservation of an IID */
        static const struct flock cfls = {
                .l_type = F_RDLCK,
                .l_whence = SEEK_SET,
                .l_len = 1,
        };
        /* Exclusive lock: used only to test whether others hold the IID */
        static const struct flock cflx = {
                .l_type = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_len = 1,
        };
        uint8_t l_iid;

        if (!iid) {
                return -EINVAL;
        }

        l_iid = ctx->state[tid].prev;
        if (l_iid >= PLDM_INST_ID_MAX) {
                return -EPROTO;
        }

        while ((l_iid = iid_next(l_iid)) != ctx->state[tid].prev) {
                struct flock flop;
                off_t loff;
                int rc;

                /* Have we already allocated this instance ID? */
                if (ctx->state[tid].allocations & BIT(l_iid)) {
                        continue;
                }

                /* Derive the instance ID offset in the lock database */
                loff = tid * PLDM_INST_ID_MAX + l_iid;

                /* Reserve the TID's IID by taking a shared lock on its byte */
                flop = cfls;
                flop.l_start = loff;
                rc = fcntl(ctx->lock_db_fd, F_OFD_SETLK, &flop);
                if (rc < 0) {
                        if (errno == EAGAIN || errno == EINTR) {
                                return -EAGAIN;
                        }
                        return -EPROTO;
                }

                /*
                 * If we *may* promote the lock to exclusive then this IID is
                 * only reserved by us. This is now our allocated IID.
                 *
                 * If we *may not* promote the lock to exclusive then this IID
                 * is also reserved on another file descriptor. Move on to the
                 * next IID index.
                 *
                 * Note that we cannot actually *perform* the promotion in
                 * practice because this is prevented by the lock database
                 * being opened O_RDONLY.
                 */
                flop = cflx;
                flop.l_start = loff;
                rc = fcntl(ctx->lock_db_fd, F_OFD_GETLK, &flop);
                if (rc < 0) {
                        if (errno == EAGAIN || errno == EINTR) {
                                return -EAGAIN;
                        }
                        return -EPROTO;
                }

                /* F_UNLCK is reported as the lock type if we could
                 * successfully promote our lock to F_WRLCK */
                if (flop.l_type == F_UNLCK) {
                        ctx->state[tid].prev = l_iid;
                        ctx->state[tid].allocations |= BIT(l_iid);
                        *iid = l_iid;
                        return 0;
                }
                if (flop.l_type != F_RDLCK) {
                        return -EPROTO;
                }
        }

        /* Failed to allocate an IID after a full loop. Make the caller try
         * again */
        return -EAGAIN;
}

int pldm_instance_id_free(struct pldm_instance_db *ctx, pldm_tid_t tid,
                          pldm_instance_id_t iid)
{
        /* Unlock: drops our reservation of the IID */
        static const struct flock cflu = {
                .l_type = F_UNLCK,
                .l_whence = SEEK_SET,
                .l_len = 1,
        };
        struct flock flop;
        int rc;

        /* Trying to free an instance ID that is not currently allocated */
        if (!(ctx->state[tid].allocations & BIT(iid))) {
                return -EINVAL;
        }

        flop = cflu;
        flop.l_start = tid * PLDM_INST_ID_MAX + iid;
        rc = fcntl(ctx->lock_db_fd, F_OFD_SETLK, &flop);
        if (rc < 0) {
                if (errno == EAGAIN || errno == EINTR) {
                        return -EAGAIN;
                }
                return -EPROTO;
        }

        /* Mark the instance ID as no longer allocated */
        ctx->state[tid].allocations &= ~BIT(iid);

        return 0;
}
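
/*
 * Usage sketch (illustrative only, not part of the library): it shows the
 * intended call sequence of the API above. The PLDM_IID_USAGE_EXAMPLE guard,
 * the TID value of 8 and the error handling are arbitrary choices for the
 * example; it also assumes the default lock database file exists.
 */
#ifdef PLDM_IID_USAGE_EXAMPLE
#include <stdio.h>

int main(void)
{
        struct pldm_instance_db *db = NULL;
        pldm_instance_id_t iid;
        pldm_tid_t tid = 8;
        int rc;

        /* Open the default lock database */
        rc = pldm_instance_db_init_default(&db);
        if (rc < 0) {
                fprintf(stderr, "init failed: %d\n", rc);
                return 1;
        }

        /* Reserve an instance ID for an outgoing request to the TID */
        rc = pldm_instance_id_alloc(db, tid, &iid);
        if (rc == -EAGAIN) {
                /* All instance IDs for this TID are in use; retry later */
                pldm_instance_db_destroy(db);
                return 1;
        }
        if (rc < 0) {
                pldm_instance_db_destroy(db);
                return 1;
        }

        printf("Allocated instance ID %u for TID %u\n", (unsigned)iid,
               (unsigned)tid);

        /* Release the instance ID once the exchange completes or times out */
        rc = pldm_instance_id_free(db, tid, iid);

        pldm_instance_db_destroy(db);

        return rc < 0 ? 1 : 0;
}
#endif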