// SPDX-License-Identifier: GPL-2.0
/**
 * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption
 *
 * Copyright (c) 2019, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/rng.h>
#include "crypto.h"
#include "msg.h"
#include "bcast.h"

#define TIPC_TX_GRACE_PERIOD	msecs_to_jiffies(5000) /* 5s */
#define TIPC_TX_LASTING_TIME	msecs_to_jiffies(10000) /* 10s */
#define TIPC_RX_ACTIVE_LIM	msecs_to_jiffies(3000) /* 3s */
#define TIPC_RX_PASSIVE_LIM	msecs_to_jiffies(15000) /* 15s */

#define TIPC_MAX_TFMS_DEF	10
#define TIPC_MAX_TFMS_LIM	1000

#define TIPC_REKEYING_INTV_DEF	(60 * 24) /* default: 1 day */

/**
 * TIPC Key ids
 */
enum {
	KEY_MASTER = 0,
	KEY_MIN = KEY_MASTER,
	KEY_1 = 1,
	KEY_2,
	KEY_3,
	KEY_MAX = KEY_3,
};

/**
 * TIPC Crypto statistics
 */
enum {
	STAT_OK,
	STAT_NOK,
	STAT_ASYNC,
	STAT_ASYNC_OK,
	STAT_ASYNC_NOK,
	STAT_BADKEYS, /* tx only */
	STAT_BADMSGS = STAT_BADKEYS, /* rx only */
	STAT_NOKEYS,
	STAT_SWITCHES,

	MAX_STATS,
};

/* TIPC crypto statistics' header */
static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
					"async_nok", "badmsgs", "nokeys",
					"switches"};

/* Maximum number of TFMs per key */
int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
/* Key exchange switch, default: on */
int sysctl_tipc_key_exchange_enabled __read_mostly = 1;

/**
 * struct tipc_key - TIPC keys' status indicator
 *
 *         7     6     5     4     3     2     1     0
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 * key: | (reserved)|passive idx| active idx|pending idx|
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 */
struct tipc_key {
#define KEY_BITS (2)
#define KEY_MASK ((1 << KEY_BITS) - 1)
	union {
		struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
			u8 pending:2,
			   active:2,
			   passive:2, /* rx only */
			   reserved:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:2,
			   passive:2, /* rx only */
			   active:2,
			   pending:2;
#else
#error "Please fix <asm/byteorder.h>"
#endif
		} __packed;
		u8 keys;
	};
};
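
/* Example (illustrative only): with passive = 3, active = 2, pending = 1,
 * the union above packs to:
 *
 *	key.keys = (3 << 4) | (2 << 2) | 1 = 0x39
 *
 * which is exactly the value tipc_crypto_key_set_state() computes below.
 */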

/**
 * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
 */
struct tipc_tfm {
	struct crypto_aead *tfm;
	struct list_head list;
};

/**
 * struct tipc_aead - TIPC AEAD key structure
 * @tfm_entry: per-cpu pointer to one entry in TFM list
 * @crypto: TIPC crypto owning this key
 * @cloned: reference to the source key in case of cloning
 * @users: the number of the key users (TX/RX)
 * @salt: the key's SALT value
 * @authsize: authentication tag size (max = 16)
 * @mode: crypto mode applied to the key
 * @hint: a hint for user key
 * @rcu: struct rcu_head
 * @key: the aead key
 * @gen: the key's generation
 * @seqno: the key seqno (cluster scope)
 * @refcnt: the key reference counter
 */
struct tipc_aead {
#define TIPC_AEAD_HINT_LEN (5)
	struct tipc_tfm * __percpu *tfm_entry;
	struct tipc_crypto *crypto;
	struct tipc_aead *cloned;
	atomic_t users;
	u32 salt;
	u8 authsize;
	u8 mode;
	char hint[2 * TIPC_AEAD_HINT_LEN + 1];
	struct rcu_head rcu;
	struct tipc_aead_key *key;
	u16 gen;

	atomic64_t seqno ____cacheline_aligned;
	refcount_t refcnt ____cacheline_aligned;

} ____cacheline_aligned;

/**
 * struct tipc_crypto_stats - TIPC Crypto statistics
 */
struct tipc_crypto_stats {
	unsigned int stat[MAX_STATS];
};

/**
 * struct tipc_crypto - TIPC TX/RX crypto structure
 * @net: struct net
 * @node: TIPC node (RX)
 * @aead: array of pointers to AEAD keys for encryption/decryption
 * @peer_rx_active: replicated peer RX active key index
 * @key_gen: TX/RX key generation
 * @key: the key states
 * @skey_mode: session key's mode
 * @skey: received session key
 * @wq: common workqueue on TX crypto
 * @work: delayed work scheduled for TX/RX
 * @key_distr: key distributing state
 * @rekeying_intv: rekeying interval (in minutes)
 * @stats: the crypto statistics
 * @name: the crypto name
 * @sndnxt: the per-peer sndnxt (TX)
 * @timer1: general timer 1 (jiffies)
 * @timer2: general timer 2 (jiffies)
 * @working: indicates whether the crypto is working or not
 * @key_master: flag indicating if the master key exists
 * @legacy_user: flag indicating if a peer joins w/o master key (for bwd comp.)
 * @nokey: no key indication
 * @lock: tipc_key lock
 */
struct tipc_crypto {
	struct net *net;
	struct tipc_node *node;
	struct tipc_aead __rcu *aead[KEY_MAX + 1];
	atomic_t peer_rx_active;
	u16 key_gen;
	struct tipc_key key;
	u8 skey_mode;
	struct tipc_aead_key *skey;
	struct workqueue_struct *wq;
	struct delayed_work work;
#define KEY_DISTR_SCHED		1
#define KEY_DISTR_COMPL		2
	atomic_t key_distr;
	u32 rekeying_intv;

	struct tipc_crypto_stats __percpu *stats;
	char name[48];

	atomic64_t sndnxt ____cacheline_aligned;
	unsigned long timer1;
	unsigned long timer2;
	union {
		struct {
			u8 working:1;
			u8 key_master:1;
			u8 legacy_user:1;
			u8 nokey:1;
		};
		u8 flags;
	};
	spinlock_t lock; /* crypto lock */

} ____cacheline_aligned;

/* struct tipc_crypto_tx_ctx - TX context for callbacks */
struct tipc_crypto_tx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
	struct tipc_media_addr dst;
};

/* struct tipc_crypto_rx_ctx - RX context for callbacks */
struct tipc_crypto_rx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
};

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
static inline void tipc_aead_put(struct tipc_aead *aead);
static void tipc_aead_free(struct rcu_head *rp);
static int tipc_aead_users(struct tipc_aead __rcu *aead);
static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode);
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg);
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode);
static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err);
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b);
static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err);
static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx);
static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending);
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos,
				  bool master_key);
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb,
						 u8 tx_key);
static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type);
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err);
static void tipc_crypto_do_cmd(struct net *net, int cmd);
static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf);
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
				u16 gen, u8 mode, u32 dnode);
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
static void tipc_crypto_work_tx(struct work_struct *work);
static void tipc_crypto_work_rx(struct work_struct *work);
static int tipc_aead_key_generate(struct tipc_aead_key *skey);

#define is_tx(crypto) (!(crypto)->node)
#define is_rx(crypto) (!is_tx(crypto))

#define key_next(cur) ((cur) % KEY_MAX + 1)
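
/* Example (illustrative only): key_next() cycles through the three user key
 * slots and never yields KEY_MASTER (0):
 *
 *	key_next(1) == 2, key_next(2) == 3, key_next(3) == 1
 */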

#define tipc_aead_rcu_ptr(rcu_ptr, lock)				\
	rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))

#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock)			\
do {									\
	typeof(rcu_ptr) __tmp = rcu_dereference_protected((rcu_ptr),	\
						lockdep_is_held(lock));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	tipc_aead_put(__tmp);						\
} while (0)

#define tipc_crypto_key_detach(rcu_ptr, lock)				\
	tipc_aead_rcu_replace((rcu_ptr), NULL, lock)

/**
 * tipc_aead_key_validate - Validate an AEAD user key
 * @ukey: the user key to be validated
 * @info: the netlink genl_info used for error reporting
 */
int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
{
	int keylen;

	/* Check if algorithm exists */
	if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
		GENL_SET_ERR_MSG(info, "unable to load the algorithm (is the module present?)");
		return -ENODEV;
	}

	/* Currently, we only support the "gcm(aes)" cipher algorithm */
	if (strcmp(ukey->alg_name, "gcm(aes)")) {
		GENL_SET_ERR_MSG(info, "the algorithm is not supported yet");
		return -ENOTSUPP;
	}

	/* Check if key size is correct */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
	if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
		GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
		return -EKEYREJECTED;
	}

	return 0;
}
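
/* A user key as validated above is laid out as [AES key][SALT], so the
 * accepted ukey->keylen values are (illustrative):
 *
 *	16 + 4 = 20 octets (AES-128)
 *	24 + 4 = 28 octets (AES-192)
 *	32 + 4 = 36 octets (AES-256)
 */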

/**
 * tipc_aead_key_generate - Generate new session key
 * @skey: input/output key with new content
 *
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_aead_key_generate(struct tipc_aead_key *skey)
{
	int rc = 0;

	/* Fill the key's content with random bytes from the default RNG */
	rc = crypto_get_default_rng();
	if (likely(!rc)) {
		rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
					  skey->keylen);
		crypto_put_default_rng();
	}

	return rc;
}

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
		tmp = NULL;
	rcu_read_unlock();

	return tmp;
}

static inline void tipc_aead_put(struct tipc_aead *aead)
{
	if (aead && refcount_dec_and_test(&aead->refcnt))
		call_rcu(&aead->rcu, tipc_aead_free);
}

/**
 * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
 * @rp: rcu head pointer
 */
static void tipc_aead_free(struct rcu_head *rp)
{
	struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
	struct tipc_tfm *tfm_entry, *head, *tmp;

	if (aead->cloned) {
		tipc_aead_put(aead->cloned);
	} else {
		head = *get_cpu_ptr(aead->tfm_entry);
		put_cpu_ptr(aead->tfm_entry);
		list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
			crypto_free_aead(tfm_entry->tfm);
			list_del(&tfm_entry->list);
			kfree(tfm_entry);
		}
		/* Free the head */
		crypto_free_aead(head->tfm);
		list_del(&head->list);
		kfree(head);
	}
	free_percpu(aead->tfm_entry);
	kfree_sensitive(aead->key);
	kfree(aead);
}

static int tipc_aead_users(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;
	int users = 0;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		users = atomic_read(&tmp->users);
	rcu_read_unlock();

	return users;
}

static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, 1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, -1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
{
	struct tipc_aead *tmp;
	int cur;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp) {
		do {
			cur = atomic_read(&tmp->users);
			if (cur == val)
				break;
		} while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
	}
	rcu_read_unlock();
}

/**
 * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
 */
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
	struct tipc_tfm **tfm_entry;
	struct crypto_aead *tfm;

	tfm_entry = get_cpu_ptr(aead->tfm_entry);
	*tfm_entry = list_next_entry(*tfm_entry, list);
	tfm = (*tfm_entry)->tfm;
	put_cpu_ptr(tfm_entry);

	return tfm;
}
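
/* Usage sketch: each CPU holds its own cursor into the shared circular TFM
 * list, so concurrent users on different CPUs rotate independently over the
 * same TFM pool:
 *
 *	struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
 */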

/**
 * tipc_aead_init - Initiate TIPC AEAD
 * @aead: returned new TIPC AEAD key handle pointer
 * @ukey: pointer to user key data
 * @mode: the key mode
 *
 * Allocate a (list of) new cipher transformation (TFM) with the specific user
 * key data if valid. The number of TFMs to be allocated can be set via the
 * sysctl "net/tipc/max_tfms" first.
 * All the other AEAD data are initialized as well.
 *
 * Return: 0 if the initiation is successful, otherwise: < 0
 */
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode)
{
	struct tipc_tfm *tfm_entry, *head;
	struct crypto_aead *tfm;
	struct tipc_aead *tmp;
	int keylen, err, cpu;
	int tfm_cnt = 0;

	if (unlikely(*aead))
		return -EEXIST;

	/* Allocate a new AEAD */
	tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
	if (unlikely(!tmp))
		return -ENOMEM;

	/* The key consists of two parts: [AES-KEY][SALT] */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;

	/* Allocate per-cpu TFM entry pointer */
	tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
	if (!tmp->tfm_entry) {
		kfree_sensitive(tmp);
		return -ENOMEM;
	}

	/* Make a list of TFMs with the user key data */
	do {
		tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
		if (IS_ERR(tfm)) {
			err = PTR_ERR(tfm);
			break;
		}

		if (unlikely(!tfm_cnt &&
			     crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
			crypto_free_aead(tfm);
			err = -ENOTSUPP;
			break;
		}

		err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
		err |= crypto_aead_setkey(tfm, ukey->key, keylen);
		if (unlikely(err)) {
			crypto_free_aead(tfm);
			break;
		}

		tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
		if (unlikely(!tfm_entry)) {
			crypto_free_aead(tfm);
			err = -ENOMEM;
			break;
		}
		INIT_LIST_HEAD(&tfm_entry->list);
		tfm_entry->tfm = tfm;

		/* First entry? */
		if (!tfm_cnt) {
			head = tfm_entry;
			for_each_possible_cpu(cpu) {
				*per_cpu_ptr(tmp->tfm_entry, cpu) = head;
			}
		} else {
			list_add_tail(&tfm_entry->list, &head->list);
		}

	} while (++tfm_cnt < sysctl_tipc_max_tfms);

	/* Was any TFM allocated at all? */
	if (!tfm_cnt) {
		free_percpu(tmp->tfm_entry);
		kfree_sensitive(tmp);
		return err;
	}

	/* Form a hex string of some last bytes as the key's hint */
	bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
		TIPC_AEAD_HINT_LEN);

	/* Initialize the other data */
	tmp->mode = mode;
	tmp->cloned = NULL;
	tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
	tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
	memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
	atomic_set(&tmp->users, 0);
	atomic64_set(&tmp->seqno, 0);
	refcount_set(&tmp->refcnt, 1);

	*aead = tmp;
	return 0;
}
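
/* Sizing note for the hint above: bin2hex() emits two characters per input
 * octet, so the last TIPC_AEAD_HINT_LEN (5) key octets become 10 hex digits,
 * which is why struct tipc_aead reserves 2 * TIPC_AEAD_HINT_LEN + 1 chars
 * (incl. the NUL terminator).
 */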

/**
 * tipc_aead_clone - Clone a TIPC AEAD key
 * @dst: dest key for the cloning
 * @src: source key to clone from
 *
 * Make a "copy" of the source AEAD key data to the dest, the TFMs list is
 * common for the keys.
 * A reference to the source is held in the "cloned" pointer for later
 * freeing purposes.
 *
 * Note: this must be done in cluster-key mode only!
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
{
	struct tipc_aead *aead;
	int cpu;

	if (!src)
		return -ENOKEY;

	if (src->mode != CLUSTER_KEY)
		return -EINVAL;

	if (unlikely(*dst))
		return -EEXIST;

	aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
	if (unlikely(!aead))
		return -ENOMEM;

	aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
	if (unlikely(!aead->tfm_entry)) {
		kfree_sensitive(aead);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		*per_cpu_ptr(aead->tfm_entry, cpu) =
				*per_cpu_ptr(src->tfm_entry, cpu);
	}

	memcpy(aead->hint, src->hint, sizeof(src->hint));
	aead->mode = src->mode;
	aead->salt = src->salt;
	aead->authsize = src->authsize;
	atomic_set(&aead->users, 0);
	atomic64_set(&aead->seqno, 0);
	refcount_set(&aead->refcnt, 1);

	WARN_ON(!refcount_inc_not_zero(&src->refcnt));
	aead->cloned = src;

	*dst = aead;
	return 0;
}

/**
 * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
 * @tfm: cipher handle to be registered with the request
 * @crypto_ctx_size: size of crypto context for callback
 * @iv: returned pointer to IV data
 * @req: returned pointer to AEAD request data
 * @sg: returned pointer to SG lists
 * @nsg: number of SG lists to be allocated
 *
 * Allocate memory to store the crypto context data, AEAD request, IV and SG
 * lists, the memory layout is as follows:
 * crypto_ctx || iv || aead_req || sg[]
 *
 * Return: the pointer to the memory areas in case of success, otherwise NULL
 */
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg)
{
	unsigned int iv_size, req_size;
	unsigned int len;
	u8 *mem;

	iv_size = crypto_aead_ivsize(tfm);
	req_size = sizeof(**req) + crypto_aead_reqsize(tfm);

	len = crypto_ctx_size;
	len += iv_size;
	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
	len = ALIGN(len, crypto_tfm_ctx_alignment());
	len += req_size;
	len = ALIGN(len, __alignof__(struct scatterlist));
	len += nsg * sizeof(**sg);

	mem = kmalloc(len, GFP_ATOMIC);
	if (!mem)
		return NULL;

	*iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
			      crypto_aead_alignmask(tfm) + 1);
	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
						crypto_tfm_ctx_alignment());
	*sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
					      __alignof__(struct scatterlist));

	return (void *)mem;
}

/**
 * tipc_aead_encrypt - Encrypt a message
 * @aead: TIPC AEAD key for the message encryption
 * @skb: the input/output skb
 * @b: TIPC bearer where the message will be delivered after the encryption
 * @dst: the destination media address
 * @__dnode: TIPC dest node if "known"
 *
 * Return:
 * 0                   : if the encryption has completed
 * -EINPROGRESS/-EBUSY : if a callback will be performed
 * < 0                 : the encryption has failed
 */
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode)
{
	struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
	struct tipc_crypto_tx_ctx *tx_ctx;
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, len, tailen, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	/* Make sure message len at least 4-byte aligned */
	len = ALIGN(skb->len, 4);
	tailen = len - skb->len + aead->authsize;

	/* Expand skb tail for authentication tag:
	 * For simplicity, we make sure at skb allocation that the skb has
	 * enough tailroom for the authentication tag. Even when the skb is
	 * nonlinear but has no frag_list, it should still be fine!
	 * Otherwise, we must cow it to be a writable buffer with the tailroom.
	 */
	SKB_LINEAR_ASSERT(skb);
	if (tailen > skb_tailroom(skb)) {
		pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n",
			 skb_tailroom(skb), tailen);
	}

	if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
		nsg = 1;
		trailer = skb;
	} else {
		/* TODO: We could avoid skb_cow_data() if the skb has no
		 * frag_list, e.g. by using skb_fill_page_desc() to add another
		 * page to the skb with the wanted tailen... However, page skbs
		 * do not seem to occur often, so take it easy for now!
		 * For cloned skbs, e.g. from link_xmit(), there seems to be no
		 * other choice though :(
		 */
		nsg = skb_cow_data(skb, tailen, &trailer);
		if (unlikely(nsg < 0)) {
			pr_err("TX: skb_cow_data() returned %d\n", nsg);
			return nsg;
		}
	}

	pskb_put(skb, trailer, tailen);

	/* Allocate memory for the AEAD operation */
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
		goto exit;
	}

	/* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)]
	 * In case we're in cluster-key mode, SALT is varied by xor-ing with
	 * the source address (or w0 of id), otherwise with the dest address
	 * if dest is known.
	 */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= ehdr->addr; /* __be32 */
	else if (__dnode)
		salt ^= tipc_node_get_addr(__dnode);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
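
	/* IV layout sketch for "gcm(aes)" (12 octets total): the 4-octet
	 * (possibly xor-varied) salt as stored above, followed by the 8-octet
	 * seqno exactly as carried in the ehdr (big-endian on the wire).
	 */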

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_encrypt_done, skb);
	tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
	tx_ctx->aead = aead;
	tx_ctx->bearer = b;
	memcpy(&tx_ctx->dst, dst, sizeof(*dst));

	/* Hold bearer */
	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	/* Now, do encrypt */
	rc = crypto_aead_encrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}

static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = tx_ctx->bearer;
	struct tipc_aead *aead = tx_ctx->aead;
	struct tipc_crypto *tx = aead->crypto;
	struct net *net = tx->net;

	switch (err) {
	case 0:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
		rcu_read_lock();
		if (likely(test_bit(0, &b->up)))
			b->media->send_msg(net, skb, b, &tx_ctx->dst);
		else
			kfree_skb(skb);
		rcu_read_unlock();
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
		kfree_skb(skb);
		break;
	}

	kfree(tx_ctx);
	tipc_bearer_put(b);
	tipc_aead_put(aead);
}
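
/* Note on the -EINPROGRESS case in tipc_aead_encrypt_done() above: with
 * CRYPTO_TFM_REQ_MAY_BACKLOG, a backlogged request completes twice; the first
 * completion (err == -EINPROGRESS) only signals that processing has started,
 * so the context must be kept until the final completion. The same applies to
 * tipc_aead_decrypt_done() below.
 */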

/**
 * tipc_aead_decrypt - Decrypt an encrypted message
 * @net: struct net
 * @aead: TIPC AEAD for the message decryption
 * @skb: the input/output skb
 * @b: TIPC bearer where the message has been received
 *
 * Return:
 * 0                   : if the decryption has completed
 * -EINPROGRESS/-EBUSY : if a callback will be performed
 * < 0                 : the decryption has failed
 */
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b)
{
	struct tipc_crypto_rx_ctx *rx_ctx;
	struct aead_request *req;
	struct crypto_aead *tfm;
	struct sk_buff *unused;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	if (unlikely(!aead))
		return -ENOKEY;

	/* Cow skb data if needed */
	if (likely(!skb_cloned(skb) &&
		   (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) {
		nsg = 1 + skb_shinfo(skb)->nr_frags;
	} else {
		nsg = skb_cow_data(skb, 0, &unused);
		if (unlikely(nsg < 0)) {
			pr_err("RX: skb_cow_data() returned %d\n", nsg);
			return nsg;
		}
	}

	/* Allocate memory for the AEAD operation */
	tfm = tipc_aead_tfm_next(aead);
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
		goto exit;
	}

	/* Reconstruct IV: */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= ehdr->addr; /* __be32 */
	else if (ehdr->destined)
		salt ^= tipc_own_addr(net);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_decrypt_done, skb);
	rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
	rx_ctx->aead = aead;
	rx_ctx->bearer = b;

	/* Hold bearer */
	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	/* Now, do decrypt */
	rc = crypto_aead_decrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}

static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = rx_ctx->bearer;
	struct tipc_aead *aead = rx_ctx->aead;
	struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
	struct net *net = aead->crypto->net;

	switch (err) {
	case 0:
		this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
		break;
	}

	kfree(rx_ctx);
	tipc_crypto_rcv_complete(net, aead, b, &skb, err);
	if (likely(skb)) {
		if (likely(test_bit(0, &b->up)))
			tipc_rcv(net, skb, b);
		else
			kfree_skb(skb);
	}

	tipc_bearer_put(b);
}

static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
{
	return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
}
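
/* Size note: LINK_CONFIG (neighbor discovery) messages carry the full
 * 16-octet node id in the ehdr instead of the 4-octet node address, hence the
 * larger EHDR_CFG_SIZE (see tipc_ehdr_build() below).
 */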

/**
 * tipc_ehdr_validate - Validate an encryption message
 * @skb: the message buffer
 *
 * Return: "true" if this is a valid encryption message, otherwise "false"
 */
bool tipc_ehdr_validate(struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr;
	int ehsz;

	if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
		return false;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (unlikely(ehdr->version != TIPC_EVERSION))
		return false;
	ehsz = tipc_ehdr_size(ehdr);
	if (unlikely(!pskb_may_pull(skb, ehsz)))
		return false;
	if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
		return false;

	return true;
}

/**
 * tipc_ehdr_build - Build TIPC encryption message header
 * @net: struct net
 * @aead: TX AEAD key to be used for the message encryption
 * @tx_key: key id used for the message encryption
 * @skb: input/output message skb
 * @__rx: RX crypto handle if dest is "known"
 *
 * Return: the header size if the building is successful, otherwise < 0
 */
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_ehdr *ehdr;
	u32 user = msg_user(hdr);
	u64 seqno;
	int ehsz;

	/* Make room for encryption header */
	ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
	WARN_ON(skb_headroom(skb) < ehsz);
	ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);

	/* Obtain a seqno first:
	 * Use the key seqno (= cluster wise) if dest is unknown or we're in
	 * cluster key mode, otherwise it's better for a per-peer seqno!
	 */
	if (!__rx || aead->mode == CLUSTER_KEY)
		seqno = atomic64_inc_return(&aead->seqno);
	else
		seqno = atomic64_inc_return(&__rx->sndnxt);

	/* Revoke the key if seqno is wrapped around */
	if (unlikely(!seqno))
		return tipc_crypto_key_revoke(net, tx_key);

	/* Word 1-2 */
	ehdr->seqno = cpu_to_be64(seqno);

	/* Words 0, 3- */
	ehdr->version = TIPC_EVERSION;
	ehdr->user = 0;
	ehdr->keepalive = 0;
	ehdr->tx_key = tx_key;
	ehdr->destined = (__rx) ? 1 : 0;
	ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
	ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
	ehdr->master_key = aead->crypto->key_master;
	ehdr->reserved_1 = 0;
	ehdr->reserved_2 = 0;

	switch (user) {
	case LINK_CONFIG:
		ehdr->user = LINK_CONFIG;
		memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
		break;
	default:
		if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
			ehdr->user = LINK_PROTOCOL;
			ehdr->keepalive = msg_is_keepalive(hdr);
		}
		ehdr->addr = hdr->hdr[3];
		break;
	}

	return ehsz;
}

static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending)
{
	struct tipc_key old = c->key;
	char buf[32];

	c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
		      ((new_active & KEY_MASK) << (KEY_BITS)) |
		      ((new_pending & KEY_MASK));

	pr_debug("%s: key changing %s ::%pS\n", c->name,
		 tipc_key_change_dump(old, c->key, buf),
		 __builtin_return_address(0));
}

/**
 * tipc_crypto_key_init - Initiate a new user / AEAD key
 * @c: TIPC crypto to which new key is attached
 * @ukey: the user key
 * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
 * @master_key: specify this is a cluster master key
 *
 * A new TIPC AEAD key will be allocated and initiated with the specified user
 * key, then attached to the TIPC crypto.
 *
 * Return: new key id in case of success, otherwise: < 0
 */
int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
			 u8 mode, bool master_key)
{
	struct tipc_aead *aead = NULL;
	int rc = 0;

	/* Initiate with the new user key */
	rc = tipc_aead_init(&aead, ukey, mode);

	/* Attach it to the crypto */
	if (likely(!rc)) {
		rc = tipc_crypto_key_attach(c, aead, 0, master_key);
		if (rc < 0)
			tipc_aead_free(&aead->rcu);
	}

	return rc;
}

/**
 * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
 * @c: TIPC crypto to which the new AEAD key is attached
 * @aead: the new AEAD key pointer
 * @pos: desired slot in the crypto key array (= 0 if any slot is fine)
 * @master_key: specify this is a cluster master key
 *
 * Return: new key id in case of success, otherwise: -EBUSY
 */
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos,
				  bool master_key)
{
	struct tipc_key key;
	int rc = -EBUSY;
	u8 new_key;

	spin_lock_bh(&c->lock);
	key = c->key;
	if (master_key) {
		new_key = KEY_MASTER;
		goto attach;
	}
	if (key.active && key.passive)
		goto exit;
	if (key.pending) {
		if (tipc_aead_users(c->aead[key.pending]) > 0)
			goto exit;
		/* if (pos): ok with replacing, will be aligned when needed */
		/* Replace it */
		new_key = key.pending;
	} else {
		if (pos) {
			if (key.active && pos != key_next(key.active)) {
				key.passive = pos;
				new_key = pos;
				goto attach;
			} else if (!key.active && !key.passive) {
				key.pending = pos;
				new_key = pos;
				goto attach;
			}
		}
		key.pending = key_next(key.active ?: key.passive);
		new_key = key.pending;
	}

attach:
	aead->crypto = c;
	aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
	tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
	if (likely(c->key.keys != key.keys))
		tipc_crypto_key_set_state(c, key.passive, key.active,
					  key.pending);
	c->working = 1;
	c->nokey = 0;
	c->key_master |= master_key;
	rc = new_key;

exit:
	spin_unlock_bh(&c->lock);
	return rc;
}
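
/* Illustrative walk-through of the slot selection above (not a spec):
 * - a master key always lands in slot KEY_MASTER (0);
 * - an unused pending key is simply replaced in place;
 * - otherwise the new key becomes pending in the slot following the active
 *   (or passive) one, e.g. active = 2 -> new pending = 3.
 */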

void tipc_crypto_key_flush(struct tipc_crypto *c)
{
	struct tipc_crypto *tx, *rx;
	int k;

	spin_lock_bh(&c->lock);
	if (is_rx(c)) {
		/* Try to cancel pending work */
		rx = c;
		tx = tipc_net(rx->net)->crypto_tx;
		if (cancel_delayed_work(&rx->work)) {
			kfree(rx->skey);
			rx->skey = NULL;
			atomic_xchg(&rx->key_distr, 0);
			tipc_node_put(rx->node);
		}
		/* RX stopping => decrease TX key users if any */
		k = atomic_xchg(&rx->peer_rx_active, 0);
		if (k) {
			tipc_aead_users_dec(tx->aead[k], 0);
			/* Mark the point TX key users changed */
			tx->timer1 = jiffies;
		}
	}

	c->flags = 0;
	tipc_crypto_key_set_state(c, 0, 0, 0);
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_crypto_key_detach(c->aead[k], &c->lock);
	atomic64_set(&c->sndnxt, 0);
	spin_unlock_bh(&c->lock);
}

/**
 * tipc_crypto_key_try_align - Align RX keys if possible
 * @rx: RX crypto handle
 * @new_pending: new pending slot if aligned (= TX key from peer)
 *
 * Peer has used an unknown key slot, this only happens when peer has left and
 * rejoined, or we are a newcomer.
 * That means, there must be no active key but a pending key at an unaligned
 * slot.
 * If so, we try to move the pending key to the new slot.
 * Note: A potential passive key can exist, it will be shifted correspondingly!
 *
 * Return: "true" if key is successfully aligned, otherwise "false"
 */
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
{
	struct tipc_aead *tmp1, *tmp2 = NULL;
	struct tipc_key key;
	bool aligned = false;
	u8 new_passive = 0;
	int x;

	spin_lock(&rx->lock);
	key = rx->key;
	if (key.pending == new_pending) {
		aligned = true;
		goto exit;
	}
	if (key.active)
		goto exit;
	if (!key.pending)
		goto exit;
	if (tipc_aead_users(rx->aead[key.pending]) > 0)
		goto exit;

	/* Try to "isolate" this pending key first */
	tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
	if (!refcount_dec_if_one(&tmp1->refcnt))
		goto exit;
	rcu_assign_pointer(rx->aead[key.pending], NULL);

	/* Move passive key if any */
	if (key.passive) {
		tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2,
					   lockdep_is_held(&rx->lock));
		x = (key.passive - key.pending + new_pending) % KEY_MAX;
		new_passive = (x <= 0) ? x + KEY_MAX : x;
	}

	/* Re-allocate the key(s) */
	tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
	rcu_assign_pointer(rx->aead[new_pending], tmp1);
	if (new_passive)
		rcu_assign_pointer(rx->aead[new_passive], tmp2);
	refcount_set(&tmp1->refcnt, 1);
	aligned = true;
	pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
			    new_pending);

exit:
	spin_unlock(&rx->lock);
	return aligned;
}
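
/* Shift example for the alignment above (illustrative only): pending at slot
 * 1 while the peer uses slot 2, with a passive key at slot 3:
 *
 *	x = (3 - 1 + 2) % 3 = 1  =>  pending: 1 -> 2, passive: 3 -> 1
 */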

/**
 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
 * @tx: TX crypto handle
 * @rx: RX crypto handle (can be NULL)
 * @skb: the message skb which will be decrypted later
 * @tx_key: peer TX key id
 *
 * This function looks up the existing TX keys and picks one which is suitable
 * for the message decryption, that must be a cluster key and not used before
 * on the same message (i.e. recursive).
 *
 * Return: the TX AEAD key handle in case of success, otherwise NULL
 */
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb,
						 u8 tx_key)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
	struct tipc_aead *aead = NULL;
	struct tipc_key key = tx->key;
	u8 k, i = 0;

	/* Initialize data if not yet */
	if (!skb_cb->tx_clone_deferred) {
		skb_cb->tx_clone_deferred = 1;
		memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	}

	skb_cb->tx_clone_ctx.rx = rx;
	if (++skb_cb->tx_clone_ctx.recurs > 2)
		return NULL;

	/* Pick one TX key */
	spin_lock(&tx->lock);
	if (tx_key == KEY_MASTER) {
		aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock);
		goto done;
	}
	do {
		k = (i == 0) ? key.pending :
			((i == 1) ? key.active : key.passive);
		if (!k)
			continue;
		aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
		if (!aead)
			continue;
		if (aead->mode != CLUSTER_KEY ||
		    aead == skb_cb->tx_clone_ctx.last) {
			aead = NULL;
			continue;
		}
		/* Ok, found one cluster key */
		skb_cb->tx_clone_ctx.last = aead;
		WARN_ON(skb->next);
		skb->next = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!skb->next))
			pr_warn("Failed to clone skb for next round if any\n");
		break;
	} while (++i < 3);

done:
	if (likely(aead))
		WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
	spin_unlock(&tx->lock);

	return aead;
}
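
/* Retry note: the skb is cloned into skb->next before a cluster key is tried,
 * so that if decryption with this key fails, tipc_crypto_rcv_complete() can
 * feed the clone back for another round with the next key; the "recurs"
 * counter caps this at two rounds per message.
 */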

/**
 * tipc_crypto_key_synch - Synch own key data according to peer key status
 * @rx: RX crypto handle
 * @skb: TIPCv2 message buffer (incl. the ehdr from peer)
 *
 * This function updates the peer node related data when the peer RX active
 * key has changed, so the number of TX keys' users on this node is increased
 * and decreased correspondingly.
 *
 * It also considers if the peer has no key, in which case we need to make our
 * own master key (if any) take over, i.e. starting the grace period and also
 * triggering the key distributing process.
 *
 * The "per-peer" sndnxt is also reset when the peer key has switched.
 */
static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_msg *hdr = buf_msg(skb);
	u32 self = tipc_own_addr(rx->net);
	u8 cur, new;
	unsigned long delay;

	/* Update RX 'key_master' flag according to peer, also mark "legacy" if
	 * a peer has no master key.
	 */
	rx->key_master = ehdr->master_key;
	if (!rx->key_master)
		tx->legacy_user = 1;

	/* For later cases, apply only if message is destined to this node */
	if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
		return;

	/* Case 1: Peer has no keys, let's make master key take over */
	if (ehdr->rx_nokey) {
		/* Set or extend grace period */
		tx->timer2 = jiffies;
		/* Schedule key distributing for the peer if not yet */
		if (tx->key.keys &&
		    !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
			get_random_bytes(&delay, 2);
			delay %= 5;
			delay = msecs_to_jiffies(500 * ++delay);
			if (queue_delayed_work(tx->wq, &rx->work, delay))
				tipc_node_get(rx->node);
		}
	} else {
		/* Cancel a pending key distributing if any */
		atomic_xchg(&rx->key_distr, 0);
	}

	/* Case 2: Peer RX active key has changed, let's update own TX users */
	cur = atomic_read(&rx->peer_rx_active);
	new = ehdr->rx_key_active;
	if (tx->key.keys &&
	    cur != new &&
	    atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
		if (new)
			tipc_aead_users_inc(tx->aead[new], INT_MAX);
		if (cur)
			tipc_aead_users_dec(tx->aead[cur], 0);

		atomic64_set(&rx->sndnxt, 0);
		/* Mark the point TX key users changed */
		tx->timer1 = jiffies;

		pr_debug("%s: key users changed %d-- %d++, peer %s\n",
			 tx->name, cur, new, rx->name);
	}
}
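
/* Timing note for the scheduling above: only two random bytes are drawn, so
 * "delay" ends up (roughly) uniform over {500, 1000, 1500, 2000, 2500} ms,
 * spreading key distribution work for different peers over time.
 */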

static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_key key;

	spin_lock(&tx->lock);
	key = tx->key;
	WARN_ON(!key.active || tx_key != key.active);

	/* Free the active key */
	tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
	tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	spin_unlock(&tx->lock);

	pr_warn("%s: key is revoked\n", tx->name);
	return -EKEYREVOKED;
}

int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
		      struct tipc_node *node)
{
	struct tipc_crypto *c;

	if (*crypto)
		return -EEXIST;

	/* Allocate crypto */
	c = kzalloc(sizeof(*c), GFP_ATOMIC);
	if (!c)
		return -ENOMEM;

	/* Allocate workqueue on TX */
	if (!node) {
		c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
		if (!c->wq) {
			kfree(c);
			return -ENOMEM;
		}
	}

	/* Allocate statistic structure */
	c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
	if (!c->stats) {
		if (c->wq)
			destroy_workqueue(c->wq);
		kfree_sensitive(c);
		return -ENOMEM;
	}

	c->flags = 0;
	c->net = net;
	c->node = node;
	get_random_bytes(&c->key_gen, 2);
	tipc_crypto_key_set_state(c, 0, 0, 0);
	atomic_set(&c->key_distr, 0);
	atomic_set(&c->peer_rx_active, 0);
	atomic64_set(&c->sndnxt, 0);
	c->timer1 = jiffies;
	c->timer2 = jiffies;
	c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
	spin_lock_init(&c->lock);
	scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
		  (is_rx(c)) ? tipc_node_get_id_str(c->node) :
			       tipc_own_id_string(c->net));

	if (is_rx(c))
		INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
	else
		INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);

	*crypto = c;
	return 0;
}

void tipc_crypto_stop(struct tipc_crypto **crypto)
{
	struct tipc_crypto *c = *crypto;
	u8 k;

	if (!c)
		return;

	/* Flush any queued works & destroy wq */
	if (is_tx(c)) {
		c->rekeying_intv = 0;
		cancel_delayed_work_sync(&c->work);
		destroy_workqueue(c->wq);
	}

	/* Release AEAD keys */
	rcu_read_lock();
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_aead_put(rcu_dereference(c->aead[k]));
	rcu_read_unlock();
	pr_debug("%s: has been stopped\n", c->name);

	/* Free this crypto statistics */
	free_percpu(c->stats);

	*crypto = NULL;
	kfree_sensitive(c);
}

void tipc_crypto_timeout(struct tipc_crypto *rx)
{
	struct tipc_net *tn = tipc_net(rx->net);
	struct tipc_crypto *tx = tn->crypto_tx;
	struct tipc_key key;
	int cmd;

	/* TX pending: taking all users & stable -> active */
	spin_lock(&tx->lock);
	key = tx->key;
	if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
		goto s1;
	if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
		goto s1;
	if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
		goto s1;

	tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
	if (key.active)
		tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", tx->name, key.pending);

s1:
	spin_unlock(&tx->lock);

	/* RX pending: having user -> active */
	spin_lock(&rx->lock);
	key = rx->key;
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
		goto s2;

	if (key.active)
		key.passive = key.active;
	key.active = key.pending;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
	goto s5;

s2:
	/* RX pending: not working -> remove */
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
		goto s3;

	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
	pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
	goto s5;

s3:
	/* RX active: timed out or no user -> pending */
	if (!key.active)
		goto s4;
	if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.active]) > 0)
		goto s4;

	if (key.pending)
		key.passive = key.active;
	else
		key.pending = key.active;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
	tipc_aead_users_set(rx->aead[key.pending], 0);
	pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
	goto s5;

s4:
	/* RX passive: outdated or not working -> free */
	if (!key.passive)
		goto s5;
	if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.passive]) > -10)
		goto s5;

	tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
	tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
	pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);

s5:
	spin_unlock(&rx->lock);

	/* Relax it here, the flag will be set again if it really is, but only
	 * when we are not in grace period for safety!
	 */
	if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
		tx->legacy_user = 0;

	/* Limit max_tfms & do debug commands if needed */
	if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
		return;

	cmd = sysctl_tipc_max_tfms;
	sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
	tipc_crypto_do_cmd(rx->net, cmd);
}
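
/* Debug aid (illustrative; assumes the sysctl is exposed as
 * net.tipc.max_tfms): any value above TIPC_MAX_TFMS_LIM written to the sysctl
 * is interpreted as a command by the code above, e.g.:
 *
 *	sysctl -w net.tipc.max_tfms=65521	# 0xfff1: dump crypto stats
 *
 * after which the sysctl is reset to TIPC_MAX_TFMS_DEF.
 */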

static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type)
{
	struct sk_buff *skb;

	skb = skb_clone(_skb, GFP_ATOMIC);
	if (skb) {
		TIPC_SKB_CB(skb)->xmit_type = type;
		tipc_crypto_xmit(net, &skb, b, dst, __dnode);
		if (skb)
			b->media->send_msg(net, skb, b, dst);
	}
}

/**
 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
 * @net: struct net
 * @skb: input/output message skb pointer
 * @b: bearer used for xmit later
 * @dst: destination media address
 * @__dnode: destination node for reference if any
 *
 * First, build an encryption message header on the top of the message, then
 * encrypt the original TIPC message by using the pending, master or active
 * key with this preference order.
 * If the encryption is successful, the encrypted skb is returned directly or
 * via the callback.
 * Otherwise, the skb is freed!
 *
 * Return:
 * 0                   : the encryption has succeeded (or no encryption)
 * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
 * -ENOKEY             : the encryption has failed due to no key
 * -EKEYREVOKED        : the encryption has failed due to key revoked
 * -ENOMEM             : the encryption has failed due to no memory
 * < 0                 : the encryption has failed due to other reasons
 */
int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dst,
		     struct tipc_node *__dnode)
{
	struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats = tx->stats;
	struct tipc_msg *hdr = buf_msg(*skb);
	struct tipc_key key = tx->key;
	struct tipc_aead *aead = NULL;
	u32 user = msg_user(hdr);
	u32 type = msg_type(hdr);
	int rc = -ENOKEY;
	u8 tx_key = 0;

	/* No encryption? */
	if (!tx->working)
		return 0;

	/* Pending key if peer has active on it or probing time */
	if (unlikely(key.pending)) {
		tx_key = key.pending;
		if (!tx->key_master && !key.active)
			goto encrypt;
		if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) {
			pr_debug("%s: probing for key[%d]\n", tx->name,
				 key.pending);
			goto encrypt;
		}
		if (user == LINK_CONFIG || user == LINK_PROTOCOL)
			tipc_crypto_clone_msg(net, *skb, b, dst, __dnode,
					      SKB_PROBING);
	}

	/* Master key if this is a *vital* message or in grace period */
	if (tx->key_master) {
		tx_key = KEY_MASTER;
		if (!key.active)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) {
			pr_debug("%s: gracing for msg (%d %d)\n", tx->name,
				 user, type);
			goto encrypt;
		}
		if (user == LINK_CONFIG ||
		    (user == LINK_PROTOCOL && type == RESET_MSG) ||
		    (user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
		    time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) {
			if (__rx && __rx->key_master &&
			    !atomic_read(&__rx->peer_rx_active))
				goto encrypt;
			if (!__rx) {
				if (likely(!tx->legacy_user))
					goto encrypt;
				tipc_crypto_clone_msg(net, *skb, b, dst,
						      __dnode, SKB_GRACING);
			}
		}
	}

	/* Else, use the active key if any */
	if (likely(key.active)) {
		tx_key = key.active;
		goto encrypt;
	}

	goto exit;

encrypt:
	aead = tipc_aead_get(tx->aead[tx_key]);
	if (unlikely(!aead))
		goto exit;
	rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
	if (likely(rc > 0))
		rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);

exit:
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY)
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
		else if (rc == -EKEYREVOKED)
			this_cpu_inc(stats->stat[STAT_BADKEYS]);
		kfree_skb(*skb);
		*skb = NULL;
		break;
	}

	tipc_aead_put(aead);
	return rc;
}
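
/* Probing note: while a pending key exists, link messages (LINK_CONFIG /
 * LINK_PROTOCOL) are additionally cloned and sent encrypted with that pending
 * key (SKB_PROBING above), so we can find out whether the peer can already
 * decrypt with the new key before switching over to it.
 */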

/**
 * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
 * @net: struct net
 * @rx: RX crypto handle
 * @skb: input/output message skb pointer
 * @b: bearer where the message has been received
 *
 * If the decryption is successful, the decrypted skb is returned directly or
 * as the callback, the encryption header and auth tag will be trimmed out
 * before forwarding to tipc_rcv() via the tipc_crypto_rcv_complete().
 * Otherwise, the skb will be freed!
 * Note: RX key(s) can be re-aligned, or in case no key is suitable, TX
 * cluster key(s) can be taken for decryption (i.e. recursive).
 *
 * Return:
 * 0                   : the decryption has successfully completed
 * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
 * -ENOKEY             : the decryption has failed due to no key
 * -EBADMSG            : the decryption has failed due to bad message
 * -ENOMEM             : the decryption has failed due to no memory
 * < 0                 : the decryption has failed due to other reasons
 */
int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
		    struct sk_buff **skb, struct tipc_bearer *b)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats;
	struct tipc_aead *aead = NULL;
	struct tipc_key key;
	int rc = -ENOKEY;
	u8 tx_key, n;

	tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;

	/* New peer?
	 * Let's try with TX key (i.e. cluster mode) & verify the skb first!
	 */
	if (unlikely(!rx || tx_key == KEY_MASTER))
		goto pick_tx;

	/* Pick RX key according to TX key if any */
	key = rx->key;
	if (tx_key == key.active || tx_key == key.pending ||
	    tx_key == key.passive)
		goto decrypt;

	/* Unknown key, let's try to align RX key(s) */
	if (tipc_crypto_key_try_align(rx, tx_key))
		goto decrypt;

pick_tx:
	/* No key suitable? Try to pick one from TX... */
	aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
	if (aead)
		goto decrypt;
	goto exit;

decrypt:
	rcu_read_lock();
	if (!aead)
		aead = tipc_aead_get(rx->aead[tx_key]);
	rc = tipc_aead_decrypt(net, aead, *skb, b);
	rcu_read_unlock();

exit:
	stats = ((rx) ?: tx)->stats;
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY) {
			kfree_skb(*skb);
			*skb = NULL;
			if (rx) {
				/* Mark rx->nokey only if we don't have a
				 * pending received session key, nor a newer
				 * one i.e. in the next slot.
				 */
				n = key_next(tx_key);
				rx->nokey = !(rx->skey ||
					      rcu_access_pointer(rx->aead[n]));
				pr_debug_ratelimited("%s: nokey %d, key %d/%x\n",
						     rx->name, rx->nokey,
						     tx_key, rx->key.keys);
				tipc_node_put(rx->node);
			}
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
			return rc;
		} else if (rc == -EBADMSG) {
			this_cpu_inc(stats->stat[STAT_BADMSGS]);
		}
		break;
	}

	tipc_crypto_rcv_complete(net, aead, b, skb, rc);
	return rc;
}
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
	struct tipc_crypto *rx = aead->crypto;
	struct tipc_aead *tmp = NULL;
	struct tipc_ehdr *ehdr;
	struct tipc_node *n;

	/* Is this completed by TX? */
	if (unlikely(is_tx(aead->crypto))) {
		rx = skb_cb->tx_clone_ctx.rx;
		pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
			 (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
			 (*skb)->next, skb_cb->flags);
		pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
			 skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
			 aead->crypto->aead[1], aead->crypto->aead[2],
			 aead->crypto->aead[3]);
		if (unlikely(err)) {
			if (err == -EBADMSG && (*skb)->next)
				tipc_rcv(net, (*skb)->next, b);
			goto free_skb;
		}

		if (likely((*skb)->next)) {
			kfree_skb((*skb)->next);
			(*skb)->next = NULL;
		}
		ehdr = (struct tipc_ehdr *)(*skb)->data;
		if (!rx) {
			WARN_ON(ehdr->user != LINK_CONFIG);
			n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
					     true);
			rx = tipc_node_crypto_rx(n);
			if (unlikely(!rx))
				goto free_skb;
		}

		/* Ignore cloning if it was TX master key */
		if (ehdr->tx_key == KEY_MASTER)
			goto rcv;
		if (tipc_aead_clone(&tmp, aead) < 0)
			goto rcv;
		if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
			tipc_aead_free(&tmp->rcu);
			goto rcv;
		}
		tipc_aead_put(aead);
		aead = tipc_aead_get(tmp);
	}

	if (unlikely(err)) {
		tipc_aead_users_dec(aead, INT_MIN);
		goto free_skb;
	}

	/* Set the RX key's user */
	tipc_aead_users_set(aead, 1);

	/* Mark this point, RX works */
	rx->timer1 = jiffies;

rcv:
	/* Remove ehdr & auth. tag prior to tipc_rcv() */
	ehdr = (struct tipc_ehdr *)(*skb)->data;

	/* Mark this point, RX passive still works */
	if (rx->key.passive && ehdr->tx_key == rx->key.passive)
		rx->timer2 = jiffies;

	skb_reset_network_header(*skb);
	skb_pull(*skb, tipc_ehdr_size(ehdr));
	pskb_trim(*skb, (*skb)->len - aead->authsize);

	/* Validate TIPCv2 message */
	if (unlikely(!tipc_msg_validate(skb))) {
		pr_err_ratelimited("Packet dropped after decryption!\n");
		goto free_skb;
	}

	/* Ok, everything's fine, try to synch own keys according to peers' */
	tipc_crypto_key_synch(rx, *skb);

	/* Mark skb decrypted */
	skb_cb->decrypted = 1;

	/* Clear clone ctx if any */
	if (likely(!skb_cb->tx_clone_deferred))
		goto exit;
	skb_cb->tx_clone_deferred = 0;
	memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	goto exit;

free_skb:
	kfree_skb(*skb);
	*skb = NULL;

exit:
	tipc_aead_put(aead);
	if (rx)
		tipc_node_put(rx->node);
}
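
/**
 * tipc_crypto_do_cmd - Handle a crypto debug command
 * @net: struct net
 * @cmd: the command, currently only 0xfff1 is supported - dumping the key
 *       status and per-cpu statistics of the own TX and all known RX
 *       crypto instances
 */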
static void tipc_crypto_do_cmd(struct net *net, int cmd)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_crypto *tx = tn->crypto_tx, *rx;
	struct list_head *p;
	unsigned int stat;
	int i, j, cpu;
	char buf[200];

	/* Currently only one command is supported */
	switch (cmd) {
	case 0xfff1:
		goto print_stats;
	default:
		return;
	}

print_stats:
	/* Print a header */
	pr_info("\n=============== TIPC Crypto Statistics ===============\n\n");

	/* Print key status */
	pr_info("Key status:\n");
	pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net),
		tipc_crypto_key_dump(tx, buf));

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
			tipc_crypto_key_dump(rx, buf));
	}
	rcu_read_unlock();

	/* Print crypto statistics */
	for (i = 0, j = 0; i < MAX_STATS; i++)
		j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
	pr_info("Counter     %s", buf);

	memset(buf, '-', 115);
	buf[115] = '\0';
	pr_info("%s\n", buf);

	j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
	for_each_possible_cpu(cpu) {
		for (i = 0; i < MAX_STATS; i++) {
			stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
			j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
		}
		pr_info("%s", buf);
		j = scnprintf(buf, 200, "%12s", " ");
	}

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		j = scnprintf(buf, 200, "RX(%7.7s) ",
			      tipc_node_get_id_str(rx->node));
		for_each_possible_cpu(cpu) {
			for (i = 0; i < MAX_STATS; i++) {
				stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
				j += scnprintf(buf + j, 200 - j, "|%11d ",
					       stat);
			}
			pr_info("%s", buf);
			j = scnprintf(buf, 200, "%12s", " ");
		}
	}
	rcu_read_unlock();

	pr_info("\n======================== Done ========================\n");
}

static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
{
	struct tipc_key key = c->key;
	struct tipc_aead *aead;
	int k, i = 0;
	char *s;

	for (k = KEY_MIN; k <= KEY_MAX; k++) {
		if (k == KEY_MASTER) {
			if (is_rx(c))
				continue;
			if (time_before(jiffies,
					c->timer2 + TIPC_TX_GRACE_PERIOD))
				s = "ACT";
			else
				s = "PAS";
		} else {
			if (k == key.passive)
				s = "PAS";
			else if (k == key.active)
				s = "ACT";
			else if (k == key.pending)
				s = "PEN";
			else
				s = "-";
		}
		i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);

		rcu_read_lock();
		aead = rcu_dereference(c->aead[k]);
		if (aead)
			i += scnprintf(buf + i, 200 - i,
				       "{\"0x...%s\", \"%s\"}/%d:%d",
				       aead->hint,
				       (aead->mode == CLUSTER_KEY) ? "c" : "p",
				       atomic_read(&aead->users),
				       refcount_read(&aead->refcnt));
		rcu_read_unlock();
		i += scnprintf(buf + i, 200 - i, "\n");
	}

	if (is_rx(c))
		i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
			       atomic_read(&c->peer_rx_active));

	return buf;
}
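
/**
 * tipc_key_change_dump - Dump a key status change to a string buffer
 * @old: the key status before the change
 * @new: the key status after the change
 * @buf: the dump buffer, at least 32 bytes
 *
 * Return: the dump buffer, e.g. "[- act -] -> [pen act -]"
 */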
"%s " : "%s", s); 2139 } 2140 if (key != &new) { 2141 i += scnprintf(buf + i, 32 - i, "] -> "); 2142 key = &new; 2143 goto again; 2144 } 2145 i += scnprintf(buf + i, 32 - i, "]"); 2146 return buf; 2147 } 2148 2149 /** 2150 * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point 2151 * @net: the struct net 2152 * @skb: the receiving message buffer 2153 */ 2154 void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb) 2155 { 2156 struct tipc_crypto *rx; 2157 struct tipc_msg *hdr; 2158 2159 if (unlikely(skb_linearize(skb))) 2160 goto exit; 2161 2162 hdr = buf_msg(skb); 2163 rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr)); 2164 if (unlikely(!rx)) 2165 goto exit; 2166 2167 switch (msg_type(hdr)) { 2168 case KEY_DISTR_MSG: 2169 if (tipc_crypto_key_rcv(rx, hdr)) 2170 goto exit; 2171 break; 2172 default: 2173 break; 2174 } 2175 2176 tipc_node_put(rx->node); 2177 2178 exit: 2179 kfree_skb(skb); 2180 } 2181 2182 /** 2183 * tipc_crypto_key_distr - Distribute a TX key 2184 * @tx: the TX crypto 2185 * @key: the key's index 2186 * @dest: the destination tipc node, = NULL if distributing to all nodes 2187 * 2188 * Return: 0 in case of success, otherwise < 0 2189 */ 2190 int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key, 2191 struct tipc_node *dest) 2192 { 2193 struct tipc_aead *aead; 2194 u32 dnode = tipc_node_get_addr(dest); 2195 int rc = -ENOKEY; 2196 2197 if (!sysctl_tipc_key_exchange_enabled) 2198 return 0; 2199 2200 if (key) { 2201 rcu_read_lock(); 2202 aead = tipc_aead_get(tx->aead[key]); 2203 if (likely(aead)) { 2204 rc = tipc_crypto_key_xmit(tx->net, aead->key, 2205 aead->gen, aead->mode, 2206 dnode); 2207 tipc_aead_put(aead); 2208 } 2209 rcu_read_unlock(); 2210 } 2211 2212 return rc; 2213 } 2214 2215 /** 2216 * tipc_crypto_key_xmit - Send a session key 2217 * @net: the struct net 2218 * @skey: the session key to be sent 2219 * @gen: the key's generation 2220 * @mode: the key's mode 2221 * @dnode: the destination node address, = 0 if broadcasting to all nodes 2222 * 2223 * The session key 'skey' is packed in a TIPC v2 'MSG_CRYPTO/KEY_DISTR_MSG' 2224 * as its data section, then xmit-ed through the uc/bc link. 2225 * 2226 * Return: 0 in case of success, otherwise < 0 2227 */ 2228 static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey, 2229 u16 gen, u8 mode, u32 dnode) 2230 { 2231 struct sk_buff_head pkts; 2232 struct tipc_msg *hdr; 2233 struct sk_buff *skb; 2234 u16 size, cong_link_cnt; 2235 u8 *data; 2236 int rc; 2237 2238 size = tipc_aead_key_size(skey); 2239 skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC); 2240 if (!skb) 2241 return -ENOMEM; 2242 2243 hdr = buf_msg(skb); 2244 tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG, 2245 INT_H_SIZE, dnode); 2246 msg_set_size(hdr, INT_H_SIZE + size); 2247 msg_set_key_gen(hdr, gen); 2248 msg_set_key_mode(hdr, mode); 2249 2250 data = msg_data(hdr); 2251 *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen); 2252 memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME); 2253 memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key, 2254 skey->keylen); 2255 2256 __skb_queue_head_init(&pkts); 2257 __skb_queue_tail(&pkts, skb); 2258 if (dnode) 2259 rc = tipc_node_xmit(net, &pkts, dnode, 0); 2260 else 2261 rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt); 2262 2263 return rc; 2264 } 2265 2266 /** 2267 * tipc_crypto_key_rcv - Receive a session key 2268 * @rx: the RX crypto 2269 * @hdr: the TIPC v2 message incl. 
/**
 * tipc_crypto_key_rcv - Receive a session key
 * @rx: the RX crypto
 * @hdr: the TIPC v2 message incl. the received session key in its data
 *
 * This function retrieves the session key from the peer's message, then
 * schedules an RX work to attach the key to the corresponding RX crypto.
 *
 * Return: "true" if the key has been scheduled for attaching, otherwise
 * "false".
 */
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
{
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_aead_key *skey = NULL;
	u16 key_gen = msg_key_gen(hdr);
	u16 size = msg_data_sz(hdr);
	u8 *data = msg_data(hdr);

	spin_lock(&rx->lock);
	if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
		pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
		       rx->skey, key_gen, rx->key_gen);
		goto exit;
	}

	/* Allocate memory for the key */
	skey = kmalloc(size, GFP_ATOMIC);
	if (unlikely(!skey)) {
		pr_err("%s: unable to allocate memory for skey\n", rx->name);
		goto exit;
	}

	/* Copy key from msg data */
	skey->keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
	memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
	memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
	       skey->keylen);

	/* Sanity check */
	if (unlikely(size != tipc_aead_key_size(skey))) {
		kfree(skey);
		skey = NULL;
		goto exit;
	}

	rx->key_gen = key_gen;
	rx->skey_mode = msg_key_mode(hdr);
	rx->skey = skey;
	rx->nokey = 0;
	mb(); /* for nokey flag */

exit:
	spin_unlock(&rx->lock);

	/* Schedule the key attaching on this crypto */
	if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
		return true;

	return false;
}
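
/* Note on the rx->key_distr flag consumed by tipc_crypto_work_rx() below
 * (a descriptive note derived from the handler): the flag advances from
 * KEY_DISTR_SCHED to KEY_DISTR_COMPL once the TX key has been distributed
 * to the peer, and is cleared back to 0 on the rescheduled work run.
 */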
2334 */ 2335 static void tipc_crypto_work_rx(struct work_struct *work) 2336 { 2337 struct delayed_work *dwork = to_delayed_work(work); 2338 struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work); 2339 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; 2340 unsigned long delay = msecs_to_jiffies(5000); 2341 bool resched = false; 2342 u8 key; 2343 int rc; 2344 2345 /* Case 1: Distribute TX key to peer if scheduled */ 2346 if (atomic_cmpxchg(&rx->key_distr, 2347 KEY_DISTR_SCHED, 2348 KEY_DISTR_COMPL) == KEY_DISTR_SCHED) { 2349 /* Always pick the newest one for distributing */ 2350 key = tx->key.pending ?: tx->key.active; 2351 rc = tipc_crypto_key_distr(tx, key, rx->node); 2352 if (unlikely(rc)) 2353 pr_warn("%s: unable to distr key[%d] to %s, err %d\n", 2354 tx->name, key, tipc_node_get_id_str(rx->node), 2355 rc); 2356 2357 /* Sched for key_distr releasing */ 2358 resched = true; 2359 } else { 2360 atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0); 2361 } 2362 2363 /* Case 2: Attach a pending received session key from peer if any */ 2364 if (rx->skey) { 2365 rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false); 2366 if (unlikely(rc < 0)) 2367 pr_warn("%s: unable to attach received skey, err %d\n", 2368 rx->name, rc); 2369 switch (rc) { 2370 case -EBUSY: 2371 case -ENOMEM: 2372 /* Resched the key attaching */ 2373 resched = true; 2374 break; 2375 default: 2376 synchronize_rcu(); 2377 kfree(rx->skey); 2378 rx->skey = NULL; 2379 break; 2380 } 2381 } 2382 2383 if (resched && queue_delayed_work(tx->wq, &rx->work, delay)) 2384 return; 2385 2386 tipc_node_put(rx->node); 2387 } 2388 2389 /** 2390 * tipc_crypto_rekeying_sched - (Re)schedule rekeying w/o new interval 2391 * @tx: TX crypto 2392 * @changed: if the rekeying needs to be rescheduled with new interval 2393 * @new_intv: new rekeying interval (when "changed" = true) 2394 */ 2395 void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed, 2396 u32 new_intv) 2397 { 2398 unsigned long delay; 2399 bool now = false; 2400 2401 if (changed) { 2402 if (new_intv == TIPC_REKEYING_NOW) 2403 now = true; 2404 else 2405 tx->rekeying_intv = new_intv; 2406 cancel_delayed_work_sync(&tx->work); 2407 } 2408 2409 if (tx->rekeying_intv || now) { 2410 delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000; 2411 queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay)); 2412 } 2413 } 2414 2415 /** 2416 * tipc_crypto_work_tx - Scheduled TX works handler 2417 * @work: the struct TX work 2418 * 2419 * The function processes the previous scheduled work, i.e. key rekeying, by 2420 * generating a new session key based on current one, then attaching it to the 2421 * TX crypto and finally distributing it to peers. It also re-schedules the 2422 * rekeying if needed. 
2423 */ 2424 static void tipc_crypto_work_tx(struct work_struct *work) 2425 { 2426 struct delayed_work *dwork = to_delayed_work(work); 2427 struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work); 2428 struct tipc_aead_key *skey = NULL; 2429 struct tipc_key key = tx->key; 2430 struct tipc_aead *aead; 2431 int rc = -ENOMEM; 2432 2433 if (unlikely(key.pending)) 2434 goto resched; 2435 2436 /* Take current key as a template */ 2437 rcu_read_lock(); 2438 aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]); 2439 if (unlikely(!aead)) { 2440 rcu_read_unlock(); 2441 /* At least one key should exist for securing */ 2442 return; 2443 } 2444 2445 /* Lets duplicate it first */ 2446 skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC); 2447 rcu_read_unlock(); 2448 2449 /* Now, generate new key, initiate & distribute it */ 2450 if (likely(skey)) { 2451 rc = tipc_aead_key_generate(skey) ?: 2452 tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false); 2453 if (likely(rc > 0)) 2454 rc = tipc_crypto_key_distr(tx, rc, NULL); 2455 kfree_sensitive(skey); 2456 } 2457 2458 if (unlikely(rc)) 2459 pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc); 2460 2461 resched: 2462 /* Re-schedule rekeying if any */ 2463 tipc_crypto_rekeying_sched(tx, false, 0); 2464 } 2465