1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption 4 * 5 * Copyright (c) 2019, Ericsson AB 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 37 #include <crypto/aead.h> 38 #include <crypto/aes.h> 39 #include <crypto/rng.h> 40 #include "crypto.h" 41 #include "msg.h" 42 #include "bcast.h" 43 44 #define TIPC_TX_GRACE_PERIOD msecs_to_jiffies(5000) /* 5s */ 45 #define TIPC_TX_LASTING_TIME msecs_to_jiffies(10000) /* 10s */ 46 #define TIPC_RX_ACTIVE_LIM msecs_to_jiffies(3000) /* 3s */ 47 #define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(15000) /* 15s */ 48 49 #define TIPC_MAX_TFMS_DEF 10 50 #define TIPC_MAX_TFMS_LIM 1000 51 52 #define TIPC_REKEYING_INTV_DEF (60 * 24) /* default: 1 day */ 53 54 /* 55 * TIPC Key ids 56 */ 57 enum { 58 KEY_MASTER = 0, 59 KEY_MIN = KEY_MASTER, 60 KEY_1 = 1, 61 KEY_2, 62 KEY_3, 63 KEY_MAX = KEY_3, 64 }; 65 66 /* 67 * TIPC Crypto statistics 68 */ 69 enum { 70 STAT_OK, 71 STAT_NOK, 72 STAT_ASYNC, 73 STAT_ASYNC_OK, 74 STAT_ASYNC_NOK, 75 STAT_BADKEYS, /* tx only */ 76 STAT_BADMSGS = STAT_BADKEYS, /* rx only */ 77 STAT_NOKEYS, 78 STAT_SWITCHES, 79 80 MAX_STATS, 81 }; 82 83 /* TIPC crypto statistics' header */ 84 static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok", 85 "async_nok", "badmsgs", "nokeys", 86 "switches"}; 87 88 /* Max TFMs number per key */ 89 int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF; 90 /* Key exchange switch, default: on */ 91 int sysctl_tipc_key_exchange_enabled __read_mostly = 1; 92 93 /* 94 * struct tipc_key - TIPC keys' status indicator 95 * 96 * 7 6 5 4 3 2 1 0 97 * +-----+-----+-----+-----+-----+-----+-----+-----+ 98 * key: | (reserved)|passive idx| active idx|pending idx| 99 * +-----+-----+-----+-----+-----+-----+-----+-----+ 100 */ 101 struct tipc_key { 102 #define KEY_BITS (2) 103 #define KEY_MASK ((1 << KEY_BITS) - 1) 104 union { 105 struct { 106 #if defined(__LITTLE_ENDIAN_BITFIELD) 107 u8 pending:2, 108 active:2, 109 passive:2, /* rx only */ 110 reserved:2; 111 #elif defined(__BIG_ENDIAN_BITFIELD) 112 u8 reserved:2, 113 passive:2, /* rx only */ 114 active:2, 115 pending:2; 116 #else 117 #error "Please fix <asm/byteorder.h>" 118 #endif 119 } __packed; 120 u8 keys; 121 }; 122 }; 123 124 /** 125 * struct tipc_tfm - TIPC TFM structure to form a list of TFMs 126 * @tfm: cipher handle/key 127 * @list: linked list of TFMs 128 */ 129 struct tipc_tfm { 130 struct crypto_aead *tfm; 131 struct list_head list; 132 }; 133 134 /** 135 * struct tipc_aead - TIPC AEAD key structure 136 * @tfm_entry: per-cpu pointer to one entry in TFM list 137 * @crypto: TIPC crypto owns this key 138 * @cloned: reference to the source key in case cloning 139 * @users: the number of the key users (TX/RX) 140 * @salt: the key's SALT value 141 * @authsize: authentication tag size (max = 16) 142 * @mode: crypto mode is applied to the key 143 * @hint: a hint for user key 144 * @rcu: struct rcu_head 145 * @key: the aead key 146 * @gen: the key's generation 147 * @seqno: the key seqno (cluster scope) 148 * @refcnt: the key reference counter 149 */ 150 struct tipc_aead { 151 #define TIPC_AEAD_HINT_LEN (5) 152 struct tipc_tfm * __percpu *tfm_entry; 153 struct tipc_crypto *crypto; 154 struct tipc_aead *cloned; 155 atomic_t users; 156 u32 salt; 157 u8 authsize; 158 u8 mode; 159 char hint[2 * TIPC_AEAD_HINT_LEN + 1]; 160 struct rcu_head rcu; 161 struct tipc_aead_key *key; 162 u16 gen; 163 164 atomic64_t seqno ____cacheline_aligned; 165 refcount_t refcnt ____cacheline_aligned; 166 167 } ____cacheline_aligned; 168 169 /** 170 * struct tipc_crypto_stats - TIPC Crypto statistics 171 * @stat: array of crypto statistics 172 */ 173 struct tipc_crypto_stats { 174 unsigned int stat[MAX_STATS]; 175 }; 176 177 /** 178 * struct tipc_crypto - TIPC TX/RX crypto structure 179 * @net: struct net 180 * @node: TIPC node (RX) 181 * @aead: array of pointers to AEAD keys for encryption/decryption 182 * @peer_rx_active: replicated peer RX active key index 183 * @key_gen: TX/RX key generation 184 * @key: the key states 185 * @skey_mode: session key's mode 186 * @skey: received session key 187 * @wq: common workqueue on TX crypto 188 * @work: delayed work sched for TX/RX 189 * @key_distr: key distributing state 190 * @rekeying_intv: rekeying interval (in minutes) 191 * @stats: the crypto statistics 192 * @name: the crypto name 193 * @sndnxt: the per-peer sndnxt (TX) 194 * @timer1: general timer 1 (jiffies) 195 * @timer2: general timer 2 (jiffies) 196 * @working: the crypto is working or not 197 * @key_master: flag indicates if master key exists 198 * @legacy_user: flag indicates if a peer joins w/o master key (for bwd comp.) 199 * @nokey: no key indication 200 * @flags: combined flags field 201 * @lock: tipc_key lock 202 */ 203 struct tipc_crypto { 204 struct net *net; 205 struct tipc_node *node; 206 struct tipc_aead __rcu *aead[KEY_MAX + 1]; 207 atomic_t peer_rx_active; 208 u16 key_gen; 209 struct tipc_key key; 210 u8 skey_mode; 211 struct tipc_aead_key *skey; 212 struct workqueue_struct *wq; 213 struct delayed_work work; 214 #define KEY_DISTR_SCHED 1 215 #define KEY_DISTR_COMPL 2 216 atomic_t key_distr; 217 u32 rekeying_intv; 218 219 struct tipc_crypto_stats __percpu *stats; 220 char name[48]; 221 222 atomic64_t sndnxt ____cacheline_aligned; 223 unsigned long timer1; 224 unsigned long timer2; 225 union { 226 struct { 227 u8 working:1; 228 u8 key_master:1; 229 u8 legacy_user:1; 230 u8 nokey: 1; 231 }; 232 u8 flags; 233 }; 234 spinlock_t lock; /* crypto lock */ 235 236 } ____cacheline_aligned; 237 238 /* struct tipc_crypto_tx_ctx - TX context for callbacks */ 239 struct tipc_crypto_tx_ctx { 240 struct tipc_aead *aead; 241 struct tipc_bearer *bearer; 242 struct tipc_media_addr dst; 243 }; 244 245 /* struct tipc_crypto_rx_ctx - RX context for callbacks */ 246 struct tipc_crypto_rx_ctx { 247 struct tipc_aead *aead; 248 struct tipc_bearer *bearer; 249 }; 250 251 static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead); 252 static inline void tipc_aead_put(struct tipc_aead *aead); 253 static void tipc_aead_free(struct rcu_head *rp); 254 static int tipc_aead_users(struct tipc_aead __rcu *aead); 255 static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim); 256 static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim); 257 static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val); 258 static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead); 259 static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey, 260 u8 mode); 261 static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src); 262 static void *tipc_aead_mem_alloc(struct crypto_aead *tfm, 263 unsigned int crypto_ctx_size, 264 u8 **iv, struct aead_request **req, 265 struct scatterlist **sg, int nsg); 266 static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, 267 struct tipc_bearer *b, 268 struct tipc_media_addr *dst, 269 struct tipc_node *__dnode); 270 static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err); 271 static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead, 272 struct sk_buff *skb, struct tipc_bearer *b); 273 static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err); 274 static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr); 275 static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead, 276 u8 tx_key, struct sk_buff *skb, 277 struct tipc_crypto *__rx); 278 static inline void tipc_crypto_key_set_state(struct tipc_crypto *c, 279 u8 new_passive, 280 u8 new_active, 281 u8 new_pending); 282 static int tipc_crypto_key_attach(struct tipc_crypto *c, 283 struct tipc_aead *aead, u8 pos, 284 bool master_key); 285 static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending); 286 static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx, 287 struct tipc_crypto *rx, 288 struct sk_buff *skb, 289 u8 tx_key); 290 static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb); 291 static int tipc_crypto_key_revoke(struct net *net, u8 tx_key); 292 static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb, 293 struct tipc_bearer *b, 294 struct tipc_media_addr *dst, 295 struct tipc_node *__dnode, u8 type); 296 static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead, 297 struct tipc_bearer *b, 298 struct sk_buff **skb, int err); 299 static void tipc_crypto_do_cmd(struct net *net, int cmd); 300 static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf); 301 static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new, 302 char *buf); 303 static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey, 304 u16 gen, u8 mode, u32 dnode); 305 static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr); 306 static void tipc_crypto_work_tx(struct work_struct *work); 307 static void tipc_crypto_work_rx(struct work_struct *work); 308 static int tipc_aead_key_generate(struct tipc_aead_key *skey); 309 310 #define is_tx(crypto) (!(crypto)->node) 311 #define is_rx(crypto) (!is_tx(crypto)) 312 313 #define key_next(cur) ((cur) % KEY_MAX + 1) 314 315 #define tipc_aead_rcu_ptr(rcu_ptr, lock) \ 316 rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock)) 317 318 #define tipc_aead_rcu_replace(rcu_ptr, ptr, lock) \ 319 do { \ 320 struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr), \ 321 lockdep_is_held(lock)); \ 322 rcu_assign_pointer((rcu_ptr), (ptr)); \ 323 tipc_aead_put(__tmp); \ 324 } while (0) 325 326 #define tipc_crypto_key_detach(rcu_ptr, lock) \ 327 tipc_aead_rcu_replace((rcu_ptr), NULL, lock) 328 329 /** 330 * tipc_aead_key_validate - Validate a AEAD user key 331 * @ukey: pointer to user key data 332 * @info: netlink info pointer 333 */ 334 int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info) 335 { 336 int keylen; 337 338 /* Check if algorithm exists */ 339 if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) { 340 GENL_SET_ERR_MSG(info, "unable to load the algorithm (module existed?)"); 341 return -ENODEV; 342 } 343 344 /* Currently, we only support the "gcm(aes)" cipher algorithm */ 345 if (strcmp(ukey->alg_name, "gcm(aes)")) { 346 GENL_SET_ERR_MSG(info, "not supported yet the algorithm"); 347 return -ENOTSUPP; 348 } 349 350 /* Check if key size is correct */ 351 keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE; 352 if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 && 353 keylen != TIPC_AES_GCM_KEY_SIZE_192 && 354 keylen != TIPC_AES_GCM_KEY_SIZE_256)) { 355 GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)"); 356 return -EKEYREJECTED; 357 } 358 359 return 0; 360 } 361 362 /** 363 * tipc_aead_key_generate - Generate new session key 364 * @skey: input/output key with new content 365 * 366 * Return: 0 in case of success, otherwise < 0 367 */ 368 static int tipc_aead_key_generate(struct tipc_aead_key *skey) 369 { 370 int rc = 0; 371 372 /* Fill the key's content with a random value via RNG cipher */ 373 rc = crypto_get_default_rng(); 374 if (likely(!rc)) { 375 rc = crypto_rng_get_bytes(crypto_default_rng, skey->key, 376 skey->keylen); 377 crypto_put_default_rng(); 378 } 379 380 return rc; 381 } 382 383 static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead) 384 { 385 struct tipc_aead *tmp; 386 387 rcu_read_lock(); 388 tmp = rcu_dereference(aead); 389 if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt))) 390 tmp = NULL; 391 rcu_read_unlock(); 392 393 return tmp; 394 } 395 396 static inline void tipc_aead_put(struct tipc_aead *aead) 397 { 398 if (aead && refcount_dec_and_test(&aead->refcnt)) 399 call_rcu(&aead->rcu, tipc_aead_free); 400 } 401 402 /** 403 * tipc_aead_free - Release AEAD key incl. all the TFMs in the list 404 * @rp: rcu head pointer 405 */ 406 static void tipc_aead_free(struct rcu_head *rp) 407 { 408 struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu); 409 struct tipc_tfm *tfm_entry, *head, *tmp; 410 411 if (aead->cloned) { 412 tipc_aead_put(aead->cloned); 413 } else { 414 head = *get_cpu_ptr(aead->tfm_entry); 415 put_cpu_ptr(aead->tfm_entry); 416 list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) { 417 crypto_free_aead(tfm_entry->tfm); 418 list_del(&tfm_entry->list); 419 kfree(tfm_entry); 420 } 421 /* Free the head */ 422 crypto_free_aead(head->tfm); 423 list_del(&head->list); 424 kfree(head); 425 } 426 free_percpu(aead->tfm_entry); 427 kfree_sensitive(aead->key); 428 kfree(aead); 429 } 430 431 static int tipc_aead_users(struct tipc_aead __rcu *aead) 432 { 433 struct tipc_aead *tmp; 434 int users = 0; 435 436 rcu_read_lock(); 437 tmp = rcu_dereference(aead); 438 if (tmp) 439 users = atomic_read(&tmp->users); 440 rcu_read_unlock(); 441 442 return users; 443 } 444 445 static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim) 446 { 447 struct tipc_aead *tmp; 448 449 rcu_read_lock(); 450 tmp = rcu_dereference(aead); 451 if (tmp) 452 atomic_add_unless(&tmp->users, 1, lim); 453 rcu_read_unlock(); 454 } 455 456 static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim) 457 { 458 struct tipc_aead *tmp; 459 460 rcu_read_lock(); 461 tmp = rcu_dereference(aead); 462 if (tmp) 463 atomic_add_unless(&rcu_dereference(aead)->users, -1, lim); 464 rcu_read_unlock(); 465 } 466 467 static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val) 468 { 469 struct tipc_aead *tmp; 470 int cur; 471 472 rcu_read_lock(); 473 tmp = rcu_dereference(aead); 474 if (tmp) { 475 do { 476 cur = atomic_read(&tmp->users); 477 if (cur == val) 478 break; 479 } while (atomic_cmpxchg(&tmp->users, cur, val) != cur); 480 } 481 rcu_read_unlock(); 482 } 483 484 /** 485 * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it 486 * @aead: the AEAD key pointer 487 */ 488 static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead) 489 { 490 struct tipc_tfm **tfm_entry; 491 struct crypto_aead *tfm; 492 493 tfm_entry = get_cpu_ptr(aead->tfm_entry); 494 *tfm_entry = list_next_entry(*tfm_entry, list); 495 tfm = (*tfm_entry)->tfm; 496 put_cpu_ptr(tfm_entry); 497 498 return tfm; 499 } 500 501 /** 502 * tipc_aead_init - Initiate TIPC AEAD 503 * @aead: returned new TIPC AEAD key handle pointer 504 * @ukey: pointer to user key data 505 * @mode: the key mode 506 * 507 * Allocate a (list of) new cipher transformation (TFM) with the specific user 508 * key data if valid. The number of the allocated TFMs can be set via the sysfs 509 * "net/tipc/max_tfms" first. 510 * Also, all the other AEAD data are also initialized. 511 * 512 * Return: 0 if the initiation is successful, otherwise: < 0 513 */ 514 static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey, 515 u8 mode) 516 { 517 struct tipc_tfm *tfm_entry, *head; 518 struct crypto_aead *tfm; 519 struct tipc_aead *tmp; 520 int keylen, err, cpu; 521 int tfm_cnt = 0; 522 523 if (unlikely(*aead)) 524 return -EEXIST; 525 526 /* Allocate a new AEAD */ 527 tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); 528 if (unlikely(!tmp)) 529 return -ENOMEM; 530 531 /* The key consists of two parts: [AES-KEY][SALT] */ 532 keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE; 533 534 /* Allocate per-cpu TFM entry pointer */ 535 tmp->tfm_entry = alloc_percpu(struct tipc_tfm *); 536 if (!tmp->tfm_entry) { 537 kfree_sensitive(tmp); 538 return -ENOMEM; 539 } 540 541 /* Make a list of TFMs with the user key data */ 542 do { 543 tfm = crypto_alloc_aead(ukey->alg_name, 0, 0); 544 if (IS_ERR(tfm)) { 545 err = PTR_ERR(tfm); 546 break; 547 } 548 549 if (unlikely(!tfm_cnt && 550 crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) { 551 crypto_free_aead(tfm); 552 err = -ENOTSUPP; 553 break; 554 } 555 556 err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE); 557 err |= crypto_aead_setkey(tfm, ukey->key, keylen); 558 if (unlikely(err)) { 559 crypto_free_aead(tfm); 560 break; 561 } 562 563 tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL); 564 if (unlikely(!tfm_entry)) { 565 crypto_free_aead(tfm); 566 err = -ENOMEM; 567 break; 568 } 569 INIT_LIST_HEAD(&tfm_entry->list); 570 tfm_entry->tfm = tfm; 571 572 /* First entry? */ 573 if (!tfm_cnt) { 574 head = tfm_entry; 575 for_each_possible_cpu(cpu) { 576 *per_cpu_ptr(tmp->tfm_entry, cpu) = head; 577 } 578 } else { 579 list_add_tail(&tfm_entry->list, &head->list); 580 } 581 582 } while (++tfm_cnt < sysctl_tipc_max_tfms); 583 584 /* Not any TFM is allocated? */ 585 if (!tfm_cnt) { 586 free_percpu(tmp->tfm_entry); 587 kfree_sensitive(tmp); 588 return err; 589 } 590 591 /* Form a hex string of some last bytes as the key's hint */ 592 bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN, 593 TIPC_AEAD_HINT_LEN); 594 595 /* Initialize the other data */ 596 tmp->mode = mode; 597 tmp->cloned = NULL; 598 tmp->authsize = TIPC_AES_GCM_TAG_SIZE; 599 tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL); 600 if (!tmp->key) { 601 tipc_aead_free(&tmp->rcu); 602 return -ENOMEM; 603 } 604 memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE); 605 atomic_set(&tmp->users, 0); 606 atomic64_set(&tmp->seqno, 0); 607 refcount_set(&tmp->refcnt, 1); 608 609 *aead = tmp; 610 return 0; 611 } 612 613 /** 614 * tipc_aead_clone - Clone a TIPC AEAD key 615 * @dst: dest key for the cloning 616 * @src: source key to clone from 617 * 618 * Make a "copy" of the source AEAD key data to the dest, the TFMs list is 619 * common for the keys. 620 * A reference to the source is hold in the "cloned" pointer for the later 621 * freeing purposes. 622 * 623 * Note: this must be done in cluster-key mode only! 624 * Return: 0 in case of success, otherwise < 0 625 */ 626 static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src) 627 { 628 struct tipc_aead *aead; 629 int cpu; 630 631 if (!src) 632 return -ENOKEY; 633 634 if (src->mode != CLUSTER_KEY) 635 return -EINVAL; 636 637 if (unlikely(*dst)) 638 return -EEXIST; 639 640 aead = kzalloc(sizeof(*aead), GFP_ATOMIC); 641 if (unlikely(!aead)) 642 return -ENOMEM; 643 644 aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC); 645 if (unlikely(!aead->tfm_entry)) { 646 kfree_sensitive(aead); 647 return -ENOMEM; 648 } 649 650 for_each_possible_cpu(cpu) { 651 *per_cpu_ptr(aead->tfm_entry, cpu) = 652 *per_cpu_ptr(src->tfm_entry, cpu); 653 } 654 655 memcpy(aead->hint, src->hint, sizeof(src->hint)); 656 aead->mode = src->mode; 657 aead->salt = src->salt; 658 aead->authsize = src->authsize; 659 atomic_set(&aead->users, 0); 660 atomic64_set(&aead->seqno, 0); 661 refcount_set(&aead->refcnt, 1); 662 663 WARN_ON(!refcount_inc_not_zero(&src->refcnt)); 664 aead->cloned = src; 665 666 *dst = aead; 667 return 0; 668 } 669 670 /** 671 * tipc_aead_mem_alloc - Allocate memory for AEAD request operations 672 * @tfm: cipher handle to be registered with the request 673 * @crypto_ctx_size: size of crypto context for callback 674 * @iv: returned pointer to IV data 675 * @req: returned pointer to AEAD request data 676 * @sg: returned pointer to SG lists 677 * @nsg: number of SG lists to be allocated 678 * 679 * Allocate memory to store the crypto context data, AEAD request, IV and SG 680 * lists, the memory layout is as follows: 681 * crypto_ctx || iv || aead_req || sg[] 682 * 683 * Return: the pointer to the memory areas in case of success, otherwise NULL 684 */ 685 static void *tipc_aead_mem_alloc(struct crypto_aead *tfm, 686 unsigned int crypto_ctx_size, 687 u8 **iv, struct aead_request **req, 688 struct scatterlist **sg, int nsg) 689 { 690 unsigned int iv_size, req_size; 691 unsigned int len; 692 u8 *mem; 693 694 iv_size = crypto_aead_ivsize(tfm); 695 req_size = sizeof(**req) + crypto_aead_reqsize(tfm); 696 697 len = crypto_ctx_size; 698 len += iv_size; 699 len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); 700 len = ALIGN(len, crypto_tfm_ctx_alignment()); 701 len += req_size; 702 len = ALIGN(len, __alignof__(struct scatterlist)); 703 len += nsg * sizeof(**sg); 704 705 mem = kmalloc(len, GFP_ATOMIC); 706 if (!mem) 707 return NULL; 708 709 *iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size, 710 crypto_aead_alignmask(tfm) + 1); 711 *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size, 712 crypto_tfm_ctx_alignment()); 713 *sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size, 714 __alignof__(struct scatterlist)); 715 716 return (void *)mem; 717 } 718 719 /** 720 * tipc_aead_encrypt - Encrypt a message 721 * @aead: TIPC AEAD key for the message encryption 722 * @skb: the input/output skb 723 * @b: TIPC bearer where the message will be delivered after the encryption 724 * @dst: the destination media address 725 * @__dnode: TIPC dest node if "known" 726 * 727 * Return: 728 * * 0 : if the encryption has completed 729 * * -EINPROGRESS/-EBUSY : if a callback will be performed 730 * * < 0 : the encryption has failed 731 */ 732 static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, 733 struct tipc_bearer *b, 734 struct tipc_media_addr *dst, 735 struct tipc_node *__dnode) 736 { 737 struct crypto_aead *tfm = tipc_aead_tfm_next(aead); 738 struct tipc_crypto_tx_ctx *tx_ctx; 739 struct aead_request *req; 740 struct sk_buff *trailer; 741 struct scatterlist *sg; 742 struct tipc_ehdr *ehdr; 743 int ehsz, len, tailen, nsg, rc; 744 void *ctx; 745 u32 salt; 746 u8 *iv; 747 748 /* Make sure message len at least 4-byte aligned */ 749 len = ALIGN(skb->len, 4); 750 tailen = len - skb->len + aead->authsize; 751 752 /* Expand skb tail for authentication tag: 753 * As for simplicity, we'd have made sure skb having enough tailroom 754 * for authentication tag @skb allocation. Even when skb is nonlinear 755 * but there is no frag_list, it should be still fine! 756 * Otherwise, we must cow it to be a writable buffer with the tailroom. 757 */ 758 SKB_LINEAR_ASSERT(skb); 759 if (tailen > skb_tailroom(skb)) { 760 pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n", 761 skb_tailroom(skb), tailen); 762 } 763 764 nsg = skb_cow_data(skb, tailen, &trailer); 765 if (unlikely(nsg < 0)) { 766 pr_err("TX: skb_cow_data() returned %d\n", nsg); 767 return nsg; 768 } 769 770 pskb_put(skb, trailer, tailen); 771 772 /* Allocate memory for the AEAD operation */ 773 ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg); 774 if (unlikely(!ctx)) 775 return -ENOMEM; 776 TIPC_SKB_CB(skb)->crypto_ctx = ctx; 777 778 /* Map skb to the sg lists */ 779 sg_init_table(sg, nsg); 780 rc = skb_to_sgvec(skb, sg, 0, skb->len); 781 if (unlikely(rc < 0)) { 782 pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg); 783 goto exit; 784 } 785 786 /* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)] 787 * In case we're in cluster-key mode, SALT is varied by xor-ing with 788 * the source address (or w0 of id), otherwise with the dest address 789 * if dest is known. 790 */ 791 ehdr = (struct tipc_ehdr *)skb->data; 792 salt = aead->salt; 793 if (aead->mode == CLUSTER_KEY) 794 salt ^= __be32_to_cpu(ehdr->addr); 795 else if (__dnode) 796 salt ^= tipc_node_get_addr(__dnode); 797 memcpy(iv, &salt, 4); 798 memcpy(iv + 4, (u8 *)&ehdr->seqno, 8); 799 800 /* Prepare request */ 801 ehsz = tipc_ehdr_size(ehdr); 802 aead_request_set_tfm(req, tfm); 803 aead_request_set_ad(req, ehsz); 804 aead_request_set_crypt(req, sg, sg, len - ehsz, iv); 805 806 /* Set callback function & data */ 807 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 808 tipc_aead_encrypt_done, skb); 809 tx_ctx = (struct tipc_crypto_tx_ctx *)ctx; 810 tx_ctx->aead = aead; 811 tx_ctx->bearer = b; 812 memcpy(&tx_ctx->dst, dst, sizeof(*dst)); 813 814 /* Hold bearer */ 815 if (unlikely(!tipc_bearer_hold(b))) { 816 rc = -ENODEV; 817 goto exit; 818 } 819 820 /* Now, do encrypt */ 821 rc = crypto_aead_encrypt(req); 822 if (rc == -EINPROGRESS || rc == -EBUSY) 823 return rc; 824 825 tipc_bearer_put(b); 826 827 exit: 828 kfree(ctx); 829 TIPC_SKB_CB(skb)->crypto_ctx = NULL; 830 return rc; 831 } 832 833 static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err) 834 { 835 struct sk_buff *skb = base->data; 836 struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; 837 struct tipc_bearer *b = tx_ctx->bearer; 838 struct tipc_aead *aead = tx_ctx->aead; 839 struct tipc_crypto *tx = aead->crypto; 840 struct net *net = tx->net; 841 842 switch (err) { 843 case 0: 844 this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]); 845 rcu_read_lock(); 846 if (likely(test_bit(0, &b->up))) 847 b->media->send_msg(net, skb, b, &tx_ctx->dst); 848 else 849 kfree_skb(skb); 850 rcu_read_unlock(); 851 break; 852 case -EINPROGRESS: 853 return; 854 default: 855 this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]); 856 kfree_skb(skb); 857 break; 858 } 859 860 kfree(tx_ctx); 861 tipc_bearer_put(b); 862 tipc_aead_put(aead); 863 } 864 865 /** 866 * tipc_aead_decrypt - Decrypt an encrypted message 867 * @net: struct net 868 * @aead: TIPC AEAD for the message decryption 869 * @skb: the input/output skb 870 * @b: TIPC bearer where the message has been received 871 * 872 * Return: 873 * * 0 : if the decryption has completed 874 * * -EINPROGRESS/-EBUSY : if a callback will be performed 875 * * < 0 : the decryption has failed 876 */ 877 static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead, 878 struct sk_buff *skb, struct tipc_bearer *b) 879 { 880 struct tipc_crypto_rx_ctx *rx_ctx; 881 struct aead_request *req; 882 struct crypto_aead *tfm; 883 struct sk_buff *unused; 884 struct scatterlist *sg; 885 struct tipc_ehdr *ehdr; 886 int ehsz, nsg, rc; 887 void *ctx; 888 u32 salt; 889 u8 *iv; 890 891 if (unlikely(!aead)) 892 return -ENOKEY; 893 894 nsg = skb_cow_data(skb, 0, &unused); 895 if (unlikely(nsg < 0)) { 896 pr_err("RX: skb_cow_data() returned %d\n", nsg); 897 return nsg; 898 } 899 900 /* Allocate memory for the AEAD operation */ 901 tfm = tipc_aead_tfm_next(aead); 902 ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg); 903 if (unlikely(!ctx)) 904 return -ENOMEM; 905 TIPC_SKB_CB(skb)->crypto_ctx = ctx; 906 907 /* Map skb to the sg lists */ 908 sg_init_table(sg, nsg); 909 rc = skb_to_sgvec(skb, sg, 0, skb->len); 910 if (unlikely(rc < 0)) { 911 pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg); 912 goto exit; 913 } 914 915 /* Reconstruct IV: */ 916 ehdr = (struct tipc_ehdr *)skb->data; 917 salt = aead->salt; 918 if (aead->mode == CLUSTER_KEY) 919 salt ^= __be32_to_cpu(ehdr->addr); 920 else if (ehdr->destined) 921 salt ^= tipc_own_addr(net); 922 memcpy(iv, &salt, 4); 923 memcpy(iv + 4, (u8 *)&ehdr->seqno, 8); 924 925 /* Prepare request */ 926 ehsz = tipc_ehdr_size(ehdr); 927 aead_request_set_tfm(req, tfm); 928 aead_request_set_ad(req, ehsz); 929 aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv); 930 931 /* Set callback function & data */ 932 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 933 tipc_aead_decrypt_done, skb); 934 rx_ctx = (struct tipc_crypto_rx_ctx *)ctx; 935 rx_ctx->aead = aead; 936 rx_ctx->bearer = b; 937 938 /* Hold bearer */ 939 if (unlikely(!tipc_bearer_hold(b))) { 940 rc = -ENODEV; 941 goto exit; 942 } 943 944 /* Now, do decrypt */ 945 rc = crypto_aead_decrypt(req); 946 if (rc == -EINPROGRESS || rc == -EBUSY) 947 return rc; 948 949 tipc_bearer_put(b); 950 951 exit: 952 kfree(ctx); 953 TIPC_SKB_CB(skb)->crypto_ctx = NULL; 954 return rc; 955 } 956 957 static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err) 958 { 959 struct sk_buff *skb = base->data; 960 struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; 961 struct tipc_bearer *b = rx_ctx->bearer; 962 struct tipc_aead *aead = rx_ctx->aead; 963 struct tipc_crypto_stats __percpu *stats = aead->crypto->stats; 964 struct net *net = aead->crypto->net; 965 966 switch (err) { 967 case 0: 968 this_cpu_inc(stats->stat[STAT_ASYNC_OK]); 969 break; 970 case -EINPROGRESS: 971 return; 972 default: 973 this_cpu_inc(stats->stat[STAT_ASYNC_NOK]); 974 break; 975 } 976 977 kfree(rx_ctx); 978 tipc_crypto_rcv_complete(net, aead, b, &skb, err); 979 if (likely(skb)) { 980 if (likely(test_bit(0, &b->up))) 981 tipc_rcv(net, skb, b); 982 else 983 kfree_skb(skb); 984 } 985 986 tipc_bearer_put(b); 987 } 988 989 static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr) 990 { 991 return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE; 992 } 993 994 /** 995 * tipc_ehdr_validate - Validate an encryption message 996 * @skb: the message buffer 997 * 998 * Return: "true" if this is a valid encryption message, otherwise "false" 999 */ 1000 bool tipc_ehdr_validate(struct sk_buff *skb) 1001 { 1002 struct tipc_ehdr *ehdr; 1003 int ehsz; 1004 1005 if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE))) 1006 return false; 1007 1008 ehdr = (struct tipc_ehdr *)skb->data; 1009 if (unlikely(ehdr->version != TIPC_EVERSION)) 1010 return false; 1011 ehsz = tipc_ehdr_size(ehdr); 1012 if (unlikely(!pskb_may_pull(skb, ehsz))) 1013 return false; 1014 if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE)) 1015 return false; 1016 1017 return true; 1018 } 1019 1020 /** 1021 * tipc_ehdr_build - Build TIPC encryption message header 1022 * @net: struct net 1023 * @aead: TX AEAD key to be used for the message encryption 1024 * @tx_key: key id used for the message encryption 1025 * @skb: input/output message skb 1026 * @__rx: RX crypto handle if dest is "known" 1027 * 1028 * Return: the header size if the building is successful, otherwise < 0 1029 */ 1030 static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead, 1031 u8 tx_key, struct sk_buff *skb, 1032 struct tipc_crypto *__rx) 1033 { 1034 struct tipc_msg *hdr = buf_msg(skb); 1035 struct tipc_ehdr *ehdr; 1036 u32 user = msg_user(hdr); 1037 u64 seqno; 1038 int ehsz; 1039 1040 /* Make room for encryption header */ 1041 ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE; 1042 WARN_ON(skb_headroom(skb) < ehsz); 1043 ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz); 1044 1045 /* Obtain a seqno first: 1046 * Use the key seqno (= cluster wise) if dest is unknown or we're in 1047 * cluster key mode, otherwise it's better for a per-peer seqno! 1048 */ 1049 if (!__rx || aead->mode == CLUSTER_KEY) 1050 seqno = atomic64_inc_return(&aead->seqno); 1051 else 1052 seqno = atomic64_inc_return(&__rx->sndnxt); 1053 1054 /* Revoke the key if seqno is wrapped around */ 1055 if (unlikely(!seqno)) 1056 return tipc_crypto_key_revoke(net, tx_key); 1057 1058 /* Word 1-2 */ 1059 ehdr->seqno = cpu_to_be64(seqno); 1060 1061 /* Words 0, 3- */ 1062 ehdr->version = TIPC_EVERSION; 1063 ehdr->user = 0; 1064 ehdr->keepalive = 0; 1065 ehdr->tx_key = tx_key; 1066 ehdr->destined = (__rx) ? 1 : 0; 1067 ehdr->rx_key_active = (__rx) ? __rx->key.active : 0; 1068 ehdr->rx_nokey = (__rx) ? __rx->nokey : 0; 1069 ehdr->master_key = aead->crypto->key_master; 1070 ehdr->reserved_1 = 0; 1071 ehdr->reserved_2 = 0; 1072 1073 switch (user) { 1074 case LINK_CONFIG: 1075 ehdr->user = LINK_CONFIG; 1076 memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN); 1077 break; 1078 default: 1079 if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) { 1080 ehdr->user = LINK_PROTOCOL; 1081 ehdr->keepalive = msg_is_keepalive(hdr); 1082 } 1083 ehdr->addr = hdr->hdr[3]; 1084 break; 1085 } 1086 1087 return ehsz; 1088 } 1089 1090 static inline void tipc_crypto_key_set_state(struct tipc_crypto *c, 1091 u8 new_passive, 1092 u8 new_active, 1093 u8 new_pending) 1094 { 1095 struct tipc_key old = c->key; 1096 char buf[32]; 1097 1098 c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) | 1099 ((new_active & KEY_MASK) << (KEY_BITS)) | 1100 ((new_pending & KEY_MASK)); 1101 1102 pr_debug("%s: key changing %s ::%pS\n", c->name, 1103 tipc_key_change_dump(old, c->key, buf), 1104 __builtin_return_address(0)); 1105 } 1106 1107 /** 1108 * tipc_crypto_key_init - Initiate a new user / AEAD key 1109 * @c: TIPC crypto to which new key is attached 1110 * @ukey: the user key 1111 * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY) 1112 * @master_key: specify this is a cluster master key 1113 * 1114 * A new TIPC AEAD key will be allocated and initiated with the specified user 1115 * key, then attached to the TIPC crypto. 1116 * 1117 * Return: new key id in case of success, otherwise: < 0 1118 */ 1119 int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey, 1120 u8 mode, bool master_key) 1121 { 1122 struct tipc_aead *aead = NULL; 1123 int rc = 0; 1124 1125 /* Initiate with the new user key */ 1126 rc = tipc_aead_init(&aead, ukey, mode); 1127 1128 /* Attach it to the crypto */ 1129 if (likely(!rc)) { 1130 rc = tipc_crypto_key_attach(c, aead, 0, master_key); 1131 if (rc < 0) 1132 tipc_aead_free(&aead->rcu); 1133 } 1134 1135 return rc; 1136 } 1137 1138 /** 1139 * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto 1140 * @c: TIPC crypto to which the new AEAD key is attached 1141 * @aead: the new AEAD key pointer 1142 * @pos: desired slot in the crypto key array, = 0 if any! 1143 * @master_key: specify this is a cluster master key 1144 * 1145 * Return: new key id in case of success, otherwise: -EBUSY 1146 */ 1147 static int tipc_crypto_key_attach(struct tipc_crypto *c, 1148 struct tipc_aead *aead, u8 pos, 1149 bool master_key) 1150 { 1151 struct tipc_key key; 1152 int rc = -EBUSY; 1153 u8 new_key; 1154 1155 spin_lock_bh(&c->lock); 1156 key = c->key; 1157 if (master_key) { 1158 new_key = KEY_MASTER; 1159 goto attach; 1160 } 1161 if (key.active && key.passive) 1162 goto exit; 1163 if (key.pending) { 1164 if (tipc_aead_users(c->aead[key.pending]) > 0) 1165 goto exit; 1166 /* if (pos): ok with replacing, will be aligned when needed */ 1167 /* Replace it */ 1168 new_key = key.pending; 1169 } else { 1170 if (pos) { 1171 if (key.active && pos != key_next(key.active)) { 1172 key.passive = pos; 1173 new_key = pos; 1174 goto attach; 1175 } else if (!key.active && !key.passive) { 1176 key.pending = pos; 1177 new_key = pos; 1178 goto attach; 1179 } 1180 } 1181 key.pending = key_next(key.active ?: key.passive); 1182 new_key = key.pending; 1183 } 1184 1185 attach: 1186 aead->crypto = c; 1187 aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen; 1188 tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock); 1189 if (likely(c->key.keys != key.keys)) 1190 tipc_crypto_key_set_state(c, key.passive, key.active, 1191 key.pending); 1192 c->working = 1; 1193 c->nokey = 0; 1194 c->key_master |= master_key; 1195 rc = new_key; 1196 1197 exit: 1198 spin_unlock_bh(&c->lock); 1199 return rc; 1200 } 1201 1202 void tipc_crypto_key_flush(struct tipc_crypto *c) 1203 { 1204 struct tipc_crypto *tx, *rx; 1205 int k; 1206 1207 spin_lock_bh(&c->lock); 1208 if (is_rx(c)) { 1209 /* Try to cancel pending work */ 1210 rx = c; 1211 tx = tipc_net(rx->net)->crypto_tx; 1212 if (cancel_delayed_work(&rx->work)) { 1213 kfree(rx->skey); 1214 rx->skey = NULL; 1215 atomic_xchg(&rx->key_distr, 0); 1216 tipc_node_put(rx->node); 1217 } 1218 /* RX stopping => decrease TX key users if any */ 1219 k = atomic_xchg(&rx->peer_rx_active, 0); 1220 if (k) { 1221 tipc_aead_users_dec(tx->aead[k], 0); 1222 /* Mark the point TX key users changed */ 1223 tx->timer1 = jiffies; 1224 } 1225 } 1226 1227 c->flags = 0; 1228 tipc_crypto_key_set_state(c, 0, 0, 0); 1229 for (k = KEY_MIN; k <= KEY_MAX; k++) 1230 tipc_crypto_key_detach(c->aead[k], &c->lock); 1231 atomic64_set(&c->sndnxt, 0); 1232 spin_unlock_bh(&c->lock); 1233 } 1234 1235 /** 1236 * tipc_crypto_key_try_align - Align RX keys if possible 1237 * @rx: RX crypto handle 1238 * @new_pending: new pending slot if aligned (= TX key from peer) 1239 * 1240 * Peer has used an unknown key slot, this only happens when peer has left and 1241 * rejoned, or we are newcomer. 1242 * That means, there must be no active key but a pending key at unaligned slot. 1243 * If so, we try to move the pending key to the new slot. 1244 * Note: A potential passive key can exist, it will be shifted correspondingly! 1245 * 1246 * Return: "true" if key is successfully aligned, otherwise "false" 1247 */ 1248 static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending) 1249 { 1250 struct tipc_aead *tmp1, *tmp2 = NULL; 1251 struct tipc_key key; 1252 bool aligned = false; 1253 u8 new_passive = 0; 1254 int x; 1255 1256 spin_lock(&rx->lock); 1257 key = rx->key; 1258 if (key.pending == new_pending) { 1259 aligned = true; 1260 goto exit; 1261 } 1262 if (key.active) 1263 goto exit; 1264 if (!key.pending) 1265 goto exit; 1266 if (tipc_aead_users(rx->aead[key.pending]) > 0) 1267 goto exit; 1268 1269 /* Try to "isolate" this pending key first */ 1270 tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock); 1271 if (!refcount_dec_if_one(&tmp1->refcnt)) 1272 goto exit; 1273 rcu_assign_pointer(rx->aead[key.pending], NULL); 1274 1275 /* Move passive key if any */ 1276 if (key.passive) { 1277 tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock)); 1278 x = (key.passive - key.pending + new_pending) % KEY_MAX; 1279 new_passive = (x <= 0) ? x + KEY_MAX : x; 1280 } 1281 1282 /* Re-allocate the key(s) */ 1283 tipc_crypto_key_set_state(rx, new_passive, 0, new_pending); 1284 rcu_assign_pointer(rx->aead[new_pending], tmp1); 1285 if (new_passive) 1286 rcu_assign_pointer(rx->aead[new_passive], tmp2); 1287 refcount_set(&tmp1->refcnt, 1); 1288 aligned = true; 1289 pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending, 1290 new_pending); 1291 1292 exit: 1293 spin_unlock(&rx->lock); 1294 return aligned; 1295 } 1296 1297 /** 1298 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption 1299 * @tx: TX crypto handle 1300 * @rx: RX crypto handle (can be NULL) 1301 * @skb: the message skb which will be decrypted later 1302 * @tx_key: peer TX key id 1303 * 1304 * This function looks up the existing TX keys and pick one which is suitable 1305 * for the message decryption, that must be a cluster key and not used before 1306 * on the same message (i.e. recursive). 1307 * 1308 * Return: the TX AEAD key handle in case of success, otherwise NULL 1309 */ 1310 static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx, 1311 struct tipc_crypto *rx, 1312 struct sk_buff *skb, 1313 u8 tx_key) 1314 { 1315 struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb); 1316 struct tipc_aead *aead = NULL; 1317 struct tipc_key key = tx->key; 1318 u8 k, i = 0; 1319 1320 /* Initialize data if not yet */ 1321 if (!skb_cb->tx_clone_deferred) { 1322 skb_cb->tx_clone_deferred = 1; 1323 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); 1324 } 1325 1326 skb_cb->tx_clone_ctx.rx = rx; 1327 if (++skb_cb->tx_clone_ctx.recurs > 2) 1328 return NULL; 1329 1330 /* Pick one TX key */ 1331 spin_lock(&tx->lock); 1332 if (tx_key == KEY_MASTER) { 1333 aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock); 1334 goto done; 1335 } 1336 do { 1337 k = (i == 0) ? key.pending : 1338 ((i == 1) ? key.active : key.passive); 1339 if (!k) 1340 continue; 1341 aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock); 1342 if (!aead) 1343 continue; 1344 if (aead->mode != CLUSTER_KEY || 1345 aead == skb_cb->tx_clone_ctx.last) { 1346 aead = NULL; 1347 continue; 1348 } 1349 /* Ok, found one cluster key */ 1350 skb_cb->tx_clone_ctx.last = aead; 1351 WARN_ON(skb->next); 1352 skb->next = skb_clone(skb, GFP_ATOMIC); 1353 if (unlikely(!skb->next)) 1354 pr_warn("Failed to clone skb for next round if any\n"); 1355 break; 1356 } while (++i < 3); 1357 1358 done: 1359 if (likely(aead)) 1360 WARN_ON(!refcount_inc_not_zero(&aead->refcnt)); 1361 spin_unlock(&tx->lock); 1362 1363 return aead; 1364 } 1365 1366 /** 1367 * tipc_crypto_key_synch: Synch own key data according to peer key status 1368 * @rx: RX crypto handle 1369 * @skb: TIPCv2 message buffer (incl. the ehdr from peer) 1370 * 1371 * This function updates the peer node related data as the peer RX active key 1372 * has changed, so the number of TX keys' users on this node are increased and 1373 * decreased correspondingly. 1374 * 1375 * It also considers if peer has no key, then we need to make own master key 1376 * (if any) taking over i.e. starting grace period and also trigger key 1377 * distributing process. 1378 * 1379 * The "per-peer" sndnxt is also reset when the peer key has switched. 1380 */ 1381 static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb) 1382 { 1383 struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb); 1384 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; 1385 struct tipc_msg *hdr = buf_msg(skb); 1386 u32 self = tipc_own_addr(rx->net); 1387 u8 cur, new; 1388 unsigned long delay; 1389 1390 /* Update RX 'key_master' flag according to peer, also mark "legacy" if 1391 * a peer has no master key. 1392 */ 1393 rx->key_master = ehdr->master_key; 1394 if (!rx->key_master) 1395 tx->legacy_user = 1; 1396 1397 /* For later cases, apply only if message is destined to this node */ 1398 if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self) 1399 return; 1400 1401 /* Case 1: Peer has no keys, let's make master key take over */ 1402 if (ehdr->rx_nokey) { 1403 /* Set or extend grace period */ 1404 tx->timer2 = jiffies; 1405 /* Schedule key distributing for the peer if not yet */ 1406 if (tx->key.keys && 1407 !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) { 1408 get_random_bytes(&delay, 2); 1409 delay %= 5; 1410 delay = msecs_to_jiffies(500 * ++delay); 1411 if (queue_delayed_work(tx->wq, &rx->work, delay)) 1412 tipc_node_get(rx->node); 1413 } 1414 } else { 1415 /* Cancel a pending key distributing if any */ 1416 atomic_xchg(&rx->key_distr, 0); 1417 } 1418 1419 /* Case 2: Peer RX active key has changed, let's update own TX users */ 1420 cur = atomic_read(&rx->peer_rx_active); 1421 new = ehdr->rx_key_active; 1422 if (tx->key.keys && 1423 cur != new && 1424 atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) { 1425 if (new) 1426 tipc_aead_users_inc(tx->aead[new], INT_MAX); 1427 if (cur) 1428 tipc_aead_users_dec(tx->aead[cur], 0); 1429 1430 atomic64_set(&rx->sndnxt, 0); 1431 /* Mark the point TX key users changed */ 1432 tx->timer1 = jiffies; 1433 1434 pr_debug("%s: key users changed %d-- %d++, peer %s\n", 1435 tx->name, cur, new, rx->name); 1436 } 1437 } 1438 1439 static int tipc_crypto_key_revoke(struct net *net, u8 tx_key) 1440 { 1441 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; 1442 struct tipc_key key; 1443 1444 spin_lock(&tx->lock); 1445 key = tx->key; 1446 WARN_ON(!key.active || tx_key != key.active); 1447 1448 /* Free the active key */ 1449 tipc_crypto_key_set_state(tx, key.passive, 0, key.pending); 1450 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); 1451 spin_unlock(&tx->lock); 1452 1453 pr_warn("%s: key is revoked\n", tx->name); 1454 return -EKEYREVOKED; 1455 } 1456 1457 int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net, 1458 struct tipc_node *node) 1459 { 1460 struct tipc_crypto *c; 1461 1462 if (*crypto) 1463 return -EEXIST; 1464 1465 /* Allocate crypto */ 1466 c = kzalloc(sizeof(*c), GFP_ATOMIC); 1467 if (!c) 1468 return -ENOMEM; 1469 1470 /* Allocate workqueue on TX */ 1471 if (!node) { 1472 c->wq = alloc_ordered_workqueue("tipc_crypto", 0); 1473 if (!c->wq) { 1474 kfree(c); 1475 return -ENOMEM; 1476 } 1477 } 1478 1479 /* Allocate statistic structure */ 1480 c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC); 1481 if (!c->stats) { 1482 if (c->wq) 1483 destroy_workqueue(c->wq); 1484 kfree_sensitive(c); 1485 return -ENOMEM; 1486 } 1487 1488 c->flags = 0; 1489 c->net = net; 1490 c->node = node; 1491 get_random_bytes(&c->key_gen, 2); 1492 tipc_crypto_key_set_state(c, 0, 0, 0); 1493 atomic_set(&c->key_distr, 0); 1494 atomic_set(&c->peer_rx_active, 0); 1495 atomic64_set(&c->sndnxt, 0); 1496 c->timer1 = jiffies; 1497 c->timer2 = jiffies; 1498 c->rekeying_intv = TIPC_REKEYING_INTV_DEF; 1499 spin_lock_init(&c->lock); 1500 scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX", 1501 (is_rx(c)) ? tipc_node_get_id_str(c->node) : 1502 tipc_own_id_string(c->net)); 1503 1504 if (is_rx(c)) 1505 INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx); 1506 else 1507 INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx); 1508 1509 *crypto = c; 1510 return 0; 1511 } 1512 1513 void tipc_crypto_stop(struct tipc_crypto **crypto) 1514 { 1515 struct tipc_crypto *c = *crypto; 1516 u8 k; 1517 1518 if (!c) 1519 return; 1520 1521 /* Flush any queued works & destroy wq */ 1522 if (is_tx(c)) { 1523 c->rekeying_intv = 0; 1524 cancel_delayed_work_sync(&c->work); 1525 destroy_workqueue(c->wq); 1526 } 1527 1528 /* Release AEAD keys */ 1529 rcu_read_lock(); 1530 for (k = KEY_MIN; k <= KEY_MAX; k++) 1531 tipc_aead_put(rcu_dereference(c->aead[k])); 1532 rcu_read_unlock(); 1533 pr_debug("%s: has been stopped\n", c->name); 1534 1535 /* Free this crypto statistics */ 1536 free_percpu(c->stats); 1537 1538 *crypto = NULL; 1539 kfree_sensitive(c); 1540 } 1541 1542 void tipc_crypto_timeout(struct tipc_crypto *rx) 1543 { 1544 struct tipc_net *tn = tipc_net(rx->net); 1545 struct tipc_crypto *tx = tn->crypto_tx; 1546 struct tipc_key key; 1547 int cmd; 1548 1549 /* TX pending: taking all users & stable -> active */ 1550 spin_lock(&tx->lock); 1551 key = tx->key; 1552 if (key.active && tipc_aead_users(tx->aead[key.active]) > 0) 1553 goto s1; 1554 if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0) 1555 goto s1; 1556 if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME)) 1557 goto s1; 1558 1559 tipc_crypto_key_set_state(tx, key.passive, key.pending, 0); 1560 if (key.active) 1561 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); 1562 this_cpu_inc(tx->stats->stat[STAT_SWITCHES]); 1563 pr_info("%s: key[%d] is activated\n", tx->name, key.pending); 1564 1565 s1: 1566 spin_unlock(&tx->lock); 1567 1568 /* RX pending: having user -> active */ 1569 spin_lock(&rx->lock); 1570 key = rx->key; 1571 if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0) 1572 goto s2; 1573 1574 if (key.active) 1575 key.passive = key.active; 1576 key.active = key.pending; 1577 rx->timer2 = jiffies; 1578 tipc_crypto_key_set_state(rx, key.passive, key.active, 0); 1579 this_cpu_inc(rx->stats->stat[STAT_SWITCHES]); 1580 pr_info("%s: key[%d] is activated\n", rx->name, key.pending); 1581 goto s5; 1582 1583 s2: 1584 /* RX pending: not working -> remove */ 1585 if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10) 1586 goto s3; 1587 1588 tipc_crypto_key_set_state(rx, key.passive, key.active, 0); 1589 tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock); 1590 pr_debug("%s: key[%d] is removed\n", rx->name, key.pending); 1591 goto s5; 1592 1593 s3: 1594 /* RX active: timed out or no user -> pending */ 1595 if (!key.active) 1596 goto s4; 1597 if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) && 1598 tipc_aead_users(rx->aead[key.active]) > 0) 1599 goto s4; 1600 1601 if (key.pending) 1602 key.passive = key.active; 1603 else 1604 key.pending = key.active; 1605 rx->timer2 = jiffies; 1606 tipc_crypto_key_set_state(rx, key.passive, 0, key.pending); 1607 tipc_aead_users_set(rx->aead[key.pending], 0); 1608 pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active); 1609 goto s5; 1610 1611 s4: 1612 /* RX passive: outdated or not working -> free */ 1613 if (!key.passive) 1614 goto s5; 1615 if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) && 1616 tipc_aead_users(rx->aead[key.passive]) > -10) 1617 goto s5; 1618 1619 tipc_crypto_key_set_state(rx, 0, key.active, key.pending); 1620 tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock); 1621 pr_debug("%s: key[%d] is freed\n", rx->name, key.passive); 1622 1623 s5: 1624 spin_unlock(&rx->lock); 1625 1626 /* Relax it here, the flag will be set again if it really is, but only 1627 * when we are not in grace period for safety! 1628 */ 1629 if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) 1630 tx->legacy_user = 0; 1631 1632 /* Limit max_tfms & do debug commands if needed */ 1633 if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM)) 1634 return; 1635 1636 cmd = sysctl_tipc_max_tfms; 1637 sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF; 1638 tipc_crypto_do_cmd(rx->net, cmd); 1639 } 1640 1641 static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb, 1642 struct tipc_bearer *b, 1643 struct tipc_media_addr *dst, 1644 struct tipc_node *__dnode, u8 type) 1645 { 1646 struct sk_buff *skb; 1647 1648 skb = skb_clone(_skb, GFP_ATOMIC); 1649 if (skb) { 1650 TIPC_SKB_CB(skb)->xmit_type = type; 1651 tipc_crypto_xmit(net, &skb, b, dst, __dnode); 1652 if (skb) 1653 b->media->send_msg(net, skb, b, dst); 1654 } 1655 } 1656 1657 /** 1658 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit 1659 * @net: struct net 1660 * @skb: input/output message skb pointer 1661 * @b: bearer used for xmit later 1662 * @dst: destination media address 1663 * @__dnode: destination node for reference if any 1664 * 1665 * First, build an encryption message header on the top of the message, then 1666 * encrypt the original TIPC message by using the pending, master or active 1667 * key with this preference order. 1668 * If the encryption is successful, the encrypted skb is returned directly or 1669 * via the callback. 1670 * Otherwise, the skb is freed! 1671 * 1672 * Return: 1673 * * 0 : the encryption has succeeded (or no encryption) 1674 * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made 1675 * * -ENOKEK : the encryption has failed due to no key 1676 * * -EKEYREVOKED : the encryption has failed due to key revoked 1677 * * -ENOMEM : the encryption has failed due to no memory 1678 * * < 0 : the encryption has failed due to other reasons 1679 */ 1680 int tipc_crypto_xmit(struct net *net, struct sk_buff **skb, 1681 struct tipc_bearer *b, struct tipc_media_addr *dst, 1682 struct tipc_node *__dnode) 1683 { 1684 struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode); 1685 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; 1686 struct tipc_crypto_stats __percpu *stats = tx->stats; 1687 struct tipc_msg *hdr = buf_msg(*skb); 1688 struct tipc_key key = tx->key; 1689 struct tipc_aead *aead = NULL; 1690 u32 user = msg_user(hdr); 1691 u32 type = msg_type(hdr); 1692 int rc = -ENOKEY; 1693 u8 tx_key = 0; 1694 1695 /* No encryption? */ 1696 if (!tx->working) 1697 return 0; 1698 1699 /* Pending key if peer has active on it or probing time */ 1700 if (unlikely(key.pending)) { 1701 tx_key = key.pending; 1702 if (!tx->key_master && !key.active) 1703 goto encrypt; 1704 if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key) 1705 goto encrypt; 1706 if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) { 1707 pr_debug("%s: probing for key[%d]\n", tx->name, 1708 key.pending); 1709 goto encrypt; 1710 } 1711 if (user == LINK_CONFIG || user == LINK_PROTOCOL) 1712 tipc_crypto_clone_msg(net, *skb, b, dst, __dnode, 1713 SKB_PROBING); 1714 } 1715 1716 /* Master key if this is a *vital* message or in grace period */ 1717 if (tx->key_master) { 1718 tx_key = KEY_MASTER; 1719 if (!key.active) 1720 goto encrypt; 1721 if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) { 1722 pr_debug("%s: gracing for msg (%d %d)\n", tx->name, 1723 user, type); 1724 goto encrypt; 1725 } 1726 if (user == LINK_CONFIG || 1727 (user == LINK_PROTOCOL && type == RESET_MSG) || 1728 (user == MSG_CRYPTO && type == KEY_DISTR_MSG) || 1729 time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) { 1730 if (__rx && __rx->key_master && 1731 !atomic_read(&__rx->peer_rx_active)) 1732 goto encrypt; 1733 if (!__rx) { 1734 if (likely(!tx->legacy_user)) 1735 goto encrypt; 1736 tipc_crypto_clone_msg(net, *skb, b, dst, 1737 __dnode, SKB_GRACING); 1738 } 1739 } 1740 } 1741 1742 /* Else, use the active key if any */ 1743 if (likely(key.active)) { 1744 tx_key = key.active; 1745 goto encrypt; 1746 } 1747 1748 goto exit; 1749 1750 encrypt: 1751 aead = tipc_aead_get(tx->aead[tx_key]); 1752 if (unlikely(!aead)) 1753 goto exit; 1754 rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx); 1755 if (likely(rc > 0)) 1756 rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode); 1757 1758 exit: 1759 switch (rc) { 1760 case 0: 1761 this_cpu_inc(stats->stat[STAT_OK]); 1762 break; 1763 case -EINPROGRESS: 1764 case -EBUSY: 1765 this_cpu_inc(stats->stat[STAT_ASYNC]); 1766 *skb = NULL; 1767 return rc; 1768 default: 1769 this_cpu_inc(stats->stat[STAT_NOK]); 1770 if (rc == -ENOKEY) 1771 this_cpu_inc(stats->stat[STAT_NOKEYS]); 1772 else if (rc == -EKEYREVOKED) 1773 this_cpu_inc(stats->stat[STAT_BADKEYS]); 1774 kfree_skb(*skb); 1775 *skb = NULL; 1776 break; 1777 } 1778 1779 tipc_aead_put(aead); 1780 return rc; 1781 } 1782 1783 /** 1784 * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer 1785 * @net: struct net 1786 * @rx: RX crypto handle 1787 * @skb: input/output message skb pointer 1788 * @b: bearer where the message has been received 1789 * 1790 * If the decryption is successful, the decrypted skb is returned directly or 1791 * as the callback, the encryption header and auth tag will be trimed out 1792 * before forwarding to tipc_rcv() via the tipc_crypto_rcv_complete(). 1793 * Otherwise, the skb will be freed! 1794 * Note: RX key(s) can be re-aligned, or in case of no key suitable, TX 1795 * cluster key(s) can be taken for decryption (- recursive). 1796 * 1797 * Return: 1798 * * 0 : the decryption has successfully completed 1799 * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made 1800 * * -ENOKEY : the decryption has failed due to no key 1801 * * -EBADMSG : the decryption has failed due to bad message 1802 * * -ENOMEM : the decryption has failed due to no memory 1803 * * < 0 : the decryption has failed due to other reasons 1804 */ 1805 int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx, 1806 struct sk_buff **skb, struct tipc_bearer *b) 1807 { 1808 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; 1809 struct tipc_crypto_stats __percpu *stats; 1810 struct tipc_aead *aead = NULL; 1811 struct tipc_key key; 1812 int rc = -ENOKEY; 1813 u8 tx_key, n; 1814 1815 tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key; 1816 1817 /* New peer? 1818 * Let's try with TX key (i.e. cluster mode) & verify the skb first! 1819 */ 1820 if (unlikely(!rx || tx_key == KEY_MASTER)) 1821 goto pick_tx; 1822 1823 /* Pick RX key according to TX key if any */ 1824 key = rx->key; 1825 if (tx_key == key.active || tx_key == key.pending || 1826 tx_key == key.passive) 1827 goto decrypt; 1828 1829 /* Unknown key, let's try to align RX key(s) */ 1830 if (tipc_crypto_key_try_align(rx, tx_key)) 1831 goto decrypt; 1832 1833 pick_tx: 1834 /* No key suitable? Try to pick one from TX... */ 1835 aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key); 1836 if (aead) 1837 goto decrypt; 1838 goto exit; 1839 1840 decrypt: 1841 rcu_read_lock(); 1842 if (!aead) 1843 aead = tipc_aead_get(rx->aead[tx_key]); 1844 rc = tipc_aead_decrypt(net, aead, *skb, b); 1845 rcu_read_unlock(); 1846 1847 exit: 1848 stats = ((rx) ?: tx)->stats; 1849 switch (rc) { 1850 case 0: 1851 this_cpu_inc(stats->stat[STAT_OK]); 1852 break; 1853 case -EINPROGRESS: 1854 case -EBUSY: 1855 this_cpu_inc(stats->stat[STAT_ASYNC]); 1856 *skb = NULL; 1857 return rc; 1858 default: 1859 this_cpu_inc(stats->stat[STAT_NOK]); 1860 if (rc == -ENOKEY) { 1861 kfree_skb(*skb); 1862 *skb = NULL; 1863 if (rx) { 1864 /* Mark rx->nokey only if we dont have a 1865 * pending received session key, nor a newer 1866 * one i.e. in the next slot. 1867 */ 1868 n = key_next(tx_key); 1869 rx->nokey = !(rx->skey || 1870 rcu_access_pointer(rx->aead[n])); 1871 pr_debug_ratelimited("%s: nokey %d, key %d/%x\n", 1872 rx->name, rx->nokey, 1873 tx_key, rx->key.keys); 1874 tipc_node_put(rx->node); 1875 } 1876 this_cpu_inc(stats->stat[STAT_NOKEYS]); 1877 return rc; 1878 } else if (rc == -EBADMSG) { 1879 this_cpu_inc(stats->stat[STAT_BADMSGS]); 1880 } 1881 break; 1882 } 1883 1884 tipc_crypto_rcv_complete(net, aead, b, skb, rc); 1885 return rc; 1886 } 1887 1888 static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead, 1889 struct tipc_bearer *b, 1890 struct sk_buff **skb, int err) 1891 { 1892 struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb); 1893 struct tipc_crypto *rx = aead->crypto; 1894 struct tipc_aead *tmp = NULL; 1895 struct tipc_ehdr *ehdr; 1896 struct tipc_node *n; 1897 1898 /* Is this completed by TX? */ 1899 if (unlikely(is_tx(aead->crypto))) { 1900 rx = skb_cb->tx_clone_ctx.rx; 1901 pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n", 1902 (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead, 1903 (*skb)->next, skb_cb->flags); 1904 pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n", 1905 skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last, 1906 aead->crypto->aead[1], aead->crypto->aead[2], 1907 aead->crypto->aead[3]); 1908 if (unlikely(err)) { 1909 if (err == -EBADMSG && (*skb)->next) 1910 tipc_rcv(net, (*skb)->next, b); 1911 goto free_skb; 1912 } 1913 1914 if (likely((*skb)->next)) { 1915 kfree_skb((*skb)->next); 1916 (*skb)->next = NULL; 1917 } 1918 ehdr = (struct tipc_ehdr *)(*skb)->data; 1919 if (!rx) { 1920 WARN_ON(ehdr->user != LINK_CONFIG); 1921 n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0, 1922 true); 1923 rx = tipc_node_crypto_rx(n); 1924 if (unlikely(!rx)) 1925 goto free_skb; 1926 } 1927 1928 /* Ignore cloning if it was TX master key */ 1929 if (ehdr->tx_key == KEY_MASTER) 1930 goto rcv; 1931 if (tipc_aead_clone(&tmp, aead) < 0) 1932 goto rcv; 1933 WARN_ON(!refcount_inc_not_zero(&tmp->refcnt)); 1934 if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) { 1935 tipc_aead_free(&tmp->rcu); 1936 goto rcv; 1937 } 1938 tipc_aead_put(aead); 1939 aead = tmp; 1940 } 1941 1942 if (unlikely(err)) { 1943 tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN); 1944 goto free_skb; 1945 } 1946 1947 /* Set the RX key's user */ 1948 tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1); 1949 1950 /* Mark this point, RX works */ 1951 rx->timer1 = jiffies; 1952 1953 rcv: 1954 /* Remove ehdr & auth. tag prior to tipc_rcv() */ 1955 ehdr = (struct tipc_ehdr *)(*skb)->data; 1956 1957 /* Mark this point, RX passive still works */ 1958 if (rx->key.passive && ehdr->tx_key == rx->key.passive) 1959 rx->timer2 = jiffies; 1960 1961 skb_reset_network_header(*skb); 1962 skb_pull(*skb, tipc_ehdr_size(ehdr)); 1963 pskb_trim(*skb, (*skb)->len - aead->authsize); 1964 1965 /* Validate TIPCv2 message */ 1966 if (unlikely(!tipc_msg_validate(skb))) { 1967 pr_err_ratelimited("Packet dropped after decryption!\n"); 1968 goto free_skb; 1969 } 1970 1971 /* Ok, everything's fine, try to synch own keys according to peers' */ 1972 tipc_crypto_key_synch(rx, *skb); 1973 1974 /* Mark skb decrypted */ 1975 skb_cb->decrypted = 1; 1976 1977 /* Clear clone cxt if any */ 1978 if (likely(!skb_cb->tx_clone_deferred)) 1979 goto exit; 1980 skb_cb->tx_clone_deferred = 0; 1981 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); 1982 goto exit; 1983 1984 free_skb: 1985 kfree_skb(*skb); 1986 *skb = NULL; 1987 1988 exit: 1989 tipc_aead_put(aead); 1990 if (rx) 1991 tipc_node_put(rx->node); 1992 } 1993 1994 static void tipc_crypto_do_cmd(struct net *net, int cmd) 1995 { 1996 struct tipc_net *tn = tipc_net(net); 1997 struct tipc_crypto *tx = tn->crypto_tx, *rx; 1998 struct list_head *p; 1999 unsigned int stat; 2000 int i, j, cpu; 2001 char buf[200]; 2002 2003 /* Currently only one command is supported */ 2004 switch (cmd) { 2005 case 0xfff1: 2006 goto print_stats; 2007 default: 2008 return; 2009 } 2010 2011 print_stats: 2012 /* Print a header */ 2013 pr_info("\n=============== TIPC Crypto Statistics ===============\n\n"); 2014 2015 /* Print key status */ 2016 pr_info("Key status:\n"); 2017 pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net), 2018 tipc_crypto_key_dump(tx, buf)); 2019 2020 rcu_read_lock(); 2021 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { 2022 rx = tipc_node_crypto_rx_by_list(p); 2023 pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node), 2024 tipc_crypto_key_dump(rx, buf)); 2025 } 2026 rcu_read_unlock(); 2027 2028 /* Print crypto statistics */ 2029 for (i = 0, j = 0; i < MAX_STATS; i++) 2030 j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]); 2031 pr_info("Counter %s", buf); 2032 2033 memset(buf, '-', 115); 2034 buf[115] = '\0'; 2035 pr_info("%s\n", buf); 2036 2037 j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net)); 2038 for_each_possible_cpu(cpu) { 2039 for (i = 0; i < MAX_STATS; i++) { 2040 stat = per_cpu_ptr(tx->stats, cpu)->stat[i]; 2041 j += scnprintf(buf + j, 200 - j, "|%11d ", stat); 2042 } 2043 pr_info("%s", buf); 2044 j = scnprintf(buf, 200, "%12s", " "); 2045 } 2046 2047 rcu_read_lock(); 2048 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { 2049 rx = tipc_node_crypto_rx_by_list(p); 2050 j = scnprintf(buf, 200, "RX(%7.7s) ", 2051 tipc_node_get_id_str(rx->node)); 2052 for_each_possible_cpu(cpu) { 2053 for (i = 0; i < MAX_STATS; i++) { 2054 stat = per_cpu_ptr(rx->stats, cpu)->stat[i]; 2055 j += scnprintf(buf + j, 200 - j, "|%11d ", 2056 stat); 2057 } 2058 pr_info("%s", buf); 2059 j = scnprintf(buf, 200, "%12s", " "); 2060 } 2061 } 2062 rcu_read_unlock(); 2063 2064 pr_info("\n======================== Done ========================\n"); 2065 } 2066 2067 static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf) 2068 { 2069 struct tipc_key key = c->key; 2070 struct tipc_aead *aead; 2071 int k, i = 0; 2072 char *s; 2073 2074 for (k = KEY_MIN; k <= KEY_MAX; k++) { 2075 if (k == KEY_MASTER) { 2076 if (is_rx(c)) 2077 continue; 2078 if (time_before(jiffies, 2079 c->timer2 + TIPC_TX_GRACE_PERIOD)) 2080 s = "ACT"; 2081 else 2082 s = "PAS"; 2083 } else { 2084 if (k == key.passive) 2085 s = "PAS"; 2086 else if (k == key.active) 2087 s = "ACT"; 2088 else if (k == key.pending) 2089 s = "PEN"; 2090 else 2091 s = "-"; 2092 } 2093 i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s); 2094 2095 rcu_read_lock(); 2096 aead = rcu_dereference(c->aead[k]); 2097 if (aead) 2098 i += scnprintf(buf + i, 200 - i, 2099 "{\"0x...%s\", \"%s\"}/%d:%d", 2100 aead->hint, 2101 (aead->mode == CLUSTER_KEY) ? "c" : "p", 2102 atomic_read(&aead->users), 2103 refcount_read(&aead->refcnt)); 2104 rcu_read_unlock(); 2105 i += scnprintf(buf + i, 200 - i, "\n"); 2106 } 2107 2108 if (is_rx(c)) 2109 i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n", 2110 atomic_read(&c->peer_rx_active)); 2111 2112 return buf; 2113 } 2114 2115 static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new, 2116 char *buf) 2117 { 2118 struct tipc_key *key = &old; 2119 int k, i = 0; 2120 char *s; 2121 2122 /* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */ 2123 again: 2124 i += scnprintf(buf + i, 32 - i, "["); 2125 for (k = KEY_1; k <= KEY_3; k++) { 2126 if (k == key->passive) 2127 s = "pas"; 2128 else if (k == key->active) 2129 s = "act"; 2130 else if (k == key->pending) 2131 s = "pen"; 2132 else 2133 s = "-"; 2134 i += scnprintf(buf + i, 32 - i, 2135 (k != KEY_3) ? "%s " : "%s", s); 2136 } 2137 if (key != &new) { 2138 i += scnprintf(buf + i, 32 - i, "] -> "); 2139 key = &new; 2140 goto again; 2141 } 2142 i += scnprintf(buf + i, 32 - i, "]"); 2143 return buf; 2144 } 2145 2146 /** 2147 * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point 2148 * @net: the struct net 2149 * @skb: the receiving message buffer 2150 */ 2151 void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb) 2152 { 2153 struct tipc_crypto *rx; 2154 struct tipc_msg *hdr; 2155 2156 if (unlikely(skb_linearize(skb))) 2157 goto exit; 2158 2159 hdr = buf_msg(skb); 2160 rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr)); 2161 if (unlikely(!rx)) 2162 goto exit; 2163 2164 switch (msg_type(hdr)) { 2165 case KEY_DISTR_MSG: 2166 if (tipc_crypto_key_rcv(rx, hdr)) 2167 goto exit; 2168 break; 2169 default: 2170 break; 2171 } 2172 2173 tipc_node_put(rx->node); 2174 2175 exit: 2176 kfree_skb(skb); 2177 } 2178 2179 /** 2180 * tipc_crypto_key_distr - Distribute a TX key 2181 * @tx: the TX crypto 2182 * @key: the key's index 2183 * @dest: the destination tipc node, = NULL if distributing to all nodes 2184 * 2185 * Return: 0 in case of success, otherwise < 0 2186 */ 2187 int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key, 2188 struct tipc_node *dest) 2189 { 2190 struct tipc_aead *aead; 2191 u32 dnode = tipc_node_get_addr(dest); 2192 int rc = -ENOKEY; 2193 2194 if (!sysctl_tipc_key_exchange_enabled) 2195 return 0; 2196 2197 if (key) { 2198 rcu_read_lock(); 2199 aead = tipc_aead_get(tx->aead[key]); 2200 if (likely(aead)) { 2201 rc = tipc_crypto_key_xmit(tx->net, aead->key, 2202 aead->gen, aead->mode, 2203 dnode); 2204 tipc_aead_put(aead); 2205 } 2206 rcu_read_unlock(); 2207 } 2208 2209 return rc; 2210 } 2211 2212 /** 2213 * tipc_crypto_key_xmit - Send a session key 2214 * @net: the struct net 2215 * @skey: the session key to be sent 2216 * @gen: the key's generation 2217 * @mode: the key's mode 2218 * @dnode: the destination node address, = 0 if broadcasting to all nodes 2219 * 2220 * The session key 'skey' is packed in a TIPC v2 'MSG_CRYPTO/KEY_DISTR_MSG' 2221 * as its data section, then xmit-ed through the uc/bc link. 2222 * 2223 * Return: 0 in case of success, otherwise < 0 2224 */ 2225 static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey, 2226 u16 gen, u8 mode, u32 dnode) 2227 { 2228 struct sk_buff_head pkts; 2229 struct tipc_msg *hdr; 2230 struct sk_buff *skb; 2231 u16 size, cong_link_cnt; 2232 u8 *data; 2233 int rc; 2234 2235 size = tipc_aead_key_size(skey); 2236 skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC); 2237 if (!skb) 2238 return -ENOMEM; 2239 2240 hdr = buf_msg(skb); 2241 tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG, 2242 INT_H_SIZE, dnode); 2243 msg_set_size(hdr, INT_H_SIZE + size); 2244 msg_set_key_gen(hdr, gen); 2245 msg_set_key_mode(hdr, mode); 2246 2247 data = msg_data(hdr); 2248 *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen); 2249 memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME); 2250 memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key, 2251 skey->keylen); 2252 2253 __skb_queue_head_init(&pkts); 2254 __skb_queue_tail(&pkts, skb); 2255 if (dnode) 2256 rc = tipc_node_xmit(net, &pkts, dnode, 0); 2257 else 2258 rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt); 2259 2260 return rc; 2261 } 2262 2263 /** 2264 * tipc_crypto_key_rcv - Receive a session key 2265 * @rx: the RX crypto 2266 * @hdr: the TIPC v2 message incl. the receiving session key in its data 2267 * 2268 * This function retrieves the session key in the message from peer, then 2269 * schedules a RX work to attach the key to the corresponding RX crypto. 2270 * 2271 * Return: "true" if the key has been scheduled for attaching, otherwise 2272 * "false". 2273 */ 2274 static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr) 2275 { 2276 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; 2277 struct tipc_aead_key *skey = NULL; 2278 u16 key_gen = msg_key_gen(hdr); 2279 u32 size = msg_data_sz(hdr); 2280 u8 *data = msg_data(hdr); 2281 unsigned int keylen; 2282 2283 /* Verify whether the size can exist in the packet */ 2284 if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) { 2285 pr_debug("%s: message data size is too small\n", rx->name); 2286 goto exit; 2287 } 2288 2289 keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME))); 2290 2291 /* Verify the supplied size values */ 2292 if (unlikely(size != keylen + sizeof(struct tipc_aead_key) || 2293 keylen > TIPC_AEAD_KEY_SIZE_MAX)) { 2294 pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name); 2295 goto exit; 2296 } 2297 2298 spin_lock(&rx->lock); 2299 if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) { 2300 pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name, 2301 rx->skey, key_gen, rx->key_gen); 2302 goto exit_unlock; 2303 } 2304 2305 /* Allocate memory for the key */ 2306 skey = kmalloc(size, GFP_ATOMIC); 2307 if (unlikely(!skey)) { 2308 pr_err("%s: unable to allocate memory for skey\n", rx->name); 2309 goto exit_unlock; 2310 } 2311 2312 /* Copy key from msg data */ 2313 skey->keylen = keylen; 2314 memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME); 2315 memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32), 2316 skey->keylen); 2317 2318 rx->key_gen = key_gen; 2319 rx->skey_mode = msg_key_mode(hdr); 2320 rx->skey = skey; 2321 rx->nokey = 0; 2322 mb(); /* for nokey flag */ 2323 2324 exit_unlock: 2325 spin_unlock(&rx->lock); 2326 2327 exit: 2328 /* Schedule the key attaching on this crypto */ 2329 if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0))) 2330 return true; 2331 2332 return false; 2333 } 2334 2335 /** 2336 * tipc_crypto_work_rx - Scheduled RX works handler 2337 * @work: the struct RX work 2338 * 2339 * The function processes the previous scheduled works i.e. distributing TX key 2340 * or attaching a received session key on RX crypto. 2341 */ 2342 static void tipc_crypto_work_rx(struct work_struct *work) 2343 { 2344 struct delayed_work *dwork = to_delayed_work(work); 2345 struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work); 2346 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; 2347 unsigned long delay = msecs_to_jiffies(5000); 2348 bool resched = false; 2349 u8 key; 2350 int rc; 2351 2352 /* Case 1: Distribute TX key to peer if scheduled */ 2353 if (atomic_cmpxchg(&rx->key_distr, 2354 KEY_DISTR_SCHED, 2355 KEY_DISTR_COMPL) == KEY_DISTR_SCHED) { 2356 /* Always pick the newest one for distributing */ 2357 key = tx->key.pending ?: tx->key.active; 2358 rc = tipc_crypto_key_distr(tx, key, rx->node); 2359 if (unlikely(rc)) 2360 pr_warn("%s: unable to distr key[%d] to %s, err %d\n", 2361 tx->name, key, tipc_node_get_id_str(rx->node), 2362 rc); 2363 2364 /* Sched for key_distr releasing */ 2365 resched = true; 2366 } else { 2367 atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0); 2368 } 2369 2370 /* Case 2: Attach a pending received session key from peer if any */ 2371 if (rx->skey) { 2372 rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false); 2373 if (unlikely(rc < 0)) 2374 pr_warn("%s: unable to attach received skey, err %d\n", 2375 rx->name, rc); 2376 switch (rc) { 2377 case -EBUSY: 2378 case -ENOMEM: 2379 /* Resched the key attaching */ 2380 resched = true; 2381 break; 2382 default: 2383 synchronize_rcu(); 2384 kfree(rx->skey); 2385 rx->skey = NULL; 2386 break; 2387 } 2388 } 2389 2390 if (resched && queue_delayed_work(tx->wq, &rx->work, delay)) 2391 return; 2392 2393 tipc_node_put(rx->node); 2394 } 2395 2396 /** 2397 * tipc_crypto_rekeying_sched - (Re)schedule rekeying w/o new interval 2398 * @tx: TX crypto 2399 * @changed: if the rekeying needs to be rescheduled with new interval 2400 * @new_intv: new rekeying interval (when "changed" = true) 2401 */ 2402 void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed, 2403 u32 new_intv) 2404 { 2405 unsigned long delay; 2406 bool now = false; 2407 2408 if (changed) { 2409 if (new_intv == TIPC_REKEYING_NOW) 2410 now = true; 2411 else 2412 tx->rekeying_intv = new_intv; 2413 cancel_delayed_work_sync(&tx->work); 2414 } 2415 2416 if (tx->rekeying_intv || now) { 2417 delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000; 2418 queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay)); 2419 } 2420 } 2421 2422 /** 2423 * tipc_crypto_work_tx - Scheduled TX works handler 2424 * @work: the struct TX work 2425 * 2426 * The function processes the previous scheduled work, i.e. key rekeying, by 2427 * generating a new session key based on current one, then attaching it to the 2428 * TX crypto and finally distributing it to peers. It also re-schedules the 2429 * rekeying if needed. 2430 */ 2431 static void tipc_crypto_work_tx(struct work_struct *work) 2432 { 2433 struct delayed_work *dwork = to_delayed_work(work); 2434 struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work); 2435 struct tipc_aead_key *skey = NULL; 2436 struct tipc_key key = tx->key; 2437 struct tipc_aead *aead; 2438 int rc = -ENOMEM; 2439 2440 if (unlikely(key.pending)) 2441 goto resched; 2442 2443 /* Take current key as a template */ 2444 rcu_read_lock(); 2445 aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]); 2446 if (unlikely(!aead)) { 2447 rcu_read_unlock(); 2448 /* At least one key should exist for securing */ 2449 return; 2450 } 2451 2452 /* Lets duplicate it first */ 2453 skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC); 2454 rcu_read_unlock(); 2455 2456 /* Now, generate new key, initiate & distribute it */ 2457 if (likely(skey)) { 2458 rc = tipc_aead_key_generate(skey) ?: 2459 tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false); 2460 if (likely(rc > 0)) 2461 rc = tipc_crypto_key_distr(tx, rc, NULL); 2462 kfree_sensitive(skey); 2463 } 2464 2465 if (unlikely(rc)) 2466 pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc); 2467 2468 resched: 2469 /* Re-schedule rekeying if any */ 2470 tipc_crypto_rekeying_sched(tx, false, 0); 2471 } 2472