// SPDX-License-Identifier: GPL-2.0
/**
 * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption
 *
 * Copyright (c) 2019, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <crypto/aead.h>
#include <crypto/aes.h>
#include "crypto.h"

#define TIPC_TX_PROBE_LIM	msecs_to_jiffies(1000) /* > 1s */
#define TIPC_TX_LASTING_LIM	msecs_to_jiffies(120000) /* 2 mins */
#define TIPC_RX_ACTIVE_LIM	msecs_to_jiffies(3000) /* 3s */
#define TIPC_RX_PASSIVE_LIM	msecs_to_jiffies(180000) /* 3 mins */
#define TIPC_MAX_TFMS_DEF	10
#define TIPC_MAX_TFMS_LIM	1000

/**
 * TIPC Key ids
 */
enum {
	KEY_UNUSED = 0,
	KEY_MIN,
	KEY_1 = KEY_MIN,
	KEY_2,
	KEY_3,
	KEY_MAX = KEY_3,
};

/**
 * TIPC Crypto statistics
 */
enum {
	STAT_OK,
	STAT_NOK,
	STAT_ASYNC,
	STAT_ASYNC_OK,
	STAT_ASYNC_NOK,
	STAT_BADKEYS, /* tx only */
	STAT_BADMSGS = STAT_BADKEYS, /* rx only */
	STAT_NOKEYS,
	STAT_SWITCHES,

	MAX_STATS,
};

/* TIPC crypto statistics' header */
static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
					"async_nok", "badmsgs", "nokeys",
					"switches"};

/* Max TFMs number per key */
int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;

/**
 * struct tipc_key - TIPC keys' status indicator
 *
 *         7     6     5     4     3     2     1     0
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 * key: | (reserved)|passive idx| active idx|pending idx|
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 */
struct tipc_key {
#define KEY_BITS (2)
#define KEY_MASK ((1 << KEY_BITS) - 1)
	union {
		struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
			u8 pending:2,
			   active:2,
			   passive:2, /* rx only */
			   reserved:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:2,
			   passive:2, /* rx only */
			   active:2,
			   pending:2;
#else
#error "Please fix <asm/byteorder.h>"
#endif
		} __packed;
		u8 keys;
	};
};
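
/* Illustrative note (added commentary, not in the original source):
 * tipc_crypto_key_set_state() below packs the three 2-bit slot indices into
 * the "keys" byte as (passive << 4) | (active << 2) | pending. For example,
 * an active key in slot 2 with a pending key in slot 1 and no passive key
 * encodes as (0 << 4) | (2 << 2) | 1 = 0x09.
 */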

/**
 * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
 */
struct tipc_tfm {
	struct crypto_aead *tfm;
	struct list_head list;
};

/**
 * struct tipc_aead - TIPC AEAD key structure
 * @tfm_entry: per-cpu pointer to one entry in TFM list
 * @crypto: TIPC crypto owns this key
 * @cloned: reference to the source key in case cloning
 * @users: the number of the key users (TX/RX)
 * @salt: the key's SALT value
 * @authsize: authentication tag size (max = 16)
 * @mode: crypto mode is applied to the key
 * @hint[]: a hint for user key
 * @rcu: struct rcu_head
 * @seqno: the key seqno (cluster scope)
 * @refcnt: the key reference counter
 */
struct tipc_aead {
#define TIPC_AEAD_HINT_LEN (5)
	struct tipc_tfm * __percpu *tfm_entry;
	struct tipc_crypto *crypto;
	struct tipc_aead *cloned;
	atomic_t users;
	u32 salt;
	u8 authsize;
	u8 mode;
	char hint[TIPC_AEAD_HINT_LEN + 1];
	struct rcu_head rcu;

	atomic64_t seqno ____cacheline_aligned;
	refcount_t refcnt ____cacheline_aligned;

} ____cacheline_aligned;

/**
 * struct tipc_crypto_stats - TIPC Crypto statistics
 */
struct tipc_crypto_stats {
	unsigned int stat[MAX_STATS];
};

/**
 * struct tipc_crypto - TIPC TX/RX crypto structure
 * @net: struct net
 * @node: TIPC node (RX)
 * @aead: array of pointers to AEAD keys for encryption/decryption
 * @peer_rx_active: replicated peer RX active key index
 * @key: the key states
 * @working: the crypto is working or not
 * @stats: the crypto statistics
 * @sndnxt: the per-peer sndnxt (TX)
 * @timer1: general timer 1 (jiffies)
 * @timer2: general timer 2 (jiffies)
 * @lock: tipc_key lock
 */
struct tipc_crypto {
	struct net *net;
	struct tipc_node *node;
	struct tipc_aead __rcu *aead[KEY_MAX + 1]; /* key[0] is UNUSED */
	atomic_t peer_rx_active;
	struct tipc_key key;
	u8 working:1;
	struct tipc_crypto_stats __percpu *stats;

	atomic64_t sndnxt ____cacheline_aligned;
	unsigned long timer1;
	unsigned long timer2;
	spinlock_t lock; /* crypto lock */

} ____cacheline_aligned;

/* struct tipc_crypto_tx_ctx - TX context for callbacks */
struct tipc_crypto_tx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
	struct tipc_media_addr dst;
};

/* struct tipc_crypto_rx_ctx - RX context for callbacks */
struct tipc_crypto_rx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
};

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
static inline void tipc_aead_put(struct tipc_aead *aead);
static void tipc_aead_free(struct rcu_head *rp);
static int tipc_aead_users(struct tipc_aead __rcu *aead);
static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode);
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg);
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode);
static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err);
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b);
static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err);
static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx);
static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending);
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos);
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb);
static void tipc_crypto_key_synch(struct tipc_crypto *rx, u8 new_rx_active,
				  struct tipc_msg *hdr);
static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err);
static void tipc_crypto_do_cmd(struct net *net, int cmd);
static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
#ifdef TIPC_CRYPTO_DEBUG
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf);
#endif

#define key_next(cur) ((cur) % KEY_MAX + 1)
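
/* Illustrative note (added commentary): with KEY_MAX = 3, key_next() rotates
 * the slot index 1 -> 2 -> 3 -> 1, always skipping slot 0 (KEY_UNUSED).
 */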

#define tipc_aead_rcu_ptr(rcu_ptr, lock)				\
	rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))

#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock)			\
do {									\
	typeof(rcu_ptr) __tmp = rcu_dereference_protected((rcu_ptr),	\
						lockdep_is_held(lock));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	tipc_aead_put(__tmp);						\
} while (0)

#define tipc_crypto_key_detach(rcu_ptr, lock)				\
	tipc_aead_rcu_replace((rcu_ptr), NULL, lock)

/**
 * tipc_aead_key_validate - Validate an AEAD user key
 */
int tipc_aead_key_validate(struct tipc_aead_key *ukey)
{
	int keylen;

	/* Check if algorithm exists */
	if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
		pr_info("Not found cipher: \"%s\"!\n", ukey->alg_name);
		return -ENODEV;
	}

	/* Currently, we only support the "gcm(aes)" cipher algorithm */
	if (strcmp(ukey->alg_name, "gcm(aes)"))
		return -ENOTSUPP;

	/* Check if key size is correct */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
	if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_256))
		return -EINVAL;

	return 0;
}

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
		tmp = NULL;
	rcu_read_unlock();

	return tmp;
}

static inline void tipc_aead_put(struct tipc_aead *aead)
{
	if (aead && refcount_dec_and_test(&aead->refcnt))
		call_rcu(&aead->rcu, tipc_aead_free);
}

/**
 * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
 * @rp: rcu head pointer
 */
static void tipc_aead_free(struct rcu_head *rp)
{
	struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
	struct tipc_tfm *tfm_entry, *head, *tmp;

	if (aead->cloned) {
		tipc_aead_put(aead->cloned);
	} else {
		head = *get_cpu_ptr(aead->tfm_entry);
		put_cpu_ptr(aead->tfm_entry);
		list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
			crypto_free_aead(tfm_entry->tfm);
			list_del(&tfm_entry->list);
			kfree(tfm_entry);
		}
		/* Free the head */
		crypto_free_aead(head->tfm);
		list_del(&head->list);
		kfree(head);
	}
	free_percpu(aead->tfm_entry);
	kfree(aead);
}

static int tipc_aead_users(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;
	int users = 0;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		users = atomic_read(&tmp->users);
	rcu_read_unlock();

	return users;
}

static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, 1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, -1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
{
	struct tipc_aead *tmp;
	int cur;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp) {
		do {
			cur = atomic_read(&tmp->users);
			if (cur == val)
				break;
		} while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
	}
	rcu_read_unlock();
}
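
/* Illustrative note (added commentary): the "users" counter is a hint for the
 * key state machine rather than a true refcount. tipc_aead_users_inc()/_dec()
 * saturate at "lim" via atomic_add_unless(); e.g. _dec(aead, 0) never drops
 * the counter below zero, while tipc_crypto_rcv_complete() below uses
 * _dec(aead, INT_MIN) so repeated decrypt failures can drive it negative,
 * which tipc_crypto_timeout() treats as a "faulty" key (users < -30).
 */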

/**
 * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
 */
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
	struct tipc_tfm **tfm_entry;
	struct crypto_aead *tfm;

	tfm_entry = get_cpu_ptr(aead->tfm_entry);
	*tfm_entry = list_next_entry(*tfm_entry, list);
	tfm = (*tfm_entry)->tfm;
	put_cpu_ptr(tfm_entry);

	return tfm;
}

/**
 * tipc_aead_init - Initiate TIPC AEAD
 * @aead: returned new TIPC AEAD key handle pointer
 * @ukey: pointer to user key data
 * @mode: the key mode
 *
 * Allocate a (list of) new cipher transformation (TFM) with the specific user
 * key data if valid. The number of the allocated TFMs can be set via the
 * sysctl "net/tipc/max_tfms" first.
 * All the other AEAD data are initialized as well.
 *
 * Return: 0 if the initiation is successful, otherwise: < 0
 */
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode)
{
	struct tipc_tfm *tfm_entry, *head;
	struct crypto_aead *tfm;
	struct tipc_aead *tmp;
	int keylen, err, cpu;
	int tfm_cnt = 0;

	if (unlikely(*aead))
		return -EEXIST;

	/* Allocate a new AEAD */
	tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
	if (unlikely(!tmp))
		return -ENOMEM;

	/* The key consists of two parts: [AES-KEY][SALT] */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;

	/* Allocate per-cpu TFM entry pointer */
	tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
	if (!tmp->tfm_entry) {
		kfree_sensitive(tmp);
		return -ENOMEM;
	}

	/* Make a list of TFMs with the user key data */
	do {
		tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
		if (IS_ERR(tfm)) {
			err = PTR_ERR(tfm);
			break;
		}

		if (unlikely(!tfm_cnt &&
			     crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
			crypto_free_aead(tfm);
			err = -ENOTSUPP;
			break;
		}

		err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
		err |= crypto_aead_setkey(tfm, ukey->key, keylen);
		if (unlikely(err)) {
			crypto_free_aead(tfm);
			break;
		}

		tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
		if (unlikely(!tfm_entry)) {
			crypto_free_aead(tfm);
			err = -ENOMEM;
			break;
		}
		INIT_LIST_HEAD(&tfm_entry->list);
		tfm_entry->tfm = tfm;

		/* First entry? */
		if (!tfm_cnt) {
			head = tfm_entry;
			for_each_possible_cpu(cpu) {
				*per_cpu_ptr(tmp->tfm_entry, cpu) = head;
			}
		} else {
			list_add_tail(&tfm_entry->list, &head->list);
		}

	} while (++tfm_cnt < sysctl_tipc_max_tfms);

	/* Not any TFM is allocated? */
	if (!tfm_cnt) {
		free_percpu(tmp->tfm_entry);
		kfree_sensitive(tmp);
		return err;
	}

	/* Copy some chars from the user key as a hint */
	memcpy(tmp->hint, ukey->key, TIPC_AEAD_HINT_LEN);
	tmp->hint[TIPC_AEAD_HINT_LEN] = '\0';

	/* Initialize the other data */
	tmp->mode = mode;
	tmp->cloned = NULL;
	tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
	memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
	atomic_set(&tmp->users, 0);
	atomic64_set(&tmp->seqno, 0);
	refcount_set(&tmp->refcnt, 1);

	*aead = tmp;
	return 0;
}
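
/* Illustrative example (added commentary, sizes per crypto.h): a "gcm(aes)"
 * user key with ukey->keylen = 20 splits into a 16-byte AES-128 key
 * (TIPC_AES_GCM_KEY_SIZE_128) followed by the 4-byte salt
 * (TIPC_AES_GCM_SALT_SIZE) that seeds the per-message IV built in
 * tipc_aead_encrypt().
 */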

/**
 * tipc_aead_clone - Clone a TIPC AEAD key
 * @dst: dest key for the cloning
 * @src: source key to clone from
 *
 * Make a "copy" of the source AEAD key data to the dest, the TFMs list is
 * common for the keys.
 * A reference to the source is held in the "cloned" pointer for later
 * freeing purposes.
 *
 * Note: this must be done in cluster-key mode only!
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
{
	struct tipc_aead *aead;
	int cpu;

	if (!src)
		return -ENOKEY;

	if (src->mode != CLUSTER_KEY)
		return -EINVAL;

	if (unlikely(*dst))
		return -EEXIST;

	aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
	if (unlikely(!aead))
		return -ENOMEM;

	aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
	if (unlikely(!aead->tfm_entry)) {
		kfree_sensitive(aead);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		*per_cpu_ptr(aead->tfm_entry, cpu) =
				*per_cpu_ptr(src->tfm_entry, cpu);
	}

	memcpy(aead->hint, src->hint, sizeof(src->hint));
	aead->mode = src->mode;
	aead->salt = src->salt;
	aead->authsize = src->authsize;
	atomic_set(&aead->users, 0);
	atomic64_set(&aead->seqno, 0);
	refcount_set(&aead->refcnt, 1);

	WARN_ON(!refcount_inc_not_zero(&src->refcnt));
	aead->cloned = src;

	*dst = aead;
	return 0;
}

/**
 * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
 * @tfm: cipher handle to be registered with the request
 * @crypto_ctx_size: size of crypto context for callback
 * @iv: returned pointer to IV data
 * @req: returned pointer to AEAD request data
 * @sg: returned pointer to SG lists
 * @nsg: number of SG lists to be allocated
 *
 * Allocate memory to store the crypto context data, AEAD request, IV and SG
 * lists, the memory layout is as follows:
 * crypto_ctx || iv || aead_req || sg[]
 *
 * Return: the pointer to the memory areas in case of success, otherwise NULL
 */
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg)
{
	unsigned int iv_size, req_size;
	unsigned int len;
	u8 *mem;

	iv_size = crypto_aead_ivsize(tfm);
	req_size = sizeof(**req) + crypto_aead_reqsize(tfm);

	len = crypto_ctx_size;
	len += iv_size;
	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
	len = ALIGN(len, crypto_tfm_ctx_alignment());
	len += req_size;
	len = ALIGN(len, __alignof__(struct scatterlist));
	len += nsg * sizeof(**sg);

	mem = kmalloc(len, GFP_ATOMIC);
	if (!mem)
		return NULL;

	*iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
			      crypto_aead_alignmask(tfm) + 1);
	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
						crypto_tfm_ctx_alignment());
	*sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
					      __alignof__(struct scatterlist));

	return (void *)mem;
}
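
/* Illustrative note (added commentary): a single kmalloc() serves the whole
 * per-request scratch area, e.g. for TX with nsg = 1:
 *
 *	[tipc_crypto_tx_ctx][IV (12B for GCM)][aead_request + reqsize][sg[1]]
 *
 * so one kfree() of the returned pointer releases everything once the
 * operation has completed.
 */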

/**
 * tipc_aead_encrypt - Encrypt a message
 * @aead: TIPC AEAD key for the message encryption
 * @skb: the input/output skb
 * @b: TIPC bearer where the message will be delivered after the encryption
 * @dst: the destination media address
 * @__dnode: TIPC dest node if "known"
 *
 * Return:
 * 0                   : if the encryption has completed
 * -EINPROGRESS/-EBUSY : if a callback will be performed
 * < 0                 : the encryption has failed
 */
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode)
{
	struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
	struct tipc_crypto_tx_ctx *tx_ctx;
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, len, tailen, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	/* Make sure message len at least 4-byte aligned */
	len = ALIGN(skb->len, 4);
	tailen = len - skb->len + aead->authsize;

	/* Expand skb tail for authentication tag:
	 * For simplicity, we'd have made sure the skb has enough tailroom
	 * for the authentication tag at skb allocation. Even when the skb is
	 * nonlinear but has no frag_list, it should still be fine!
	 * Otherwise, we must cow it to be a writable buffer with the tailroom.
	 */
#ifdef TIPC_CRYPTO_DEBUG
	SKB_LINEAR_ASSERT(skb);
	if (tailen > skb_tailroom(skb)) {
		pr_warn("TX: skb tailroom is not enough: %d, requires: %d\n",
			skb_tailroom(skb), tailen);
	}
#endif

	if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
		nsg = 1;
		trailer = skb;
	} else {
		/* TODO: We could avoid skb_cow_data() if skb has no frag_list
		 * e.g. by skb_fill_page_desc() to add another page to the skb
		 * with the wanted tailen... However, page skbs don't appear
		 * often, so take it easy for now!
		 * Cloned skbs e.g. from link_xmit() leave no choice though :(
		 */
		nsg = skb_cow_data(skb, tailen, &trailer);
		if (unlikely(nsg < 0)) {
			pr_err("TX: skb_cow_data() returned %d\n", nsg);
			return nsg;
		}
	}

	pskb_put(skb, trailer, tailen);

	/* Allocate memory for the AEAD operation */
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
		goto exit;
	}

	/* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)]
	 * In case we're in cluster-key mode, SALT is varied by xor-ing with
	 * the source address (or w0 of id), otherwise with the dest address
	 * if dest is known.
	 */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= ehdr->addr; /* __be32 */
	else if (__dnode)
		salt ^= tipc_node_get_addr(__dnode);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_encrypt_done, skb);
	tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
	tx_ctx->aead = aead;
	tx_ctx->bearer = b;
	memcpy(&tx_ctx->dst, dst, sizeof(*dst));

	/* Hold bearer */
	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	/* Now, do encrypt */
	rc = crypto_aead_encrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}
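
/* Illustrative example (added commentary): with key salt 0xA1B2C3D4 in
 * cluster-key mode and ehdr->addr = 0x00000001, the 12-byte GCM IV built
 * above becomes (0xA1B2C3D4 ^ 0x00000001) || seqno (8B, big endian as placed
 * in the ehdr), so every sender/seqno pair yields a unique nonce under the
 * same key.
 */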

static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = tx_ctx->bearer;
	struct tipc_aead *aead = tx_ctx->aead;
	struct tipc_crypto *tx = aead->crypto;
	struct net *net = tx->net;

	switch (err) {
	case 0:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
		rcu_read_lock();
		if (likely(test_bit(0, &b->up)))
			b->media->send_msg(net, skb, b, &tx_ctx->dst);
		else
			kfree_skb(skb);
		rcu_read_unlock();
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
		kfree_skb(skb);
		break;
	}

	kfree(tx_ctx);
	tipc_bearer_put(b);
	tipc_aead_put(aead);
}

/**
 * tipc_aead_decrypt - Decrypt an encrypted message
 * @net: struct net
 * @aead: TIPC AEAD for the message decryption
 * @skb: the input/output skb
 * @b: TIPC bearer where the message has been received
 *
 * Return:
 * 0                   : if the decryption has completed
 * -EINPROGRESS/-EBUSY : if a callback will be performed
 * < 0                 : the decryption has failed
 */
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b)
{
	struct tipc_crypto_rx_ctx *rx_ctx;
	struct aead_request *req;
	struct crypto_aead *tfm;
	struct sk_buff *unused;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	if (unlikely(!aead))
		return -ENOKEY;

	/* Cow skb data if needed */
	if (likely(!skb_cloned(skb) &&
		   (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) {
		nsg = 1 + skb_shinfo(skb)->nr_frags;
	} else {
		nsg = skb_cow_data(skb, 0, &unused);
		if (unlikely(nsg < 0)) {
			pr_err("RX: skb_cow_data() returned %d\n", nsg);
			return nsg;
		}
	}

	/* Allocate memory for the AEAD operation */
	tfm = tipc_aead_tfm_next(aead);
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
		goto exit;
	}

	/* Reconstruct IV: */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= ehdr->addr; /* __be32 */
	else if (ehdr->destined)
		salt ^= tipc_own_addr(net);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_decrypt_done, skb);
	rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
	rx_ctx->aead = aead;
	rx_ctx->bearer = b;

	/* Hold bearer */
	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	/* Now, do decrypt */
	rc = crypto_aead_decrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}

static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = rx_ctx->bearer;
	struct tipc_aead *aead = rx_ctx->aead;
	struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
	struct net *net = aead->crypto->net;

	switch (err) {
	case 0:
		this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
		break;
	}

	kfree(rx_ctx);
	tipc_crypto_rcv_complete(net, aead, b, &skb, err);
	if (likely(skb)) {
		if (likely(test_bit(0, &b->up)))
			tipc_rcv(net, skb, b);
		else
			kfree_skb(skb);
	}

	tipc_bearer_put(b);
}

static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
{
	return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
}

/**
 * tipc_ehdr_validate - Validate an encryption message
 * @skb: the message buffer
 *
 * Returns "true" if this is a valid encryption message, otherwise "false"
 */
bool tipc_ehdr_validate(struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr;
	int ehsz;

	if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
		return false;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (unlikely(ehdr->version != TIPC_EVERSION))
		return false;
	ehsz = tipc_ehdr_size(ehdr);
	if (unlikely(!pskb_may_pull(skb, ehsz)))
		return false;
	if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
		return false;
	if (unlikely(!ehdr->tx_key))
		return false;

	return true;
}

/**
 * tipc_ehdr_build - Build TIPC encryption message header
 * @net: struct net
 * @aead: TX AEAD key to be used for the message encryption
 * @tx_key: key id used for the message encryption
 * @skb: input/output message skb
 * @__rx: RX crypto handle if dest is "known"
 *
 * Return: the header size if the building is successful, otherwise < 0
 */
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_ehdr *ehdr;
	u32 user = msg_user(hdr);
	u64 seqno;
	int ehsz;

	/* Make room for encryption header */
	ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
	WARN_ON(skb_headroom(skb) < ehsz);
	ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);

	/* Obtain a seqno first:
	 * Use the key seqno (= cluster wise) if dest is unknown or we're in
	 * cluster key mode, otherwise it's better for a per-peer seqno!
	 */
	if (!__rx || aead->mode == CLUSTER_KEY)
		seqno = atomic64_inc_return(&aead->seqno);
	else
		seqno = atomic64_inc_return(&__rx->sndnxt);

	/* Revoke the key if seqno is wrapped around */
	if (unlikely(!seqno))
		return tipc_crypto_key_revoke(net, tx_key);

	/* Word 1-2 */
	ehdr->seqno = cpu_to_be64(seqno);

	/* Words 0, 3- */
	ehdr->version = TIPC_EVERSION;
	ehdr->user = 0;
	ehdr->keepalive = 0;
	ehdr->tx_key = tx_key;
	ehdr->destined = (__rx) ? 1 : 0;
	ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
	ehdr->reserved_1 = 0;
	ehdr->reserved_2 = 0;

	switch (user) {
	case LINK_CONFIG:
		ehdr->user = LINK_CONFIG;
		memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
		break;
	default:
		if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
			ehdr->user = LINK_PROTOCOL;
			ehdr->keepalive = msg_is_keepalive(hdr);
		}
		ehdr->addr = hdr->hdr[3];
		break;
	}

	return ehsz;
}
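
/* Illustrative note (added commentary): atomic64_inc_return() yielding 0
 * means the 64-bit seqno has wrapped, i.e. the (key, nonce) space is
 * exhausted; reusing an IV under GCM would be fatal, hence the immediate
 * tipc_crypto_key_revoke() above instead of continuing to send.
 */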

static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending)
{
#ifdef TIPC_CRYPTO_DEBUG
	struct tipc_key old = c->key;
	char buf[32];
#endif

	c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
		      ((new_active  & KEY_MASK) << (KEY_BITS)) |
		      ((new_pending & KEY_MASK));

#ifdef TIPC_CRYPTO_DEBUG
	pr_info("%s(%s): key changing %s ::%pS\n",
		(c->node) ? "RX" : "TX",
		(c->node) ? tipc_node_get_id_str(c->node) :
			    tipc_own_id_string(c->net),
		tipc_key_change_dump(old, c->key, buf),
		__builtin_return_address(0));
#endif
}

/**
 * tipc_crypto_key_init - Initiate a new user / AEAD key
 * @c: TIPC crypto to which new key is attached
 * @ukey: the user key
 * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
 *
 * A new TIPC AEAD key will be allocated and initiated with the specified user
 * key, then attached to the TIPC crypto.
 *
 * Return: new key id in case of success, otherwise: < 0
 */
int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
			 u8 mode)
{
	struct tipc_aead *aead = NULL;
	int rc = 0;

	/* Initiate with the new user key */
	rc = tipc_aead_init(&aead, ukey, mode);

	/* Attach it to the crypto */
	if (likely(!rc)) {
		rc = tipc_crypto_key_attach(c, aead, 0);
		if (rc < 0)
			tipc_aead_free(&aead->rcu);
	}

	pr_info("%s(%s): key initiating, rc %d!\n",
		(c->node) ? "RX" : "TX",
		(c->node) ? tipc_node_get_id_str(c->node) :
			    tipc_own_id_string(c->net),
		rc);

	return rc;
}
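
/* Usage sketch (added commentary, hedged - the caller lives outside this
 * file, presumably the netlink key-set path in node.c):
 *
 *	rc = tipc_crypto_key_init(tx, ukey, mode);
 *	if (rc < 0)
 *		return rc;	// no free slot (-EBUSY) or a bad key
 *	// rc (1..3) is the slot the new key was attached to
 */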

/**
 * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
 * @c: TIPC crypto to which the new AEAD key is attached
 * @aead: the new AEAD key pointer
 * @pos: desired slot in the crypto key array, = 0 if any!
 *
 * Return: new key id in case of success, otherwise: -EBUSY
 */
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos)
{
	u8 new_pending, new_passive, new_key;
	struct tipc_key key;
	int rc = -EBUSY;

	spin_lock_bh(&c->lock);
	key = c->key;
	if (key.active && key.passive)
		goto exit;
	if (key.passive && !tipc_aead_users(c->aead[key.passive]))
		goto exit;
	if (key.pending) {
		if (pos)
			goto exit;
		if (tipc_aead_users(c->aead[key.pending]) > 0)
			goto exit;
		/* Replace it */
		new_pending = key.pending;
		new_passive = key.passive;
		new_key = new_pending;
	} else {
		if (pos) {
			if (key.active && pos != key_next(key.active)) {
				new_pending = key.pending;
				new_passive = pos;
				new_key = new_passive;
				goto attach;
			} else if (!key.active && !key.passive) {
				new_pending = pos;
				new_passive = key.passive;
				new_key = new_pending;
				goto attach;
			}
		}
		new_pending = key_next(key.active ?: key.passive);
		new_passive = key.passive;
		new_key = new_pending;
	}

attach:
	aead->crypto = c;
	tipc_crypto_key_set_state(c, new_passive, key.active, new_pending);
	tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);

	c->working = 1;
	c->timer1 = jiffies;
	c->timer2 = jiffies;
	rc = new_key;

exit:
	spin_unlock_bh(&c->lock);
	return rc;
}

void tipc_crypto_key_flush(struct tipc_crypto *c)
{
	int k;

	spin_lock_bh(&c->lock);
	c->working = 0;
	tipc_crypto_key_set_state(c, 0, 0, 0);
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_crypto_key_detach(c->aead[k], &c->lock);
	atomic_set(&c->peer_rx_active, 0);
	atomic64_set(&c->sndnxt, 0);
	spin_unlock_bh(&c->lock);
}

/**
 * tipc_crypto_key_try_align - Align RX keys if possible
 * @rx: RX crypto handle
 * @new_pending: new pending slot if aligned (= TX key from peer)
 *
 * Peer has used an unknown key slot, this only happens when peer has left and
 * rejoined, or we are a newcomer.
 * That means, there must be no active key but a pending key at an unaligned
 * slot. If so, we try to move the pending key to the new slot.
 * Note: A potential passive key can exist, it will be shifted
 * correspondingly!
 *
 * Return: "true" if key is successfully aligned, otherwise "false"
 */
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
{
	struct tipc_aead *tmp1, *tmp2 = NULL;
	struct tipc_key key;
	bool aligned = false;
	u8 new_passive = 0;
	int x;

	spin_lock(&rx->lock);
	key = rx->key;
	if (key.pending == new_pending) {
		aligned = true;
		goto exit;
	}
	if (key.active)
		goto exit;
	if (!key.pending)
		goto exit;
	if (tipc_aead_users(rx->aead[key.pending]) > 0)
		goto exit;

	/* Try to "isolate" this pending key first */
	tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
	if (!refcount_dec_if_one(&tmp1->refcnt))
		goto exit;
	rcu_assign_pointer(rx->aead[key.pending], NULL);

	/* Move passive key if any */
	if (key.passive) {
		tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2,
					   lockdep_is_held(&rx->lock));
		x = (key.passive - key.pending + new_pending) % KEY_MAX;
		new_passive = (x <= 0) ? x + KEY_MAX : x;
	}

	/* Re-allocate the key(s) */
	tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
	rcu_assign_pointer(rx->aead[new_pending], tmp1);
	if (new_passive)
		rcu_assign_pointer(rx->aead[new_passive], tmp2);
	refcount_set(&tmp1->refcnt, 1);
	aligned = true;
	pr_info("RX(%s): key is aligned!\n", tipc_node_get_id_str(rx->node));

exit:
	spin_unlock(&rx->lock);
	return aligned;
}
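
/* Illustrative example (added commentary): the passive-slot shift above keeps
 * the relative distance between the two keys. E.g. pending = 1, passive = 2,
 * peer uses slot new_pending = 3: x = (2 - 1 + 3) % 3 = 1, so the passive key
 * moves to slot 1 while the pending key lands in slot 3.
 */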

/**
 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
 * @tx: TX crypto handle
 * @rx: RX crypto handle (can be NULL)
 * @skb: the message skb which will be decrypted later
 *
 * This function looks up the existing TX keys and picks one which is suitable
 * for the message decryption, that must be a cluster key and not used before
 * on the same message (i.e. recursive).
 *
 * Return: the TX AEAD key handle in case of success, otherwise NULL
 */
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
	struct tipc_aead *aead = NULL;
	struct tipc_key key = tx->key;
	u8 k, i = 0;

	/* Initialize data if not yet */
	if (!skb_cb->tx_clone_deferred) {
		skb_cb->tx_clone_deferred = 1;
		memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	}

	skb_cb->tx_clone_ctx.rx = rx;
	if (++skb_cb->tx_clone_ctx.recurs > 2)
		return NULL;

	/* Pick one TX key */
	spin_lock(&tx->lock);
	do {
		k = (i == 0) ? key.pending :
			((i == 1) ? key.active : key.passive);
		if (!k)
			continue;
		aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
		if (!aead)
			continue;
		if (aead->mode != CLUSTER_KEY ||
		    aead == skb_cb->tx_clone_ctx.last) {
			aead = NULL;
			continue;
		}
		/* Ok, found one cluster key */
		skb_cb->tx_clone_ctx.last = aead;
		WARN_ON(skb->next);
		skb->next = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!skb->next))
			pr_warn("Failed to clone skb for next round if any\n");
		WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
		break;
	} while (++i < 3);
	spin_unlock(&tx->lock);

	return aead;
}
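
/* Illustrative note (added commentary): the "recurs > 2" guard bounds the
 * retry loop: a message is attempted with at most two different TX cluster
 * keys (a clone is parked on skb->next for the second round), after which
 * decryption gives up rather than cycling through keys indefinitely.
 */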

/**
 * tipc_crypto_key_synch - Synch own key data according to peer key status
 * @rx: RX crypto handle
 * @new_rx_active: latest RX active key from peer
 * @hdr: TIPCv2 message
 *
 * This function updates the peer node related data as the peer RX active key
 * has changed, so the number of TX keys' users on this node is increased and
 * decreased correspondingly.
 *
 * The "per-peer" sndnxt is also reset when the peer key has switched.
 */
static void tipc_crypto_key_synch(struct tipc_crypto *rx, u8 new_rx_active,
				  struct tipc_msg *hdr)
{
	struct net *net = rx->net;
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	u8 cur_rx_active;

	/* TX might not even be ready yet */
	if (unlikely(!tx->key.active && !tx->key.pending))
		return;

	cur_rx_active = atomic_read(&rx->peer_rx_active);
	if (likely(cur_rx_active == new_rx_active))
		return;

	/* Make sure this message is destined for this node */
	if (unlikely(msg_short(hdr) ||
		     msg_destnode(hdr) != tipc_own_addr(net)))
		return;

	/* Peer RX active key has changed, try to update own & TX users */
	if (atomic_cmpxchg(&rx->peer_rx_active,
			   cur_rx_active,
			   new_rx_active) == cur_rx_active) {
		if (new_rx_active)
			tipc_aead_users_inc(tx->aead[new_rx_active], INT_MAX);
		if (cur_rx_active)
			tipc_aead_users_dec(tx->aead[cur_rx_active], 0);

		atomic64_set(&rx->sndnxt, 0);
		/* Mark the point TX key users changed */
		tx->timer1 = jiffies;

#ifdef TIPC_CRYPTO_DEBUG
		pr_info("TX(%s): key users changed %d-- %d++, peer RX(%s)\n",
			tipc_own_id_string(net), cur_rx_active,
			new_rx_active, tipc_node_get_id_str(rx->node));
#endif
	}
}

static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_key key;

	spin_lock(&tx->lock);
	key = tx->key;
	WARN_ON(!key.active || tx_key != key.active);

	/* Free the active key */
	tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
	tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	spin_unlock(&tx->lock);

	pr_warn("TX(%s): key is revoked!\n", tipc_own_id_string(net));
	return -EKEYREVOKED;
}

int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
		      struct tipc_node *node)
{
	struct tipc_crypto *c;

	if (*crypto)
		return -EEXIST;

	/* Allocate crypto */
	c = kzalloc(sizeof(*c), GFP_ATOMIC);
	if (!c)
		return -ENOMEM;

	/* Allocate statistic structure */
	c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
	if (!c->stats) {
		kfree_sensitive(c);
		return -ENOMEM;
	}

	c->working = 0;
	c->net = net;
	c->node = node;
	tipc_crypto_key_set_state(c, 0, 0, 0);
	atomic_set(&c->peer_rx_active, 0);
	atomic64_set(&c->sndnxt, 0);
	c->timer1 = jiffies;
	c->timer2 = jiffies;
	spin_lock_init(&c->lock);
	*crypto = c;

	return 0;
}

void tipc_crypto_stop(struct tipc_crypto **crypto)
{
	struct tipc_crypto *c, *tx, *rx;
	bool is_rx;
	u8 k;

	if (!*crypto)
		return;

	rcu_read_lock();
	/* RX stopping? => decrease TX key users if any */
	is_rx = !!((*crypto)->node);
	if (is_rx) {
		rx = *crypto;
		tx = tipc_net(rx->net)->crypto_tx;
		k = atomic_read(&rx->peer_rx_active);
		if (k) {
			tipc_aead_users_dec(tx->aead[k], 0);
			/* Mark the point TX key users changed */
			tx->timer1 = jiffies;
		}
	}

	/* Release AEAD keys */
	c = *crypto;
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_aead_put(rcu_dereference(c->aead[k]));
	rcu_read_unlock();

	pr_warn("%s(%s) has been purged, node left!\n",
		(is_rx) ? "RX" : "TX",
		(is_rx) ? tipc_node_get_id_str((*crypto)->node) :
			  tipc_own_id_string((*crypto)->net));

	/* Free this crypto statistics */
	free_percpu(c->stats);

	*crypto = NULL;
	kfree_sensitive(c);
}

void tipc_crypto_timeout(struct tipc_crypto *rx)
{
	struct tipc_net *tn = tipc_net(rx->net);
	struct tipc_crypto *tx = tn->crypto_tx;
	struct tipc_key key;
	u8 new_pending, new_passive;
	int cmd;

	/* TX key activating:
	 * The pending key (users > 0) -> active
	 * The active key if any (users == 0) -> free
	 */
	spin_lock(&tx->lock);
	key = tx->key;
	if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
		goto s1;
	if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
		goto s1;
	if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_LIM))
		goto s1;

	tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
	if (key.active)
		tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
	pr_info("TX(%s): key %d is activated!\n", tipc_own_id_string(tx->net),
		key.pending);

s1:
	spin_unlock(&tx->lock);

	/* RX key activating:
	 * The pending key (users > 0) -> active
	 * The active key if any -> passive, freed later
	 */
	spin_lock(&rx->lock);
	key = rx->key;
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
		goto s2;

	new_pending = (key.passive &&
		       !tipc_aead_users(rx->aead[key.passive])) ?
				       key.passive : 0;
	new_passive = (key.active) ?: ((new_pending) ? 0 : key.passive);
	tipc_crypto_key_set_state(rx, new_passive, key.pending, new_pending);
	this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
	pr_info("RX(%s): key %d is activated!\n",
		tipc_node_get_id_str(rx->node), key.pending);
	goto s5;

s2:
	/* RX key "faulty" switching:
	 * The faulty pending key (users < -30) -> passive
	 * The passive key (users = 0) -> pending
	 * Note: This only happens after RX deactivated - s3!
	 */
	key = rx->key;
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -30)
		goto s3;
	if (!key.passive || tipc_aead_users(rx->aead[key.passive]) != 0)
		goto s3;

	new_pending = key.passive;
	new_passive = key.pending;
	tipc_crypto_key_set_state(rx, new_passive, key.active, new_pending);
	goto s5;

s3:
	/* RX key deactivating:
	 * The passive key if any -> pending
	 * The active key -> passive (users = 0) / pending
	 * The pending key if any -> passive (users = 0)
	 */
	key = rx->key;
	if (!key.active)
		goto s4;
	if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM))
		goto s4;

	new_pending = (key.passive) ?: key.active;
	new_passive = (key.passive) ? key.active : key.pending;
	tipc_aead_users_set(rx->aead[new_pending], 0);
	if (new_passive)
		tipc_aead_users_set(rx->aead[new_passive], 0);
	tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
	pr_info("RX(%s): key %d is deactivated!\n",
		tipc_node_get_id_str(rx->node), key.active);
	goto s5;

s4:
	/* RX key passive -> freed: */
	key = rx->key;
	if (!key.passive || !tipc_aead_users(rx->aead[key.passive]))
		goto s5;
	if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM))
		goto s5;

	tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
	tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
	pr_info("RX(%s): key %d is freed!\n", tipc_node_get_id_str(rx->node),
		key.passive);

s5:
	spin_unlock(&rx->lock);

	/* Limit max_tfms & do debug commands if needed */
	if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
		return;

	cmd = sysctl_tipc_max_tfms;
	sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
	tipc_crypto_do_cmd(rx->net, cmd);
}

/**
 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
 * @net: struct net
 * @skb: input/output message skb pointer
 * @b: bearer used for xmit later
 * @dst: destination media address
 * @__dnode: destination node for reference if any
 *
 * First, build an encryption message header on the top of the message, then
 * encrypt the original TIPC message by using the active or pending TX key.
 * If the encryption is successful, the encrypted skb is returned directly or
 * via the callback.
 * Otherwise, the skb is freed!
 *
 * Return:
 * 0                   : the encryption has succeeded (or no encryption)
 * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
 * -ENOKEY             : the encryption has failed due to no key
 * -EKEYREVOKED        : the encryption has failed due to key revoked
 * -ENOMEM             : the encryption has failed due to no memory
 * < 0                 : the encryption has failed due to other reasons
 */
int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dst,
		     struct tipc_node *__dnode)
{
	struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats = tx->stats;
	struct tipc_key key = tx->key;
	struct tipc_aead *aead = NULL;
	struct sk_buff *probe;
	int rc = -ENOKEY;
	u8 tx_key;

	/* No encryption? */
	if (!tx->working)
		return 0;

	/* Try with the pending key if available and:
	 * 1) This is the only choice (i.e. no active key) or;
	 * 2) Peer has switched to this key (unicast only) or;
	 * 3) It is time to do a pending key probe;
	 */
	if (unlikely(key.pending)) {
		tx_key = key.pending;
		if (!key.active)
			goto encrypt;
		if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->probe)
			goto encrypt;
		if (!__rx &&
		    time_after(jiffies, tx->timer2 + TIPC_TX_PROBE_LIM)) {
			tx->timer2 = jiffies;
			probe = skb_clone(*skb, GFP_ATOMIC);
			if (probe) {
				TIPC_SKB_CB(probe)->probe = 1;
				tipc_crypto_xmit(net, &probe, b, dst, __dnode);
				if (probe)
					b->media->send_msg(net, probe, b, dst);
			}
		}
	}
	/* Else, use the active key if any */
	if (likely(key.active)) {
		tx_key = key.active;
		goto encrypt;
	}
	goto exit;

encrypt:
	aead = tipc_aead_get(tx->aead[tx_key]);
	if (unlikely(!aead))
		goto exit;
	rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
	if (likely(rc > 0))
		rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);

exit:
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY)
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
		else if (rc == -EKEYREVOKED)
			this_cpu_inc(stats->stat[STAT_BADKEYS]);
		kfree_skb(*skb);
		*skb = NULL;
		break;
	}

	tipc_aead_put(aead);
	return rc;
}
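
/* Illustrative note (added commentary): the probe above is what eventually
 * activates a pending TX key: at most every ~1s one cloned message is sent
 * encrypted with the pending key; once peers decrypt it and report that key
 * as their RX active key (via the ehdr rx_key_active field), its user count
 * rises and tipc_crypto_timeout() promotes it to active.
 */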

/**
 * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
 * @net: struct net
 * @rx: RX crypto handle
 * @skb: input/output message skb pointer
 * @b: bearer where the message has been received
 *
 * If the decryption is successful, the decrypted skb is returned directly or
 * via the callback; the encryption header and auth tag will be trimmed off
 * before forwarding to tipc_rcv() via the tipc_crypto_rcv_complete().
 * Otherwise, the skb will be freed!
 * Note: RX key(s) can be re-aligned, or in case no key is suitable, TX
 * cluster key(s) can be taken for decryption (- recursive).
 *
 * Return:
 * 0                   : the decryption has successfully completed
 * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
 * -ENOKEY             : the decryption has failed due to no key
 * -EBADMSG            : the decryption has failed due to bad message
 * -ENOMEM             : the decryption has failed due to no memory
 * < 0                 : the decryption has failed due to other reasons
 */
int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
		    struct sk_buff **skb, struct tipc_bearer *b)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats;
	struct tipc_aead *aead = NULL;
	struct tipc_key key;
	int rc = -ENOKEY;
	u8 tx_key = 0;

	/* New peer?
	 * Let's try with TX key (i.e. cluster mode) & verify the skb first!
	 */
	if (unlikely(!rx))
		goto pick_tx;

	/* Pick RX key according to TX key, three cases are possible:
	 * 1) The current active key (likely) or;
	 * 2) The pending (new or deactivated) key (if any) or;
	 * 3) The passive or old active key (i.e. users > 0);
	 */
	tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;
	key = rx->key;
	if (likely(tx_key == key.active))
		goto decrypt;
	if (tx_key == key.pending)
		goto decrypt;
	if (tx_key == key.passive) {
		rx->timer2 = jiffies;
		if (tipc_aead_users(rx->aead[key.passive]) > 0)
			goto decrypt;
	}

	/* Unknown key, let's try to align RX key(s) */
	if (tipc_crypto_key_try_align(rx, tx_key))
		goto decrypt;

pick_tx:
	/* No key suitable? Try to pick one from TX... */
	aead = tipc_crypto_key_pick_tx(tx, rx, *skb);
	if (aead)
		goto decrypt;
	goto exit;

decrypt:
	rcu_read_lock();
	if (!aead)
		aead = tipc_aead_get(rx->aead[tx_key]);
	rc = tipc_aead_decrypt(net, aead, *skb, b);
	rcu_read_unlock();

exit:
	stats = ((rx) ?: tx)->stats;
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY) {
			kfree_skb(*skb);
			*skb = NULL;
			if (rx)
				tipc_node_put(rx->node);
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
			return rc;
		} else if (rc == -EBADMSG) {
			this_cpu_inc(stats->stat[STAT_BADMSGS]);
		}
		break;
	}

	tipc_crypto_rcv_complete(net, aead, b, skb, rc);
	return rc;
}
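
/* Illustrative note (added commentary): the RX key lookup order above is
 * active -> pending -> passive -> try_align -> TX cluster keys, i.e. the
 * cheapest and most likely match is tried first, and the recursive TX
 * fallback (tipc_crypto_key_pick_tx) is the last resort for new peers.
 */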

static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
	struct tipc_crypto *rx = aead->crypto;
	struct tipc_aead *tmp = NULL;
	struct tipc_ehdr *ehdr;
	struct tipc_node *n;
	u8 rx_key_active;
	bool destined;

	/* Is this completed by TX? */
	if (unlikely(!rx->node)) {
		rx = skb_cb->tx_clone_ctx.rx;
#ifdef TIPC_CRYPTO_DEBUG
		pr_info("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
			(rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
			(*skb)->next, skb_cb->flags);
		pr_info("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
			skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
			aead->crypto->aead[1], aead->crypto->aead[2],
			aead->crypto->aead[3]);
#endif
		if (unlikely(err)) {
			if (err == -EBADMSG && (*skb)->next)
				tipc_rcv(net, (*skb)->next, b);
			goto free_skb;
		}

		if (likely((*skb)->next)) {
			kfree_skb((*skb)->next);
			(*skb)->next = NULL;
		}
		ehdr = (struct tipc_ehdr *)(*skb)->data;
		if (!rx) {
			WARN_ON(ehdr->user != LINK_CONFIG);
			n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
					     true);
			rx = tipc_node_crypto_rx(n);
			if (unlikely(!rx))
				goto free_skb;
		}

		/* Skip cloning this time as we had an RX pending key */
		if (rx->key.pending)
			goto rcv;
		if (tipc_aead_clone(&tmp, aead) < 0)
			goto rcv;
		if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key) < 0) {
			tipc_aead_free(&tmp->rcu);
			goto rcv;
		}
		tipc_aead_put(aead);
		aead = tipc_aead_get(tmp);
	}

	if (unlikely(err)) {
		tipc_aead_users_dec(aead, INT_MIN);
		goto free_skb;
	}

	/* Set the RX key's user */
	tipc_aead_users_set(aead, 1);

rcv:
	/* Mark this point, RX works */
	rx->timer1 = jiffies;

	/* Remove ehdr & auth. tag prior to tipc_rcv() */
	ehdr = (struct tipc_ehdr *)(*skb)->data;
	destined = ehdr->destined;
	rx_key_active = ehdr->rx_key_active;
	skb_pull(*skb, tipc_ehdr_size(ehdr));
	pskb_trim(*skb, (*skb)->len - aead->authsize);

	/* Validate TIPCv2 message */
	if (unlikely(!tipc_msg_validate(skb))) {
		pr_err_ratelimited("Packet dropped after decryption!\n");
		goto free_skb;
	}

	/* Update peer RX active key & TX users */
	if (destined)
		tipc_crypto_key_synch(rx, rx_key_active, buf_msg(*skb));

	/* Mark skb decrypted */
	skb_cb->decrypted = 1;

	/* Clear clone ctx if any */
	if (likely(!skb_cb->tx_clone_deferred))
		goto exit;
	skb_cb->tx_clone_deferred = 0;
	memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	goto exit;

free_skb:
	kfree_skb(*skb);
	*skb = NULL;

exit:
	tipc_aead_put(aead);
	if (rx)
		tipc_node_put(rx->node);
}

static void tipc_crypto_do_cmd(struct net *net, int cmd)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_crypto *tx = tn->crypto_tx, *rx;
	struct list_head *p;
	unsigned int stat;
	int i, j, cpu;
	char buf[200];

	/* Currently only one command is supported */
	switch (cmd) {
	case 0xfff1:
		goto print_stats;
	default:
		return;
	}

print_stats:
	/* Print a header */
	pr_info("\n=============== TIPC Crypto Statistics ===============\n\n");

	/* Print key status */
	pr_info("Key status:\n");
	pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net),
		tipc_crypto_key_dump(tx, buf));

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
			tipc_crypto_key_dump(rx, buf));
	}
	rcu_read_unlock();

	/* Print crypto statistics */
	for (i = 0, j = 0; i < MAX_STATS; i++)
		j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
	pr_info("\nCounter     %s", buf);

	memset(buf, '-', 115);
	buf[115] = '\0';
	pr_info("%s\n", buf);

	j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
	for_each_possible_cpu(cpu) {
		for (i = 0; i < MAX_STATS; i++) {
			stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
			j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
		}
		pr_info("%s", buf);
		j = scnprintf(buf, 200, "%12s", " ");
	}

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		j = scnprintf(buf, 200, "RX(%7.7s) ",
			      tipc_node_get_id_str(rx->node));
		for_each_possible_cpu(cpu) {
			for (i = 0; i < MAX_STATS; i++) {
				stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
				j += scnprintf(buf + j, 200 - j, "|%11d ",
					       stat);
			}
			pr_info("%s", buf);
			j = scnprintf(buf, 200, "%12s", " ");
		}
	}
	rcu_read_unlock();

	pr_info("\n======================== Done ========================\n");
}

static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
{
	struct tipc_key key = c->key;
	struct tipc_aead *aead;
	int k, i = 0;
	char *s;

	for (k = KEY_MIN; k <= KEY_MAX; k++) {
		if (k == key.passive)
			s = "PAS";
		else if (k == key.active)
			s = "ACT";
		else if (k == key.pending)
			s = "PEN";
		else
			s = "-";
		i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);

		rcu_read_lock();
		aead = rcu_dereference(c->aead[k]);
		if (aead)
			i += scnprintf(buf + i, 200 - i,
				       "{\"%s...\", \"%s\"}/%d:%d",
				       aead->hint,
				       (aead->mode == CLUSTER_KEY) ? "c" : "p",
				       atomic_read(&aead->users),
				       refcount_read(&aead->refcnt));
		rcu_read_unlock();
		i += scnprintf(buf + i, 200 - i, "\n");
	}

	if (c->node)
		i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
			       atomic_read(&c->peer_rx_active));

	return buf;
}

#ifdef TIPC_CRYPTO_DEBUG
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf)
{
	struct tipc_key *key = &old;
	int k, i = 0;
	char *s;

	/* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */
again:
	i += scnprintf(buf + i, 32 - i, "[");
	for (k = KEY_MIN; k <= KEY_MAX; k++) {
		if (k == key->passive)
			s = "pas";
		else if (k == key->active)
			s = "act";
		else if (k == key->pending)
			s = "pen";
		else
			s = "-";
		i += scnprintf(buf + i, 32 - i,
			       (k != KEY_MAX) ? "%s " : "%s", s);
	}
	if (key != &new) {
		i += scnprintf(buf + i, 32 - i, "] -> ");
		key = &new;
		goto again;
	}
	i += scnprintf(buf + i, 32 - i, "]");
	return buf;
}
#endif