// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
 * Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/module.h>

#include "server.h"
#include "smb_common.h"
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
#include "transport_rdma.h"

static DEFINE_MUTEX(init_lock);

static struct ksmbd_conn_ops default_conn_ops;

LIST_HEAD(conn_list);
DECLARE_RWSEM(conn_list_lock);

/**
 * ksmbd_conn_free() - free resources of the connection instance
 *
 * @conn:	connection instance to be cleaned up
 *
 * During the thread termination, the corresponding conn instance
 * resources (sock/memory) are released and finally the conn object is freed.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	down_write(&conn_list_lock);
	list_del(&conn->conns_list);
	up_write(&conn_list_lock);

	xa_destroy(&conn->sessions);
	kvfree(conn->request_buf);
	kfree(conn->preauth_info);
	if (atomic_dec_and_test(&conn->refcnt))
		kfree(conn);
}

/**
 * ksmbd_conn_alloc() - initialize a new connection instance
 *
 * Return:	ksmbd_conn struct on success, otherwise NULL
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
	struct ksmbd_conn *conn;

	conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	conn->need_neg = true;
	ksmbd_conn_set_new(conn);
	conn->local_nls = load_nls("utf8");
	if (!conn->local_nls)
		conn->local_nls = load_nls_default();
	if (IS_ENABLED(CONFIG_UNICODE))
		conn->um = utf8_load(UNICODE_AGE(12, 1, 0));
	else
		conn->um = ERR_PTR(-EOPNOTSUPP);
	if (IS_ERR(conn->um))
		conn->um = NULL;
	atomic_set(&conn->req_running, 0);
	atomic_set(&conn->r_count, 0);
	atomic_set(&conn->refcnt, 1);
	conn->total_credits = 1;
	conn->outstanding_credits = 0;

	init_waitqueue_head(&conn->req_running_q);
	init_waitqueue_head(&conn->r_count_q);
	INIT_LIST_HEAD(&conn->conns_list);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
	spin_lock_init(&conn->request_lock);
	spin_lock_init(&conn->credits_lock);
	ida_init(&conn->async_ida);
	xa_init(&conn->sessions);

	spin_lock_init(&conn->llist_lock);
	INIT_LIST_HEAD(&conn->lock_list);

	init_rwsem(&conn->session_lock);

	down_write(&conn_list_lock);
	list_add(&conn->conns_list, &conn_list);
	up_write(&conn_list_lock);
	return conn;
}

bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
{
	struct ksmbd_conn *t;
	bool ret = false;

	down_read(&conn_list_lock);
	list_for_each_entry(t, &conn_list, conns_list) {
		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
			continue;

		ret = true;
		break;
	}
	up_read(&conn_list_lock);
	return ret;
}

void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct list_head *requests_queue = NULL;

	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
		requests_queue = &conn->requests;

	if (requests_queue) {
		atomic_inc(&conn->req_running);
		spin_lock(&conn->request_lock);
		list_add_tail(&work->request_entry, requests_queue);
		spin_unlock(&conn->request_lock);
	}
}

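/*
 * Illustrative sketch (not part of the original file): a caller that
 * accepts a new transport connection would typically pair
 * ksmbd_conn_alloc() with ksmbd_conn_free() roughly as below. The
 * transport pointer `t` and the surrounding error handling are
 * hypothetical.
 *
 *	struct ksmbd_conn *conn;
 *
 *	conn = ksmbd_conn_alloc();	// also links conn into conn_list
 *	if (!conn)
 *		return -ENOMEM;
 *	conn->transport = t;		// hypothetical transport pointer
 *	...
 *	ksmbd_conn_free(conn);		// unlinks from conn_list, drops refcnt
 */
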
void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;

	if (list_empty(&work->request_entry) &&
	    list_empty(&work->async_request_entry))
		return;

	atomic_dec(&conn->req_running);
	spin_lock(&conn->request_lock);
	list_del_init(&work->request_entry);
	spin_unlock(&conn->request_lock);
	if (work->asynchronous)
		release_async_work(work);

	wake_up_all(&conn->req_running_q);
}

void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
	mutex_lock(&conn->srv_mutex);
}

void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
	mutex_unlock(&conn->srv_mutex);
}

void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
{
	struct ksmbd_conn *conn;

	down_read(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		if (conn->binding || xa_load(&conn->sessions, sess_id))
			WRITE_ONCE(conn->status, status);
	}
	up_read(&conn_list_lock);
}

void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
{
	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
}

int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
{
	struct ksmbd_conn *conn;
	int rc, retry_count = 0, max_timeout = 120;
	int rcount = 1;

retry_idle:
	if (retry_count >= max_timeout)
		return -EIO;

	down_read(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		if (conn->binding || xa_load(&conn->sessions, sess_id)) {
			if (conn == curr_conn)
				rcount = 2;
			if (atomic_read(&conn->req_running) >= rcount) {
				rc = wait_event_timeout(conn->req_running_q,
					atomic_read(&conn->req_running) < rcount,
					HZ);
				if (!rc) {
					up_read(&conn_list_lock);
					retry_count++;
					goto retry_idle;
				}
			}
		}
	}
	up_read(&conn_list_lock);

	return 0;
}

int ksmbd_conn_write(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	int sent;

	if (!work->response_buf) {
		pr_err("NULL response header\n");
		return -EINVAL;
	}

	if (work->send_no_response)
		return 0;

	if (!work->iov_idx)
		return -EINVAL;

	ksmbd_conn_lock(conn);
	sent = conn->transport->ops->writev(conn->transport, work->iov,
					    work->iov_cnt,
					    get_rfc1002_len(work->iov[0].iov_base) + 4,
					    work->need_invalidate_rkey,
					    work->remote_key);
	ksmbd_conn_unlock(conn);

	if (sent < 0) {
		pr_err("Failed to send message: %d\n", sent);
		return sent;
	}

	return 0;
}

int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
			 void *buf, unsigned int buflen,
			 struct smb2_buffer_desc_v1 *desc,
			 unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_read)
		ret = conn->transport->ops->rdma_read(conn->transport,
						      buf, buflen,
						      desc, desc_len);
	return ret;
}

int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
			  void *buf, unsigned int buflen,
			  struct smb2_buffer_desc_v1 *desc,
			  unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_write)
		ret = conn->transport->ops->rdma_write(conn->transport,
						       buf, buflen,
						       desc, desc_len);
	return ret;
}

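/*
 * Note: both RDMA wrappers above fall back to -EINVAL when the active
 * transport does not implement the corresponding op (e.g. a plain TCP
 * transport), so a caller can treat that return value as "RDMA not
 * available". A minimal hypothetical caller sketch:
 *
 *	ret = ksmbd_conn_rdma_write(conn, buf, len, desc, desc_len);
 *	if (ret == -EINVAL)
 *		ret = copy_write_fallback(conn, buf, len);	// hypothetical helper
 */
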
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
	if (!ksmbd_server_running())
		return false;

	if (ksmbd_conn_exiting(conn))
		return false;

	if (kthread_should_stop())
		return false;

	if (atomic_read(&conn->stats.open_files_count) > 0)
		return true;

	/*
	 * Stop the current session if the time since the last request
	 * from the client exceeds the user-configured deadtime and the
	 * open file count is zero.
	 */
	if (server_conf.deadtime > 0 &&
	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
			    server_conf.deadtime / SMB_ECHO_INTERVAL);
		return false;
	}
	return true;
}

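/*
 * Worked example for the deadtime check above, assuming the usual
 * setup where server_conf.deadtime is stored in jiffies and
 * SMB_ECHO_INTERVAL is one minute's worth of jiffies: with a
 * configured deadtime of 5 minutes, deadtime / SMB_ECHO_INTERVAL
 * prints "5", and the connection is torn down once jiffies passes
 * last_active + deadtime while no files remain open.
 */
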
#define SMB1_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb_hdr))
#define SMB2_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr) + 4)

/**
 * ksmbd_conn_handler_loop() - connection thread to listen for new SMB requests
 * @p:	connection instance
 *
 * One thread per connection
 *
 * Return:	0 on success
 */
int ksmbd_conn_handler_loop(void *p)
{
	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
	struct ksmbd_transport *t = conn->transport;
	unsigned int pdu_size, max_allowed_pdu_size;
	char hdr_buf[4] = {0,};
	int size;

	mutex_init(&conn->srv_mutex);
	__module_get(THIS_MODULE);

	if (t->ops->prepare && t->ops->prepare(t))
		goto out;

	conn->last_active = jiffies;
	set_freezable();
	while (ksmbd_conn_alive(conn)) {
		if (try_to_freeze())
			continue;

		kvfree(conn->request_buf);
		conn->request_buf = NULL;

		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
		if (size != sizeof(hdr_buf))
			break;

		pdu_size = get_rfc1002_len(hdr_buf);
		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);

		if (ksmbd_conn_good(conn))
			max_allowed_pdu_size =
				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
		else
			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;

		if (pdu_size > max_allowed_pdu_size) {
			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
					   pdu_size, max_allowed_pdu_size,
					   READ_ONCE(conn->status));
			break;
		}

		/*
		 * Check maximum pdu size (0x00FFFFFF).
		 */
		if (pdu_size > MAX_STREAM_PROT_LEN)
			break;

		if (pdu_size < SMB1_MIN_SUPPORTED_HEADER_SIZE)
			break;

		/* 4 for rfc1002 length field */
		/* 1 for implied bcc[0] */
		size = pdu_size + 4 + 1;
		conn->request_buf = kvmalloc(size, GFP_KERNEL);
		if (!conn->request_buf)
			break;

		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));

		/*
		 * We already read 4 bytes to find out PDU size, now
		 * read in PDU
		 */
		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
		if (size < 0) {
			pr_err("sock_read failed: %d\n", size);
			break;
		}

		if (size != pdu_size) {
			pr_err("PDU error. Read: %d, Expected: %d\n",
			       size, pdu_size);
			continue;
		}

		if (!ksmbd_smb_request(conn))
			break;

		if (((struct smb2_hdr *)smb2_get_msg(conn->request_buf))->ProtocolId ==
		    SMB2_PROTO_NUMBER) {
			if (pdu_size < SMB2_MIN_SUPPORTED_HEADER_SIZE)
				break;
		}

		if (!default_conn_ops.process_fn) {
			pr_err("No connection request callback\n");
			break;
		}

		if (default_conn_ops.process_fn(conn)) {
			pr_err("Cannot handle request\n");
			break;
		}
	}

out:
	ksmbd_conn_set_releasing(conn);
	/* Wait till all references to the server object are dropped */
	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);

	if (IS_ENABLED(CONFIG_UNICODE))
		utf8_unload(conn->um);
	unload_nls(conn->local_nls);
	if (default_conn_ops.terminate_fn)
		default_conn_ops.terminate_fn(conn);
	t->ops->disconnect(t);
	module_put(THIS_MODULE);
	return 0;
}

void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
	default_conn_ops.process_fn = ops->process_fn;
	default_conn_ops.terminate_fn = ops->terminate_fn;
}

int ksmbd_conn_transport_init(void)
{
	int ret;

	mutex_lock(&init_lock);
	ret = ksmbd_tcp_init();
	if (ret) {
		pr_err("Failed to init TCP subsystem: %d\n", ret);
		goto out;
	}

	ret = ksmbd_rdma_init();
	if (ret) {
		pr_err("Failed to init RDMA subsystem: %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&init_lock);
	return ret;
}

static void stop_sessions(void)
{
	struct ksmbd_conn *conn;
	struct ksmbd_transport *t;

again:
	down_read(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		t = conn->transport;
		ksmbd_conn_set_exiting(conn);
		if (t->ops->shutdown) {
			up_read(&conn_list_lock);
			t->ops->shutdown(t);
			down_read(&conn_list_lock);
		}
	}
	up_read(&conn_list_lock);

	if (!list_empty(&conn_list)) {
		schedule_timeout_interruptible(HZ / 10); /* 100ms */
		goto again;
	}
}

void ksmbd_conn_transport_destroy(void)
{
	mutex_lock(&init_lock);
	ksmbd_tcp_destroy();
	ksmbd_rdma_destroy();
	stop_sessions();
	mutex_unlock(&init_lock);
}
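
/*
 * Illustrative startup/shutdown ordering (hypothetical caller, e.g. a
 * server module's init/exit paths): callbacks are registered before the
 * transports start accepting connections, and teardown stops the
 * listeners before waiting for live sessions to drain.
 *
 *	ksmbd_conn_init_server_callbacks(&my_conn_ops);	// hypothetical ops
 *	ret = ksmbd_conn_transport_init();
 *	...
 *	ksmbd_conn_transport_destroy();	// tcp/rdma destroy, then stop_sessions()
 */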