// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
 * Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/module.h>

#include "server.h"
#include "smb_common.h"
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
#include "transport_rdma.h"

static DEFINE_MUTEX(init_lock);

static struct ksmbd_conn_ops default_conn_ops;

LIST_HEAD(conn_list);
DECLARE_RWSEM(conn_list_lock);

/**
 * ksmbd_conn_free() - free resources of the connection instance
 *
 * @conn: connection instance to be cleaned up
 *
 * During thread termination, the corresponding conn instance
 * resources (sock/memory) are released and finally the conn object is freed.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	down_write(&conn_list_lock);
	list_del(&conn->conns_list);
	up_write(&conn_list_lock);

	xa_destroy(&conn->sessions);
	kvfree(conn->request_buf);
	kfree(conn->preauth_info);
	kfree(conn);
}

/**
 * ksmbd_conn_alloc() - initialize a new connection instance
 *
 * Return: ksmbd_conn struct on success, otherwise NULL
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
	struct ksmbd_conn *conn;

	conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	conn->need_neg = true;
	ksmbd_conn_set_new(conn);
	conn->local_nls = load_nls("utf8");
	if (!conn->local_nls)
		conn->local_nls = load_nls_default();
	if (IS_ENABLED(CONFIG_UNICODE))
		conn->um = utf8_load(UNICODE_AGE(12, 1, 0));
	else
		conn->um = ERR_PTR(-EOPNOTSUPP);
	if (IS_ERR(conn->um))
		conn->um = NULL;
	atomic_set(&conn->req_running, 0);
	atomic_set(&conn->r_count, 0);
	conn->total_credits = 1;
	conn->outstanding_credits = 0;

	init_waitqueue_head(&conn->req_running_q);
	init_waitqueue_head(&conn->r_count_q);
	INIT_LIST_HEAD(&conn->conns_list);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
	spin_lock_init(&conn->request_lock);
	spin_lock_init(&conn->credits_lock);
	ida_init(&conn->async_ida);
	xa_init(&conn->sessions);

	spin_lock_init(&conn->llist_lock);
	INIT_LIST_HEAD(&conn->lock_list);

	down_write(&conn_list_lock);
	list_add(&conn->conns_list, &conn_list);
	up_write(&conn_list_lock);
	return conn;
}

bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
{
	struct ksmbd_conn *t;
	bool ret = false;

	down_read(&conn_list_lock);
	list_for_each_entry(t, &conn_list, conns_list) {
		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
			continue;

		ret = true;
		break;
	}
	up_read(&conn_list_lock);
	return ret;
}

void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct list_head *requests_queue = NULL;

	/* SMB2_CANCEL requests are not tracked on the requests queue */
	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
		requests_queue = &conn->requests;

	if (requests_queue) {
		atomic_inc(&conn->req_running);
		spin_lock(&conn->request_lock);
		list_add_tail(&work->request_entry, requests_queue);
		spin_unlock(&conn->request_lock);
	}
}
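
/*
 * Illustrative lifecycle of ksmbd_conn_enqueue_request() above and
 * ksmbd_conn_try_dequeue_request() below (a sketch, not code from this
 * file; handle_request() stands in for the real dispatch in the server
 * work handler):
 *
 *	ksmbd_conn_enqueue_request(work);	// bumps req_running
 *	rc = handle_request(work);		// hypothetical dispatch step
 *	ksmbd_conn_try_dequeue_request(work);	// drops req_running and
 *						// wakes req_running_q waiters
 *
 * ksmbd_conn_wait_idle() relies on this pairing: it sleeps on
 * req_running_q until the in-flight request count drains.
 */
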
int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	int ret = 1;

	if (list_empty(&work->request_entry) &&
	    list_empty(&work->async_request_entry))
		return 0;

	/* Keep the request queued while a multi-part response is pending */
	if (!work->multiRsp) {
		atomic_dec(&conn->req_running);
		spin_lock(&conn->request_lock);
		list_del_init(&work->request_entry);
		spin_unlock(&conn->request_lock);
		if (work->asynchronous)
			release_async_work(work);
		ret = 0;
	}

	wake_up_all(&conn->req_running_q);
	return ret;
}

void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
	mutex_lock(&conn->srv_mutex);
}

void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
	mutex_unlock(&conn->srv_mutex);
}

void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
{
	struct ksmbd_conn *conn;

	down_read(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		if (conn->binding || xa_load(&conn->sessions, sess_id))
			WRITE_ONCE(conn->status, status);
	}
	up_read(&conn_list_lock);
}

void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
{
	struct ksmbd_conn *bind_conn;

	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);

	down_read(&conn_list_lock);
	list_for_each_entry(bind_conn, &conn_list, conns_list) {
		if (bind_conn == conn)
			continue;

		if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
		    !ksmbd_conn_releasing(bind_conn) &&
		    atomic_read(&bind_conn->req_running)) {
			wait_event(bind_conn->req_running_q,
				   atomic_read(&bind_conn->req_running) == 0);
		}
	}
	up_read(&conn_list_lock);
}

int ksmbd_conn_write(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	size_t len = 0;
	int sent;
	struct kvec iov[3];
	int iov_idx = 0;

	if (!work->response_buf) {
		pr_err("NULL response header\n");
		return -EINVAL;
	}

	if (work->tr_buf) {
		iov[iov_idx] = (struct kvec) { work->tr_buf,
				sizeof(struct smb2_transform_hdr) + 4 };
		len += iov[iov_idx++].iov_len;
	}

	if (work->aux_payload_sz) {
		iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
		len += iov[iov_idx++].iov_len;
		iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
		len += iov[iov_idx++].iov_len;
	} else {
		if (work->tr_buf)
			iov[iov_idx].iov_len = work->resp_hdr_sz;
		else
			iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
		iov[iov_idx].iov_base = work->response_buf;
		len += iov[iov_idx++].iov_len;
	}

	ksmbd_conn_lock(conn);
	sent = conn->transport->ops->writev(conn->transport, &iov[0],
					    iov_idx, len,
					    work->need_invalidate_rkey,
					    work->remote_key);
	ksmbd_conn_unlock(conn);

	if (sent < 0) {
		pr_err("Failed to send message: %d\n", sent);
		return sent;
	}

	return 0;
}

int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
			 void *buf, unsigned int buflen,
			 struct smb2_buffer_desc_v1 *desc,
			 unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_read)
		ret = conn->transport->ops->rdma_read(conn->transport,
						      buf, buflen,
						      desc, desc_len);
	return ret;
}

int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
			  void *buf, unsigned int buflen,
			  struct smb2_buffer_desc_v1 *desc,
			  unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_write)
		ret = conn->transport->ops->rdma_write(conn->transport,
						       buf, buflen,
						       desc, desc_len);
	return ret;
}
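
/*
 * A minimal caller-side sketch of the RDMA wrappers above (illustrative
 * only; "desc" would come from a client-supplied SMB Direct buffer
 * descriptor carried in the request payload):
 *
 *	err = ksmbd_conn_rdma_write(conn, buf, buflen, desc, desc_len);
 *	if (err == -EINVAL)
 *		// transport has no rdma_write op (e.g. plain TCP):
 *		// fall back to an in-band response
 *
 * Keeping the op-presence check in the wrapper lets callers stay
 * transport-agnostic.
 */
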
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
	if (!ksmbd_server_running())
		return false;

	if (ksmbd_conn_exiting(conn))
		return false;

	if (kthread_should_stop())
		return false;

	if (atomic_read(&conn->stats.open_files_count) > 0)
		return true;

	/*
	 * Stop the current session if the time since the last request
	 * from the client exceeds the user-configured deadtime and the
	 * open file count is zero.
	 */
	if (server_conf.deadtime > 0 &&
	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
			    server_conf.deadtime / SMB_ECHO_INTERVAL);
		return false;
	}
	return true;
}
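
/*
 * Deadtime arithmetic in ksmbd_conn_alive() above, for reference
 * (assuming the convention that a deadtime of N minutes is stored as
 * N * SMB_ECHO_INTERVAL jiffies, with SMB_ECHO_INTERVAL == 60 * HZ):
 *
 *	deadtime = 5 minutes  ->  server_conf.deadtime = 5 * 60 * HZ jiffies
 *	connection idles past conn->last_active + server_conf.deadtime
 *	  ->  ksmbd_conn_alive() returns false and the handler loop exits
 *	the debug message reports server_conf.deadtime / SMB_ECHO_INTERVAL,
 *	  i.e. 5, recovering the configured minutes
 */
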
/**
 * ksmbd_conn_handler_loop() - session thread to listen for new SMB requests
 * @p: connection instance
 *
 * One thread per connection.
 *
 * Return: 0 on success
 */
int ksmbd_conn_handler_loop(void *p)
{
	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
	struct ksmbd_transport *t = conn->transport;
	unsigned int pdu_size, max_allowed_pdu_size;
	char hdr_buf[4] = {0,};
	int size;

	mutex_init(&conn->srv_mutex);
	__module_get(THIS_MODULE);

	if (t->ops->prepare && t->ops->prepare(t))
		goto out;

	conn->last_active = jiffies;
	while (ksmbd_conn_alive(conn)) {
		if (try_to_freeze())
			continue;

		kvfree(conn->request_buf);
		conn->request_buf = NULL;

		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
		if (size != sizeof(hdr_buf))
			break;

		pdu_size = get_rfc1002_len(hdr_buf);
		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);

		if (ksmbd_conn_good(conn))
			max_allowed_pdu_size =
				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
		else
			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;

		if (pdu_size > max_allowed_pdu_size) {
			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
					   pdu_size, max_allowed_pdu_size,
					   READ_ONCE(conn->status));
			break;
		}

		/*
		 * Check the maximum PDU size (0x00FFFFFF).
		 */
		if (pdu_size > MAX_STREAM_PROT_LEN)
			break;

		/* 4 for rfc1002 length field */
		/* 1 for implied bcc[0] */
		size = pdu_size + 4 + 1;
		conn->request_buf = kvmalloc(size, GFP_KERNEL);
		if (!conn->request_buf)
			break;

		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
		if (!ksmbd_smb_request(conn))
			break;

		/*
		 * We already read 4 bytes to find out PDU size, now
		 * read in PDU
		 */
		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
		if (size < 0) {
			pr_err("sock_read failed: %d\n", size);
			break;
		}

		if (size != pdu_size) {
			pr_err("PDU error. Read: %d, Expected: %u\n",
			       size, pdu_size);
			continue;
		}

		if (!default_conn_ops.process_fn) {
			pr_err("No connection request callback\n");
			break;
		}

		if (default_conn_ops.process_fn(conn)) {
			pr_err("Cannot handle request\n");
			break;
		}
	}

out:
	ksmbd_conn_set_releasing(conn);
	/* Wait till all references to the conn object are dropped */
	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);

	if (IS_ENABLED(CONFIG_UNICODE))
		utf8_unload(conn->um);
	unload_nls(conn->local_nls);
	if (default_conn_ops.terminate_fn)
		default_conn_ops.terminate_fn(conn);
	t->ops->disconnect(t);
	module_put(THIS_MODULE);
	return 0;
}

void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
	default_conn_ops.process_fn = ops->process_fn;
	default_conn_ops.terminate_fn = ops->terminate_fn;
}

int ksmbd_conn_transport_init(void)
{
	int ret;

	mutex_lock(&init_lock);
	ret = ksmbd_tcp_init();
	if (ret) {
		pr_err("Failed to init TCP subsystem: %d\n", ret);
		goto out;
	}

	ret = ksmbd_rdma_init();
	if (ret) {
		pr_err("Failed to init RDMA subsystem: %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&init_lock);
	return ret;
}

static void stop_sessions(void)
{
	struct ksmbd_conn *conn;
	struct ksmbd_transport *t;

again:
	down_read(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		struct task_struct *task;

		t = conn->transport;
		task = t->handler;
		if (task)
			ksmbd_debug(CONN, "Stop session handler %s/%d\n",
				    task->comm, task_pid_nr(task));
		ksmbd_conn_set_exiting(conn);
		if (t->ops->shutdown) {
			up_read(&conn_list_lock);
			t->ops->shutdown(t);
			down_read(&conn_list_lock);
		}
	}
	up_read(&conn_list_lock);

	if (!list_empty(&conn_list)) {
		schedule_timeout_interruptible(HZ / 10); /* 100ms */
		goto again;
	}
}

void ksmbd_conn_transport_destroy(void)
{
	mutex_lock(&init_lock);
	ksmbd_tcp_destroy();
	ksmbd_rdma_destroy();
	stop_sessions();
	mutex_unlock(&init_lock);
}
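
/*
 * Illustrative bring-up/tear-down sequence for the entry points above
 * (a sketch; the actual wiring lives in the server module, and the
 * callback names below are hypothetical):
 *
 *	static struct ksmbd_conn_ops conn_ops = {
 *		.process_fn	= server_process_request,
 *		.terminate_fn	= server_terminate_conn,
 *	};
 *
 *	ksmbd_conn_init_server_callbacks(&conn_ops);
 *	ret = ksmbd_conn_transport_init();	// start TCP + RDMA listeners
 *	...
 *	ksmbd_conn_transport_destroy();		// on module unload: stop
 *						// listeners, then sessions
 */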