/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 * Copyright (C) 2017 T-Platforms. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 * Copyright (C) 2017 T-Platforms. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Pingpong Linux driver
 */

/*
 * How to use this tool, by example.
 *
 * Assuming $DBG_DIR is something like:
 * '/sys/kernel/debug/ntb_pingpong/0000:00:03.0'
 * Suppose that, aside from the local device, there is at least one remote
 * device connected to the NTB with index 0.
 *-----------------------------------------------------------------------------
 * Eg: install the driver with a specified delay between doorbell event and
 * response
 *
 * root@self# insmod ntb_pingpong.ko delay_ms=1000
 *-----------------------------------------------------------------------------
 * Eg: get the number of ping-pong cycles performed
 *
 * root@self# cat $DBG_DIR/count
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/hrtimer.h>
#include <linux/debugfs.h>

#include <linux/ntb.h>

#define DRIVER_NAME		"ntb_pingpong"
#define DRIVER_VERSION		"2.0"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Allen Hubbe <Allen.Hubbe@emc.com>");
MODULE_DESCRIPTION("PCIe NTB Simple Pingpong Client");

static unsigned int unsafe;
module_param(unsafe, uint, 0644);
MODULE_PARM_DESC(unsafe, "Run even though ntb operations may be unsafe");

static unsigned int delay_ms = 1000;
module_param(delay_ms, uint, 0644);
MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");

struct pp_ctx {
	struct ntb_dev *ntb;
	struct hrtimer timer;
	u64 in_db;
	u64 out_db;
	int out_pidx;
	u64 nmask;
	u64 pmask;
	atomic_t count;
	spinlock_t lock;
	struct dentry *dbgfs_dir;
};
#define to_pp_timer(__timer) \
	container_of(__timer, struct pp_ctx, timer)

static struct dentry *pp_dbgfs_topdir;

static int pp_find_next_peer(struct pp_ctx *pp)
{
	u64 link, out_db;
	int pidx;

	link = ntb_link_is_up(pp->ntb, NULL, NULL);

	/*
	 * Find next available peer. Peers in nmask follow the local port in
	 * the global port order, so their inbound doorbell bit is shifted by
	 * one relative to their peer index.
	 */
	if (link & pp->nmask) {
		pidx = __ffs64(link & pp->nmask);
		out_db = BIT_ULL(pidx + 1);
	} else if (link & pp->pmask) {
		pidx = __ffs64(link & pp->pmask);
		out_db = BIT_ULL(pidx);
	} else {
		return -ENODEV;
	}

	spin_lock(&pp->lock);
	pp->out_pidx = pidx;
	pp->out_db = out_db;
	spin_unlock(&pp->lock);

	return 0;
}

static void pp_setup(struct pp_ctx *pp)
{
	int ret;

	ntb_db_set_mask(pp->ntb, pp->in_db);

	hrtimer_cancel(&pp->timer);

	ret = pp_find_next_peer(pp);
	if (ret == -ENODEV) {
		dev_dbg(&pp->ntb->dev, "Got no peers, so cancel\n");
		return;
	}

	dev_dbg(&pp->ntb->dev, "Ping-pong started with port %d, db %#llx\n",
		ntb_peer_port_number(pp->ntb, pp->out_pidx), pp->out_db);

	hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
}

static void pp_clear(struct pp_ctx *pp)
{
	hrtimer_cancel(&pp->timer);

	ntb_db_set_mask(pp->ntb, pp->in_db);

	dev_dbg(&pp->ntb->dev, "Ping-pong cancelled\n");
}

static void pp_ping(struct pp_ctx *pp)
{
	u32 count;

	count = atomic_read(&pp->count);

	spin_lock(&pp->lock);
	ntb_peer_spad_write(pp->ntb, pp->out_pidx, 0, count);
	ntb_peer_msg_write(pp->ntb, pp->out_pidx, 0, count);

	dev_dbg(&pp->ntb->dev, "Ping port %d spad %#x, msg %#x\n",
		ntb_peer_port_number(pp->ntb, pp->out_pidx), count, count);

	ntb_peer_db_set(pp->ntb, pp->out_db);
	ntb_db_clear_mask(pp->ntb, pp->in_db);
	spin_unlock(&pp->lock);
}

static void pp_pong(struct pp_ctx *pp)
{
	u32 msg_data = -1, spad_data = -1;
	int pidx = 0;

	/* Read pong data */
	spad_data = ntb_spad_read(pp->ntb, 0);
	msg_data = ntb_msg_read(pp->ntb, &pidx, 0);
	ntb_msg_clear_sts(pp->ntb, -1);

	/*
	 * Scratchpad and message data may differ, since the message register
	 * can't be rewritten unless its status is cleared. Additionally,
	 * either of them might be unsupported.
	 */
	dev_dbg(&pp->ntb->dev, "Pong spad %#x, msg %#x (port %d)\n",
		spad_data, msg_data, ntb_peer_port_number(pp->ntb, pidx));

	atomic_inc(&pp->count);

	ntb_db_set_mask(pp->ntb, pp->in_db);
	ntb_db_clear(pp->ntb, pp->in_db);

	hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
}

static enum hrtimer_restart pp_timer_func(struct hrtimer *t)
{
	struct pp_ctx *pp = to_pp_timer(t);

	pp_ping(pp);

	return HRTIMER_NORESTART;
}

static void pp_link_event(void *ctx)
{
	struct pp_ctx *pp = ctx;

	pp_setup(pp);
}

static void pp_db_event(void *ctx, int vec)
{
	struct pp_ctx *pp = ctx;

	pp_pong(pp);
}

static const struct ntb_ctx_ops pp_ops = {
	.link_event = pp_link_event,
	.db_event = pp_db_event
};

static int pp_check_ntb(struct ntb_dev *ntb)
{
	u64 pmask;

	if (ntb_db_is_unsafe(ntb)) {
		dev_dbg(&ntb->dev, "Doorbell is unsafe\n");
		if (!unsafe)
			return -EINVAL;
	}

	if (ntb_spad_is_unsafe(ntb)) {
		dev_dbg(&ntb->dev, "Scratchpad is unsafe\n");
		if (!unsafe)
			return -EINVAL;
	}

	/* One doorbell bit is needed for each port, the local one included */
	pmask = GENMASK_ULL(ntb_peer_port_count(ntb), 0);
	if ((ntb_db_valid_mask(ntb) & pmask) != pmask) {
		dev_err(&ntb->dev, "Unsupported DB configuration\n");
		return -EINVAL;
	}

	if (ntb_spad_count(ntb) < 1 && ntb_msg_count(ntb) < 1) {
		dev_err(&ntb->dev, "Scratchpads and messages unsupported\n");
		return -EINVAL;
	} else if (ntb_spad_count(ntb) < 1) {
		dev_dbg(&ntb->dev, "Scratchpads unsupported\n");
	} else if (ntb_msg_count(ntb) < 1) {
		dev_dbg(&ntb->dev, "Messages unsupported\n");
	}

	return 0;
}

static struct pp_ctx *pp_create_data(struct ntb_dev *ntb)
{
	struct pp_ctx *pp;

	pp = devm_kzalloc(&ntb->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return ERR_PTR(-ENOMEM);

	pp->ntb = ntb;
	atomic_set(&pp->count, 0);
	spin_lock_init(&pp->lock);
	hrtimer_init(&pp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pp->timer.function = pp_timer_func;

	return pp;
}

static void pp_init_flds(struct pp_ctx *pp)
{
	int pidx, lport, pcnt;

	/* Find global port index */
	lport = ntb_port_number(pp->ntb);
	pcnt = ntb_peer_port_count(pp->ntb);
	for (pidx = 0; pidx < pcnt; pidx++) {
		if (lport < ntb_peer_port_number(pp->ntb, pidx))
			break;
	}

	/*
	 * Use the local global port index as the inbound doorbell bit. pmask
	 * selects the peers preceding the local port in the global order,
	 * nmask the ones following it.
	 */
	pp->in_db = BIT_ULL(pidx);
	pp->pmask = GENMASK_ULL(pidx, 0) >> 1;
	pp->nmask = GENMASK_ULL(pcnt - 1, pidx);

	dev_dbg(&pp->ntb->dev, "Inbound db %#llx, prev %#llx, next %#llx\n",
		pp->in_db, pp->pmask, pp->nmask);
}

static int pp_mask_events(struct pp_ctx *pp)
{
	u64 db_mask, msg_mask;
	int ret;

	db_mask = ntb_db_valid_mask(pp->ntb);
	ret = ntb_db_set_mask(pp->ntb, db_mask);
	if (ret)
		return ret;

	/* Skip message events masking if unsupported */
	if (ntb_msg_count(pp->ntb) < 1)
		return 0;

	msg_mask = ntb_msg_outbits(pp->ntb) | ntb_msg_inbits(pp->ntb);
	return ntb_msg_set_mask(pp->ntb, msg_mask);
}

static int pp_setup_ctx(struct pp_ctx *pp)
{
	int ret;

	ret = ntb_set_ctx(pp->ntb, pp, &pp_ops);
	if (ret)
		return ret;

	ntb_link_enable(pp->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	/* Might not be necessary */
	ntb_link_event(pp->ntb);

	return 0;
}

static void pp_clear_ctx(struct pp_ctx *pp)
{
	ntb_link_disable(pp->ntb);

	ntb_clear_ctx(pp->ntb);
}

static void pp_setup_dbgfs(struct pp_ctx *pp)
{
	struct pci_dev *pdev = pp->ntb->pdev;
	void *ret;

	pp->dbgfs_dir = debugfs_create_dir(pci_name(pdev), pp_dbgfs_topdir);

	ret = debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
	if (!ret)
		dev_warn(&pp->ntb->dev, "DebugFS unsupported\n");
}

static void pp_clear_dbgfs(struct pp_ctx *pp)
{
	debugfs_remove_recursive(pp->dbgfs_dir);
}

static int pp_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pp_ctx *pp;
	int ret;

	ret = pp_check_ntb(ntb);
	if (ret)
		return ret;

	pp = pp_create_data(ntb);
	if (IS_ERR(pp))
		return PTR_ERR(pp);

	pp_init_flds(pp);

	ret = pp_mask_events(pp);
	if (ret)
		return ret;

	ret = pp_setup_ctx(pp);
	if (ret)
		return ret;

	pp_setup_dbgfs(pp);

	return 0;
}

static void pp_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pp_ctx *pp = ntb->ctx;

	pp_clear_dbgfs(pp);

	pp_clear_ctx(pp);

	pp_clear(pp);
}

static struct ntb_client pp_client = {
	.ops = {
		.probe = pp_probe,
		.remove = pp_remove
	}
};

static int __init pp_init(void)
{
	int ret;

	if (debugfs_initialized())
		pp_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	ret = ntb_register_client(&pp_client);
	if (ret)
		debugfs_remove_recursive(pp_dbgfs_topdir);

	return ret;
}
module_init(pp_init);

static void __exit pp_exit(void)
{
	ntb_unregister_client(&pp_client);
	debugfs_remove_recursive(pp_dbgfs_topdir);
}
module_exit(pp_exit);