/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 * Copyright (C) 2017 T-Platforms. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 * Copyright (C) 2017 T-Platforms. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Pingpong Linux driver
 */

/*
 * How to use this tool, by example.
 *
 * Assuming $DBG_DIR is something like:
 * '/sys/kernel/debug/ntb_pingpong/0000:00:03.0'
 * Suppose that, aside from the local device, there is at least one remote
 * device connected to the NTB with index 0.
 *-----------------------------------------------------------------------------
 * Eg: install driver with specified delay between doorbell event and response
 *
 * root@self# insmod ntb_pingpong.ko delay_ms=1000
 *-----------------------------------------------------------------------------
 * Eg: get number of ping-pong cycles performed
 *
 * root@self# cat $DBG_DIR/count
 */
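/*
 * Eg: watch the ping/pong activity itself. The driver reports each ping and
 *     pong only via dev_dbg(), so one possible way to see the messages is
 *     dynamic debug (a sketch, assuming the kernel was built with
 *     CONFIG_DYNAMIC_DEBUG; this example is an addition to the original
 *     usage notes)
 *
 * root@self# echo -n 'module ntb_pingpong +p' > \
 *            /sys/kernel/debug/dynamic_debug/control
 * root@self# dmesg | grep -iE 'ping|pong'
 */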

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/hrtimer.h>
#include <linux/debugfs.h>

#include <linux/ntb.h>

#define DRIVER_NAME		"ntb_pingpong"
#define DRIVER_VERSION		"2.0"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Allen Hubbe <Allen.Hubbe@emc.com>");
MODULE_DESCRIPTION("PCIe NTB Simple Pingpong Client");

static unsigned int unsafe;
module_param(unsafe, uint, 0644);
MODULE_PARM_DESC(unsafe, "Run even though ntb operations may be unsafe");

static unsigned int delay_ms = 1000;
module_param(delay_ms, uint, 0644);
MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");

struct pp_ctx {
	struct ntb_dev *ntb;
	struct hrtimer timer;
	u64 in_db;
	u64 out_db;
	int out_pidx;
	u64 nmask;
	u64 pmask;
	atomic_t count;
	spinlock_t lock;
	struct dentry *dbgfs_dir;
};
#define to_pp_timer(__timer) \
	container_of(__timer, struct pp_ctx, timer)

static struct dentry *pp_dbgfs_topdir;

static int pp_find_next_peer(struct pp_ctx *pp)
{
	u64 link, out_db;
	int pidx;

	link = ntb_link_is_up(pp->ntb, NULL, NULL);

	/* Find next available peer */
	if (link & pp->nmask)
		pidx = __ffs64(link & pp->nmask);
	else if (link & pp->pmask)
		pidx = __ffs64(link & pp->pmask);
	else
		return -ENODEV;

	out_db = BIT_ULL(ntb_peer_port_number(pp->ntb, pidx));

	spin_lock(&pp->lock);
	pp->out_pidx = pidx;
	pp->out_db = out_db;
	spin_unlock(&pp->lock);

	return 0;
}

static void pp_setup(struct pp_ctx *pp)
{
	int ret;

	ntb_db_set_mask(pp->ntb, pp->in_db);

	hrtimer_cancel(&pp->timer);

	ret = pp_find_next_peer(pp);
	if (ret == -ENODEV) {
		dev_dbg(&pp->ntb->dev, "Got no peers, so cancel\n");
		return;
	}

	dev_dbg(&pp->ntb->dev, "Ping-pong started with port %d, db %#llx\n",
		ntb_peer_port_number(pp->ntb, pp->out_pidx), pp->out_db);

	hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
}

static void pp_clear(struct pp_ctx *pp)
{
	hrtimer_cancel(&pp->timer);

	ntb_db_set_mask(pp->ntb, pp->in_db);

	dev_dbg(&pp->ntb->dev, "Ping-pong cancelled\n");
}

static void pp_ping(struct pp_ctx *pp)
{
	u32 count;

	count = atomic_read(&pp->count);

	spin_lock(&pp->lock);
	ntb_peer_spad_write(pp->ntb, pp->out_pidx, 0, count);
	ntb_peer_msg_write(pp->ntb, pp->out_pidx, 0, count);

	dev_dbg(&pp->ntb->dev, "Ping port %d spad %#x, msg %#x\n",
		ntb_peer_port_number(pp->ntb, pp->out_pidx), count, count);

	ntb_peer_db_set(pp->ntb, pp->out_db);
	ntb_db_clear_mask(pp->ntb, pp->in_db);
	spin_unlock(&pp->lock);
}

static void pp_pong(struct pp_ctx *pp)
{
	u32 msg_data = -1, spad_data = -1;
	int pidx = 0;

	/* Read pong data */
	spad_data = ntb_spad_read(pp->ntb, 0);
	msg_data = ntb_msg_read(pp->ntb, &pidx, 0);
	ntb_msg_clear_sts(pp->ntb, -1);

	/*
	 * Scratchpad and message data may differ, since message register can't
	 * be rewritten unless status is cleared. Additionally either of them
	 * might be unsupported
	 */
	dev_dbg(&pp->ntb->dev, "Pong spad %#x, msg %#x (port %d)\n",
		spad_data, msg_data, ntb_peer_port_number(pp->ntb, pidx));

	atomic_inc(&pp->count);

	ntb_db_set_mask(pp->ntb, pp->in_db);
	ntb_db_clear(pp->ntb, pp->in_db);

	hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
}

static enum hrtimer_restart pp_timer_func(struct hrtimer *t)
{
	struct pp_ctx *pp = to_pp_timer(t);

	pp_ping(pp);

	return HRTIMER_NORESTART;
}

static void pp_link_event(void *ctx)
{
	struct pp_ctx *pp = ctx;

	pp_setup(pp);
}

static void pp_db_event(void *ctx, int vec)
{
	struct pp_ctx *pp = ctx;

	pp_pong(pp);
}

static const struct ntb_ctx_ops pp_ops = {
	.link_event = pp_link_event,
	.db_event = pp_db_event
};

static int pp_check_ntb(struct ntb_dev *ntb)
{
	u64 pmask;

	if (ntb_db_is_unsafe(ntb)) {
		dev_dbg(&ntb->dev, "Doorbell is unsafe\n");
		if (!unsafe)
			return -EINVAL;
	}

	if (ntb_spad_is_unsafe(ntb)) {
		dev_dbg(&ntb->dev, "Scratchpad is unsafe\n");
		if (!unsafe)
			return -EINVAL;
	}

	pmask = GENMASK_ULL(ntb_peer_port_count(ntb), 0);
	if ((ntb_db_valid_mask(ntb) & pmask) != pmask) {
		dev_err(&ntb->dev, "Unsupported DB configuration\n");
		return -EINVAL;
	}

	if (ntb_spad_count(ntb) < 1 && ntb_msg_count(ntb) < 1) {
		dev_err(&ntb->dev, "Scratchpads and messages unsupported\n");
		return -EINVAL;
	} else if (ntb_spad_count(ntb) < 1) {
		dev_dbg(&ntb->dev, "Scratchpads unsupported\n");
	} else if (ntb_msg_count(ntb) < 1) {
		dev_dbg(&ntb->dev, "Messages unsupported\n");
	}

	return 0;
}

static struct pp_ctx *pp_create_data(struct ntb_dev *ntb)
{
	struct pp_ctx *pp;

	pp = devm_kzalloc(&ntb->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return ERR_PTR(-ENOMEM);

	pp->ntb = ntb;
	atomic_set(&pp->count, 0);
	spin_lock_init(&pp->lock);
	hrtimer_init(&pp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pp->timer.function = pp_timer_func;

	return pp;
}

static void pp_init_flds(struct pp_ctx *pp)
{
	int pidx, lport, pcnt;

	/* Find global port index */
	lport = ntb_port_number(pp->ntb);
	pcnt = ntb_peer_port_count(pp->ntb);
	for (pidx = 0; pidx < pcnt; pidx++) {
		if (lport < ntb_peer_port_number(pp->ntb, pidx))
			break;
	}

	pp->in_db = BIT_ULL(lport);
	pp->pmask = GENMASK_ULL(pidx, 0) >> 1;
	pp->nmask = GENMASK_ULL(pcnt - 1, pidx);

	dev_dbg(&pp->ntb->dev, "Inbound db %#llx, prev %#llx, next %#llx\n",
		pp->in_db, pp->pmask, pp->nmask);
}

static int pp_mask_events(struct pp_ctx *pp)
{
	u64 db_mask, msg_mask;
	int ret;

	db_mask = ntb_db_valid_mask(pp->ntb);
	ret = ntb_db_set_mask(pp->ntb, db_mask);
	if (ret)
		return ret;

	/* Skip message events masking if unsupported */
	if (ntb_msg_count(pp->ntb) < 1)
		return 0;

	msg_mask = ntb_msg_outbits(pp->ntb) | ntb_msg_inbits(pp->ntb);
	return ntb_msg_set_mask(pp->ntb, msg_mask);
}

static int pp_setup_ctx(struct pp_ctx *pp)
{
	int ret;

	ret = ntb_set_ctx(pp->ntb, pp, &pp_ops);
	if (ret)
		return ret;

	ntb_link_enable(pp->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	/* Might not be necessary */
	ntb_link_event(pp->ntb);

	return 0;
}

static void pp_clear_ctx(struct pp_ctx *pp)
{
	ntb_link_disable(pp->ntb);

	ntb_clear_ctx(pp->ntb);
}

static void pp_setup_dbgfs(struct pp_ctx *pp)
{
	struct pci_dev *pdev = pp->ntb->pdev;

	pp->dbgfs_dir = debugfs_create_dir(pci_name(pdev), pp_dbgfs_topdir);

	debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
}

static void pp_clear_dbgfs(struct pp_ctx *pp)
{
	debugfs_remove_recursive(pp->dbgfs_dir);
}

static int pp_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pp_ctx *pp;
	int ret;

	ret = pp_check_ntb(ntb);
	if (ret)
		return ret;

	pp = pp_create_data(ntb);
	if (IS_ERR(pp))
		return PTR_ERR(pp);

	pp_init_flds(pp);

	ret = pp_mask_events(pp);
	if (ret)
		return ret;

	ret = pp_setup_ctx(pp);
	if (ret)
		return ret;

	pp_setup_dbgfs(pp);

	return 0;
}

static void pp_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pp_ctx *pp = ntb->ctx;

	pp_clear_dbgfs(pp);

	pp_clear_ctx(pp);

	pp_clear(pp);
}

static struct ntb_client pp_client = {
	.ops = {
		.probe = pp_probe,
		.remove = pp_remove
	}
};

static int __init pp_init(void)
{
	int ret;

	if (debugfs_initialized())
		pp_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	ret = ntb_register_client(&pp_client);
	if (ret)
		debugfs_remove_recursive(pp_dbgfs_topdir);

	return ret;
}
module_init(pp_init);

static void __exit pp_exit(void)
{
	ntb_unregister_client(&pp_client);
	debugfs_remove_recursive(pp_dbgfs_topdir);
}
module_exit(pp_exit);