/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64

/*
 * IVC channel reset protocol.
 *
 * Each end uses the tx.state field of its transmit header to indicate its
 * synchronization state.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx.channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the counters it owns (tx.count and rx.count) and
	 * transition to the ack state. If the remote endpoint observes us in
	 * the ack state, it can return to the established state once it has
	 * cleared its counters.
	 */
	TEGRA_IVC_STATE_ACK
};
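
/*
 * Illustrative walk-through of the reset protocol above (a sketch, not
 * additional protocol; see the state transition table further below and
 * tegra_ivc_notified() for the authoritative transitions). Endpoint A
 * resets while endpoint B is established:
 *
 *	A: state = SYNC, notify              B: state = ESTABLISHED
 *	B observes A in SYNC:  clears counters, state = ACK, notifies
 *	A observes B in ACK:   clears counters, state = ESTABLISHED, notifies
 *	B observes A in ESTABLISHED:         state = ESTABLISHED
 *
 * Both ends are now established and all four counters have been cleared.
 */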

/*
 * This structure is divided into two cache-aligned parts, the first of which
 * is only written through the tx.channel pointer, while the second is only
 * written through the rx.channel pointer. This delineates ownership of the
 * cache lines, which is critical to performance and necessary in non-cache
 * coherent implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;

		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};

static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}

static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
				   struct tegra_ivc_header *header)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with READ_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}
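
/*
 * The counters are free-running and the checks above rely on unsigned
 * wraparound. A worked example with hypothetical values and num_frames = 4:
 *
 *	tx = 0xfffffffe, rx = 0xfffffffc:  tx - rx = 2   (two frames pending)
 *	tx = 0x00000001, rx = 0xffffffff:  tx - rx = 2   (two frames pending,
 *	                                                  tx has wrapped)
 *	tx = 0x00000012, rx = 0x00000000:  tx - rx = 18  (over-full; reported
 *	                                                  as empty above)
 */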

static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return tx - rx >= ivc->num_frames;
}

static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
				      struct tegra_ivc_header *header)
{
	u32 tx = READ_ONCE(header->tx.count);
	u32 rx = READ_ONCE(header->rx.count);

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_empty() for an explanation about special
	 * over-full considerations.
	 */
	return tx - rx;
}

static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->tx.channel->tx.count,
		   READ_ONCE(ivc->tx.channel->tx.count) + 1);

	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}

static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	WRITE_ONCE(ivc->rx.channel->rx.count,
		   READ_ONCE(ivc->rx.channel->rx.count) + 1);

	if (ivc->rx.position == ivc->num_frames - 1)
		ivc->rx.position = 0;
	else
		ivc->rx.position++;
}
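
/*
 * Note that the shared counters are free-running while the local positions
 * wrap at num_frames; the open-coded wrap above is simply an explicit
 * spelling of a modular increment, i.e. equivalent to:
 *
 *	ivc->tx.position = (ivc->tx.position + 1) % ivc->num_frames;
 */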

static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	/*
	 * tx.channel->tx.state is set locally, so it is not synchronized
	 * with state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->tx.state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, ivc->rx.channel))
		return -ENOSPC;

	return 0;
}

static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	if (!tegra_ivc_full(ivc, ivc->tx.channel))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, ivc->tx.channel))
		return -ENOSPC;

	return 0;
}

static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
				  struct tegra_ivc_header *header,
				  unsigned int frame)
{
	if (WARN_ON(frame >= ivc->num_frames))
		return ERR_PTR(-EINVAL);

	return (void *)(header + 1) + ivc->frame_size * frame;
}

static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}
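
/*
 * Layout of one queue in shared memory, as assumed by the two helpers above:
 * the header is immediately followed by num_frames frames of frame_size
 * bytes each.
 *
 *	+--------------------------+ phys
 *	| struct tegra_ivc_header  |
 *	+--------------------------+ phys + sizeof(struct tegra_ivc_header)
 *	| frame 0                  |
 *	+--------------------------+ phys + sizeof(...) + 1 * frame_size
 *	| frame 1                  |
 *	+--------------------------+
 *	| ...                      |
 *	+--------------------------+ phys + sizeof(...) + num_frames * frame_size
 */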

static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame,
					      unsigned int offset,
					      size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 dma_addr_t phys,
					 unsigned int frame,
					 unsigned int offset,
					 size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}

/* directly peek at the next frame rx'ed */
void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return ERR_PTR(-EINVAL);

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return ERR_PTR(err);

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before data read.
	 */
	smp_rmb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);

int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
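
/*
 * A typical consumer drains the queue by alternating the two calls above;
 * a minimal sketch (the consume() helper is hypothetical):
 *
 *	void *frame;
 *
 *	while (!IS_ERR(frame = tegra_ivc_read_get_next_frame(ivc))) {
 *		consume(frame, ivc->frame_size);
 *		tegra_ivc_read_advance(ivc);
 *	}
 */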

/* directly poke at the next frame to be tx'ed */
void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
{
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return ERR_PTR(err);

	return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);

/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);
	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
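
/*
 * The producer side mirrors the read path; a minimal sketch (data and len
 * are hypothetical, with len assumed to be at most ivc->frame_size):
 *
 *	void *frame = tegra_ivc_write_get_next_frame(ivc);
 *
 *	if (!IS_ERR(frame)) {
 *		memcpy(frame, data, len);
 *		tegra_ivc_write_advance(ivc);
 *	}
 */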

void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);

/*
 * =======================================================
 *  IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *	local	remote	action
 *	-----	------	-----------------------------------
 *	SYNC	EST	<none>
 *	SYNC	ACK	reset counters; move to EST; notify
 *	SYNC	SYNC	reset counters; move to ACK; notify
 *	ACK	EST	move to EST; notify
 *	ACK	ACK	move to EST; notify
 *	ACK	SYNC	reset counters; move to ACK; notify
 *	EST	EST	<none>
 *	EST	ACK	<none>
 *	EST	SYNC	reset counters; move to ACK; notify
 *
 * =======================================================
 */
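
/*
 * Sketch of how a client is expected to drive this handshake (the
 * established flag is hypothetical): call tegra_ivc_reset() once, then
 * invoke tegra_ivc_notified() from the notification handler until it
 * returns 0.
 *
 *	tegra_ivc_reset(ivc);
 *
 *	and, in the notification/doorbell handler:
 *
 *	if (tegra_ivc_notified(ivc) == 0)
 *		established = true;
 */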

int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	state = READ_ONCE(ivc->rx.channel->tx.state);

	if (state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
		   state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_ACK before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx.channel->tx.count = 0;
		ivc->rx.channel->rx.count = 0;

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the "IVC State Transition Table" above.
		 */
	}

	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);

size_t tegra_ivc_align(size_t size)
{
	return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);

unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
		       __func__, queue_size, TEGRA_IVC_ALIGN);
		return 0;
	}

	return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);
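
/*
 * Worked example with hypothetical sizes: 16 frames of 128 bytes each give
 * a queue size of 2048 bytes. The header consists of two
 * TEGRA_IVC_ALIGN-sized (64-byte) parts, so per direction:
 *
 *	tegra_ivc_total_queue_size(2048) == 2048 + 128 == 2176 bytes
 */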

static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}

int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->rx.phys))
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->tx.phys)) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	ivc->rx.channel = rx;
	ivc->tx.channel = tx;
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);

void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
	if (ivc->peer) {
		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
							 ivc->frame_size);

		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
				 DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);
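
/*
 * End-to-end usage sketch (buffer setup, the notify implementation and all
 * error handling are hypothetical and elided):
 *
 *	static void notify(struct tegra_ivc *ivc, void *data)
 *	{
 *		ring the doorbell of the remote processor here
 *	}
 *
 *	err = tegra_ivc_init(&ivc, dev, rx, rx_phys, tx, tx_phys, 16, 128,
 *			     notify, NULL);
 *	tegra_ivc_reset(&ivc);
 *
 * Wait until tegra_ivc_notified(&ivc) returns 0, then exchange frames with
 * tegra_ivc_write_get_next_frame()/tegra_ivc_write_advance() and
 * tegra_ivc_read_get_next_frame()/tegra_ivc_read_advance(); finally call
 * tegra_ivc_cleanup(&ivc).
 */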